author    Linus Torvalds <torvalds@linux-foundation.org>    2011-05-24 15:06:40 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-05-24 15:06:40 -0400
commit    98b98d316349e9a028e632629fe813d07fa5afdd (patch)
tree      caaf6a662a86c5e2a418f0929ca05f0748803ac5
parent    0d66cba1ac3ad38614077443d604d6a09cec99de (diff)
parent    931474c4c30633400ff0dff8fb452ae20e01d067 (diff)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (169 commits)
  drivers/gpu/drm/radeon/atom.c: fix warning
  drm/radeon/kms: bump kms version number
  drm/radeon/kms: properly set num banks for fusion asics
  drm/radeon/kms/atom: move dig phy init out of modesetting
  drm/radeon/kms/cayman: fix typo in register mask
  drm/radeon/kms: fix typo in spread spectrum code
  drm/radeon/kms: fix tile_config value reported to userspace on cayman.
  drm/radeon/kms: fix incorrect comparison in cayman setup code.
  drm/radeon/kms: add wait idle ioctl for eg->cayman
  drm/radeon/cayman: setup hdp to invalidate and flush when asked
  drm/radeon/evergreen/btc/fusion: setup hdp to invalidate and flush when asked
  agp/uninorth: Fix lockups with radeon KMS and >1x.
  drm/radeon/kms: the SS_Id field in the LCD table if for LVDS only
  drm/radeon/kms: properly set the CLK_REF bit for DCE3 devices
  drm/radeon/kms: fixup eDP connector handling
  drm/radeon/kms: bail early for eDP in hotplug callback
  drm/radeon/kms: simplify hotplug handler logic
  drm/radeon/kms: rewrite DP handling
  drm/radeon/kms/atom: add support for setting DP panel mode
  drm/radeon/kms: atombios.h updates for DP panel mode
  ...
-rw-r--r--  MAINTAINERS                                  |    4
-rw-r--r--  drivers/char/agp/intel-agp.c                 |    3
-rw-r--r--  drivers/char/agp/intel-agp.h                 |    8
-rw-r--r--  drivers/char/agp/intel-gtt.c                 |   10
-rw-r--r--  drivers/char/agp/uninorth-agp.c              |    2
-rw-r--r--  drivers/gpu/drm/drm_edid.c                   |   61
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c              |  207
-rw-r--r--  drivers/gpu/drm/drm_irq.c                    |    9
-rw-r--r--  drivers/gpu/drm/drm_modes.c                  |  156
-rw-r--r--  drivers/gpu/drm/drm_stub.c                   |   21
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |  131
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              |   60
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c              |   68
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |  113
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              |   36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c          |   35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c       |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c              |  311
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h              |   35
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c          |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c            |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c             |   24
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         | 2303
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h             |   19
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c      |   88
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      |   35
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c            |   10
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c              |   13
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig              |    2
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile             |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c       |  108
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c       |    7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h       |    1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c    |   20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  |    8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c    |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c        |   25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h        |  208
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.h      |   10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c        |   66
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c     |  118
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c       |   92
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c         |   21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h        |   14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c      |  212
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h         |    3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_volt.c       |   10
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c          |    9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c         |  383
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c       |    3
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c         |  212
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c         |  510
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c          |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c         |  323
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mpeg.c          |  311
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_calc.c          |   68
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c          |   13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c       |   18
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c         |  442
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c         |   10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c          |  256
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c            |   15
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c            |   12
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c         |  135
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c          |  226
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc        |  870
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc.h      |  534
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c            |  169
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.c          |  243
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.fuc.h      |  527
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c          |  142
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c         |  600
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.h         |   29
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c         |   20
-rw-r--r--  drivers/gpu/drm/radeon/atom.c                |    4
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h            |   22
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c       |  132
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c         | 1046
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c           |   14
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h          |    2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c                  |    8
-rw-r--r--  drivers/gpu/drm/radeon/nid.h                 |    4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c         |    4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c      |  117
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c   |  607
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c       |    3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c      |   10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c          |    3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c     |  252
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c          |    2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h         |   19
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c             |    6
-rw-r--r--  drivers/gpu/vga/vgaarb.c                     |  113
-rw-r--r--  drivers/pci/pci.c                            |   25
-rw-r--r--  drivers/platform/x86/Kconfig                 |    7
-rw-r--r--  drivers/platform/x86/Makefile                |    1
-rw-r--r--  drivers/platform/x86/mxm-wmi.c               |  111
-rw-r--r--  include/drm/drmP.h                           |   49
-rw-r--r--  include/drm/drm_crtc.h                       |    6
-rw-r--r--  include/drm/drm_dp_helper.h                  |    5
-rw-r--r--  include/drm/drm_edid.h                       |   25
-rw-r--r--  include/drm/drm_fb_helper.h                  |   16
-rw-r--r--  include/linux/mxm-wmi.h                      |   33
-rw-r--r--  include/linux/pci.h                          |    7
104 files changed, 9392 insertions(+), 4045 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0b415248ae25..b64825ddaf32 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2245,10 +2245,10 @@ F: drivers/gpu/drm/
 F:	include/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
-M:	Chris Wilson <chris@chris-wilson.co.uk>
+M:	Keith Packard <keithp@keithp.com>
 L:	intel-gfx@lists.freedesktop.org (subscribers-only)
 L:	dri-devel@lists.freedesktop.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6.git
 S:	Supported
 F:	drivers/gpu/drm/i915
 F:	include/drm/i915*
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index b0a0dccc98c1..b427711be4be 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -903,6 +903,9 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
 	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
+	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
+	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
 	{ }
 };
 
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 5feebe2800e9..999803ce10dc 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -225,6 +225,14 @@
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG	0x0126
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB		0x0108	/* Server */
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG		0x010A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB		0x0150	/* Desktop */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG		0x0152
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG		0x0162
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB		0x0154	/* Mobile */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG		0x0156
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG		0x0166
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB		0x0158	/* Server */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG		0x015A
 
 int intel_gmch_probe(struct pci_dev *pdev,
 		     struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 0d09b537bb9a..85151019dde1 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1420,6 +1420,16 @@ static const struct intel_gtt_driver_description {
1420 "Sandybridge", &sandybridge_gtt_driver }, 1420 "Sandybridge", &sandybridge_gtt_driver },
1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, 1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1422 "Sandybridge", &sandybridge_gtt_driver }, 1422 "Sandybridge", &sandybridge_gtt_driver },
1423 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
1424 "Ivybridge", &sandybridge_gtt_driver },
1425 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
1426 "Ivybridge", &sandybridge_gtt_driver },
1427 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
1428 "Ivybridge", &sandybridge_gtt_driver },
1429 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
1430 "Ivybridge", &sandybridge_gtt_driver },
1431 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
1432 "Ivybridge", &sandybridge_gtt_driver },
1423 { 0, NULL, NULL } 1433 { 0, NULL, NULL }
1424}; 1434};
1425 1435
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index f845a8f718b3..a32c492baf5c 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -80,7 +80,7 @@ static void uninorth_tlbflush(struct agp_memory *mem)
 			       ctrl | UNI_N_CFG_GART_INVAL);
 	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
 
-	if (uninorth_rev <= 0x30) {
+	if (!mem && uninorth_rev <= 0x30) {
 		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
 				       ctrl | UNI_N_CFG_GART_2xRESET);
 		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index adc9358c9bec..0a9357c66ff8 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1413,6 +1413,64 @@ end:
 EXPORT_SYMBOL(drm_detect_monitor_audio);
 
 /**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector.  Useful for tracking bpp and
+ * color spaces.
+ */
+static void drm_add_display_info(struct edid *edid,
+				 struct drm_display_info *info)
+{
+	info->width_mm = edid->width_cm * 10;
+	info->height_mm = edid->height_cm * 10;
+
+	/* driver figures it out in this case */
+	info->bpc = 0;
+	info->color_formats = 0;
+
+	/* Only defined for 1.4 with digital displays */
+	if (edid->revision < 4)
+		return;
+
+	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+		return;
+
+	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+	case DRM_EDID_DIGITAL_DEPTH_6:
+		info->bpc = 6;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_8:
+		info->bpc = 8;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_10:
+		info->bpc = 10;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_12:
+		info->bpc = 12;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_14:
+		info->bpc = 14;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_16:
+		info->bpc = 16;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+	default:
+		info->bpc = 0;
+		break;
+	}
+
+	info->color_formats = DRM_COLOR_FORMAT_RGB444;
+	if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
+		info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
+	if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
+		info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
+}
+
+/**
  * drm_add_edid_modes - add modes from EDID data, if available
  * @connector: connector we're probing
  * @edid: edid data
@@ -1460,8 +1518,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
 
-	connector->display_info.width_mm = edid->width_cm * 10;
-	connector->display_info.height_mm = edid->height_cm * 10;
+	drm_add_display_info(edid, &connector->display_info);
 
 	return num_modes;
 }
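
Aside on the new helper: once drm_add_edid_modes() has run, a KMS driver can read the parsed bpc back out of connector->display_info. A minimal sketch of a hypothetical consumer (example_pick_pipe_bpp is illustrative only, not part of this patch):

	/* Hypothetical consumer of the new field; not part of this patch. */
	static int example_pick_pipe_bpp(struct drm_connector *connector)
	{
		struct drm_display_info *info = &connector->display_info;

		/* bpc == 0 means the EDID (analog, or pre-1.4) did not say;
		 * fall back to a driver default. */
		if (info->bpc == 0)
			return 24;

		return 3 * info->bpc;	/* RGB444: three components per pixel */
	}
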
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 140b9525b48a..802b61ac3139 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -70,174 +70,50 @@ fail:
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
-/**
- * drm_fb_helper_connector_parse_command_line - parse command line for connector
- * @connector - connector to parse line for
- * @mode_option - per connector mode option
- *
- * This parses the connector specific then generic command lines for
- * modes and options to configure the connector.
- *
- * This uses the same parameters as the fb modedb.c, except for extra
- *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
- *
- * enable/enable Digital/disable bit at the end
- */
-static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
-						       const char *mode_option)
-{
-	const char *name;
-	unsigned int namelen;
-	int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
-	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
-	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
-	int i;
-	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-	struct drm_connector *connector;
-
-	if (!fb_helper_conn)
-		return false;
-	connector = fb_helper_conn->connector;
-
-	cmdline_mode = &fb_helper_conn->cmdline_mode;
-	if (!mode_option)
-		mode_option = fb_mode_option;
-
-	if (!mode_option) {
-		cmdline_mode->specified = false;
-		return false;
-	}
-
-	name = mode_option;
-	namelen = strlen(name);
-	for (i = namelen-1; i >= 0; i--) {
-		switch (name[i]) {
-		case '@':
-			namelen = i;
-			if (!refresh_specified && !bpp_specified &&
-			    !yres_specified) {
-				refresh = simple_strtol(&name[i+1], NULL, 10);
-				refresh_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
-			} else
-				goto done;
-			break;
-		case '-':
-			namelen = i;
-			if (!bpp_specified && !yres_specified) {
-				bpp = simple_strtol(&name[i+1], NULL, 10);
-				bpp_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
-			} else
-				goto done;
-			break;
-		case 'x':
-			if (!yres_specified) {
-				yres = simple_strtol(&name[i+1], NULL, 10);
-				yres_specified = 1;
-			} else
-				goto done;
-		case '0' ... '9':
-			break;
-		case 'M':
-			if (!yres_specified)
-				cvt = 1;
-			break;
-		case 'R':
-			if (cvt)
-				rb = 1;
-			break;
-		case 'm':
-			if (!cvt)
-				margins = 1;
-			break;
-		case 'i':
-			if (!cvt)
-				interlace = 1;
-			break;
-		case 'e':
-			force = DRM_FORCE_ON;
-			break;
-		case 'D':
-			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
-			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
-				force = DRM_FORCE_ON;
-			else
-				force = DRM_FORCE_ON_DIGITAL;
-			break;
-		case 'd':
-			force = DRM_FORCE_OFF;
-			break;
-		default:
-			goto done;
-		}
-	}
-	if (i < 0 && yres_specified) {
-		xres = simple_strtol(name, NULL, 10);
-		res_specified = 1;
-	}
-done:
-
-	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
-		drm_get_connector_name(connector), xres, yres,
-		(refresh) ? refresh : 60, (rb) ? " reduced blanking" :
-		"", (margins) ? " with margins" : "", (interlace) ?
-		" interlaced" : "");
-
-	if (force) {
-		const char *s;
-		switch (force) {
-		case DRM_FORCE_OFF: s = "OFF"; break;
-		case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
-		default:
-		case DRM_FORCE_ON: s = "ON"; break;
-		}
-
-		DRM_INFO("forcing %s connector %s\n",
-			 drm_get_connector_name(connector), s);
-		connector->force = force;
-	}
-
-	if (res_specified) {
-		cmdline_mode->specified = true;
-		cmdline_mode->xres = xres;
-		cmdline_mode->yres = yres;
-	}
-
-	if (refresh_specified) {
-		cmdline_mode->refresh_specified = true;
-		cmdline_mode->refresh = refresh;
-	}
-
-	if (bpp_specified) {
-		cmdline_mode->bpp_specified = true;
-		cmdline_mode->bpp = bpp;
-	}
-	cmdline_mode->rb = rb ? true : false;
-	cmdline_mode->cvt = cvt ? true : false;
-	cmdline_mode->interlace = interlace ? true : false;
-
-	return true;
-}
-
 static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
 {
 	struct drm_fb_helper_connector *fb_helper_conn;
 	int i;
 
 	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_cmdline_mode *mode;
+		struct drm_connector *connector;
 		char *option = NULL;
 
 		fb_helper_conn = fb_helper->connector_info[i];
+		connector = fb_helper_conn->connector;
+		mode = &fb_helper_conn->cmdline_mode;
 
 		/* do something on return - turn off connector maybe */
-		if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
+		if (fb_get_options(drm_get_connector_name(connector), &option))
 			continue;
 
-		drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
+		if (drm_mode_parse_command_line_for_connector(option,
+							      connector,
+							      mode)) {
+			if (mode->force) {
+				const char *s;
+				switch (mode->force) {
+				case DRM_FORCE_OFF: s = "OFF"; break;
+				case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+				default:
+				case DRM_FORCE_ON: s = "ON"; break;
+				}
+
+				DRM_INFO("forcing %s connector %s\n",
+					 drm_get_connector_name(connector), s);
+				connector->force = mode->force;
+			}
+
+			DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+				      drm_get_connector_name(connector),
+				      mode->xres, mode->yres,
+				      mode->refresh_specified ? mode->refresh : 60,
+				      mode->rb ? " reduced blanking" : "",
+				      mode->margins ? " with margins" : "",
+				      mode->interlace ? " interlaced" : "");
+		}
+
 	}
 	return 0;
 }
@@ -901,7 +777,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 	/* first up get a count of crtcs now in use and new min/maxes width/heights */
 	for (i = 0; i < fb_helper->connector_count; i++) {
 		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
-		struct drm_fb_helper_cmdline_mode *cmdline_mode;
+		struct drm_cmdline_mode *cmdline_mode;
 
 		cmdline_mode = &fb_helper_conn->cmdline_mode;
 
@@ -1123,7 +999,7 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_conn
 
 static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
 {
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+	struct drm_cmdline_mode *cmdline_mode;
 	cmdline_mode = &fb_connector->cmdline_mode;
 	return cmdline_mode->specified;
 }
@@ -1131,7 +1007,7 @@ static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
 static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
 						      int width, int height)
 {
-	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+	struct drm_cmdline_mode *cmdline_mode;
 	struct drm_display_mode *mode = NULL;
 
 	cmdline_mode = &fb_helper_conn->cmdline_mode;
@@ -1163,19 +1039,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
 	}
 
 create_mode:
-	if (cmdline_mode->cvt)
-		mode = drm_cvt_mode(fb_helper_conn->connector->dev,
-				    cmdline_mode->xres, cmdline_mode->yres,
-				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-				    cmdline_mode->rb, cmdline_mode->interlace,
-				    cmdline_mode->margins);
-	else
-		mode = drm_gtf_mode(fb_helper_conn->connector->dev,
-				    cmdline_mode->xres, cmdline_mode->yres,
-				    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-				    cmdline_mode->interlace,
-				    cmdline_mode->margins);
-	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+	mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+						 cmdline_mode);
 	list_add(&mode->head, &fb_helper_conn->connector->modes);
 	return mode;
 }
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a1f12cb043de..2022a5c966bb 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -684,10 +684,11 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 	 */
 	*vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
 
-	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
-		  crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
-		  raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
-		  (int) duration_ns/1000, i);
+	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+		  crtc, (int)vbl_status, hpos, vpos,
+		  (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+		  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
+		  (int)duration_ns/1000, i);
 
 	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
 	if (invbl)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 25bf87390f53..c2d32f20e2fb 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -974,3 +974,159 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
 	}
 }
 EXPORT_SYMBOL(drm_mode_connector_list_update);
+
+/**
+ * drm_mode_parse_command_line_for_connector - parse command line for connector
+ * @mode_option - per connector mode option
+ * @connector - connector to parse line for
+ *
+ * This parses the connector specific then generic command lines for
+ * modes and options to configure the connector.
+ *
+ * This uses the same parameters as the fb modedb.c, except for extra
+ *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ *
+ * enable/enable Digital/disable bit at the end
+ */
+bool drm_mode_parse_command_line_for_connector(const char *mode_option,
+					       struct drm_connector *connector,
+					       struct drm_cmdline_mode *mode)
+{
+	const char *name;
+	unsigned int namelen;
+	int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+	int i;
+	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+
+#ifdef CONFIG_FB
+	if (!mode_option)
+		mode_option = fb_mode_option;
+#endif
+
+	if (!mode_option) {
+		mode->specified = false;
+		return false;
+	}
+
+	name = mode_option;
+	namelen = strlen(name);
+	for (i = namelen-1; i >= 0; i--) {
+		switch (name[i]) {
+		case '@':
+			namelen = i;
+			if (!refresh_specified && !bpp_specified &&
+			    !yres_specified) {
+				refresh = simple_strtol(&name[i+1], NULL, 10);
+				refresh_specified = 1;
+				if (cvt || rb)
+					cvt = 0;
+			} else
+				goto done;
+			break;
+		case '-':
+			namelen = i;
+			if (!bpp_specified && !yres_specified) {
+				bpp = simple_strtol(&name[i+1], NULL, 10);
+				bpp_specified = 1;
+				if (cvt || rb)
+					cvt = 0;
+			} else
+				goto done;
+			break;
+		case 'x':
+			if (!yres_specified) {
+				yres = simple_strtol(&name[i+1], NULL, 10);
+				yres_specified = 1;
+			} else
+				goto done;
+		case '0' ... '9':
+			break;
+		case 'M':
+			if (!yres_specified)
+				cvt = 1;
+			break;
+		case 'R':
+			if (cvt)
+				rb = 1;
+			break;
+		case 'm':
+			if (!cvt)
+				margins = 1;
+			break;
+		case 'i':
+			if (!cvt)
+				interlace = 1;
+			break;
+		case 'e':
+			force = DRM_FORCE_ON;
+			break;
+		case 'D':
+			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+				force = DRM_FORCE_ON;
+			else
+				force = DRM_FORCE_ON_DIGITAL;
+			break;
+		case 'd':
+			force = DRM_FORCE_OFF;
+			break;
+		default:
+			goto done;
+		}
+	}
+	if (i < 0 && yres_specified) {
+		xres = simple_strtol(name, NULL, 10);
+		res_specified = 1;
+	}
+done:
+	if (res_specified) {
+		mode->specified = true;
+		mode->xres = xres;
+		mode->yres = yres;
+	}
+
+	if (refresh_specified) {
+		mode->refresh_specified = true;
+		mode->refresh = refresh;
+	}
+
+	if (bpp_specified) {
+		mode->bpp_specified = true;
+		mode->bpp = bpp;
+	}
+	mode->rb = rb ? true : false;
+	mode->cvt = cvt ? true : false;
+	mode->interlace = interlace ? true : false;
+	mode->force = force;
+
+	return true;
+}
+EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
+
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+				  struct drm_cmdline_mode *cmd)
+{
+	struct drm_display_mode *mode;
+
+	if (cmd->cvt)
+		mode = drm_cvt_mode(dev,
+				    cmd->xres, cmd->yres,
+				    cmd->refresh_specified ? cmd->refresh : 60,
+				    cmd->rb, cmd->interlace,
+				    cmd->margins);
+	else
+		mode = drm_gtf_mode(dev,
+				    cmd->xres, cmd->yres,
+				    cmd->refresh_specified ? cmd->refresh : 60,
+				    cmd->interlace,
+				    cmd->margins);
+	if (!mode)
+		return NULL;
+
+	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+	return mode;
+}
+EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
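
Usage sketch for the two helpers exported above, mirroring what drm_fb_helper now does after this merge; the literal mode string is just an example and connector is assumed to be in scope:

	struct drm_cmdline_mode cmdline = {};
	struct drm_display_mode *dmode;

	/* "1280x1024-24@60" follows the modedb syntax documented above */
	if (drm_mode_parse_command_line_for_connector("1280x1024-24@60",
						      connector, &cmdline)) {
		dmode = drm_mode_create_from_cmdline_mode(connector->dev,
							  &cmdline);
		if (dmode)
			list_add(&dmode->head, &connector->modes);
	}
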
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 001273d57f2d..6d7b083c5b77 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -62,6 +62,26 @@ struct idr drm_minors_idr;
 struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
 struct dentry *drm_debugfs_root;
+
+int drm_err(const char *func, const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(drm_err);
+
 void drm_ut_debug_printk(unsigned int request_level,
 			 const char *prefix,
 			 const char *function_name,
@@ -78,6 +98,7 @@ void drm_ut_debug_printk(unsigned int request_level,
 	}
 }
 EXPORT_SYMBOL(drm_ut_debug_printk);
+
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
 	int new_id;
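
The point of drm_err() is to give the DRM_ERROR family a single out-of-line backend instead of an open-coded printk at every call site. The macro side lives in include/drm/drmP.h (changed in this merge but not excerpted here); as an assumption about that hunk, it looks roughly like:

	/* Sketch of the drmP.h side; the exact form in the tree may differ. */
	#define DRM_ERROR(fmt, ...)				\
		drm_err(__func__, fmt, ##__VA_ARGS__)
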
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 87c8e29465e3..51c2257b11e6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -106,11 +106,12 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 	}
 }
 
-static const char *agp_type_str(int type)
+static const char *cache_level_str(int type)
 {
 	switch (type) {
-	case 0: return " uncached";
-	case 1: return " snooped";
+	case I915_CACHE_NONE: return " uncached";
+	case I915_CACHE_LLC: return " snooped (LLC)";
+	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
 	default: return "";
 	}
 }
@@ -127,7 +128,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
 		   obj->last_fenced_seqno,
-		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
+		   cache_level_str(obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -714,7 +715,7 @@ static void print_error_buffers(struct seq_file *m,
 			   dirty_flag(err->dirty),
 			   purgeable_flag(err->purgeable),
 			   ring_str(err->ring),
-			   agp_type_str(err->agp_type));
+			   cache_level_str(err->cache_level));
 
 	if (err->name)
 		seq_printf(m, " (name: %d)", err->name);
@@ -852,6 +853,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
 
 	if (IS_GEN5(dev)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
@@ -873,7 +875,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		int max_freq;
 
 		/* RPSTAT1 is in the GT power well */
-		__gen6_gt_force_wake_get(dev_priv);
+		ret = mutex_lock_interruptible(&dev->struct_mutex);
+		if (ret)
+			return ret;
+
+		gen6_gt_force_wake_get(dev_priv);
 
 		rpstat = I915_READ(GEN6_RPSTAT1);
 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
@@ -883,6 +889,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
 
+		gen6_gt_force_wake_put(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 		seq_printf(m, "Render p-state ratio: %d\n",
@@ -917,8 +926,6 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		max_freq = rp_state_cap & 0xff;
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   max_freq * 50);
-
-		__gen6_gt_force_wake_put(dev_priv);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -1058,6 +1065,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	case FBC_MULTIPLE_PIPES:
 		seq_printf(m, "multiple pipes are enabled");
 		break;
+	case FBC_MODULE_PARAM:
+		seq_printf(m, "disabled per module param (default off)");
+		break;
 	default:
 		seq_printf(m, "unknown reason");
 	}
@@ -1186,6 +1196,42 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_context_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "power context ");
+	describe_obj(m, dev_priv->pwrctx);
+	seq_printf(m, "\n");
+
+	seq_printf(m, "render context ");
+	describe_obj(m, dev_priv->renderctx);
+	seq_printf(m, "\n");
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
+static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	seq_printf(m, "forcewake count = %d\n",
+		   atomic_read(&dev_priv->forcewake_count));
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -1288,6 +1334,67 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
 }
 
+static int i915_forcewake_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!IS_GEN6(dev))
+		return 0;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	gen6_gt_force_wake_get(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int i915_forcewake_release(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_GEN6(dev))
+		return 0;
+
+	/*
+	 * It's bad that we can potentially hang userspace if struct_mutex gets
+	 * forever stuck. However, if we cannot acquire this lock it means that
+	 * almost certainly the driver has hung, is not unload-able. Therefore
+	 * hanging here is probably a minor inconvenience not to be seen my
+	 * almost every user.
+	 */
+	mutex_lock(&dev->struct_mutex);
+	gen6_gt_force_wake_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static const struct file_operations i915_forcewake_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_forcewake_open,
+	.release = i915_forcewake_release,
+};
+
+static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_forcewake_user",
+				  S_IRUSR,
+				  root, dev,
+				  &i915_forcewake_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
+}
+
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
@@ -1324,6 +1431,8 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_sr_status", i915_sr_status, 0},
 	{"i915_opregion", i915_opregion, 0},
 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+	{"i915_context_status", i915_context_status, 0},
+	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
1329 1438
@@ -1335,6 +1444,10 @@ int i915_debugfs_init(struct drm_minor *minor)
 	if (ret)
 		return ret;
 
+	ret = i915_forcewake_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
 					minor->debugfs_root, minor);
@@ -1344,6 +1457,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 {
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
 				 1, minor);
 }
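
The new i915_forcewake_user file gives userspace a way to pin the GT awake for as long as the file stays open: forcewake is taken in open() and dropped in release(). A hypothetical helper a debugging tool might use:

	#include <fcntl.h>
	#include <unistd.h>

	/* Hypothetical userspace snippet; the path assumes debugfs is mounted
	 * at /sys/kernel/debug and the GPU is DRM minor 0. */
	static int hold_gt_awake(void)
	{
		/* open() takes a forcewake reference (i915_forcewake_open);
		 * close() on the returned fd drops it (i915_forcewake_release). */
		return open("/sys/kernel/debug/dri/0/i915_forcewake_user",
			    O_RDONLY);
	}
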
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 12876f2795d2..0239e9974bf2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -571,7 +571,7 @@ static int i915_quiescent(struct drm_device *dev)
 	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(ring, ring->size - 8);
+	return intel_wait_ring_idle(ring);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -1176,11 +1176,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 	return can_switch;
 }
 
-static int i915_load_modeset_init(struct drm_device *dev)
+static int i915_load_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long prealloc_size, gtt_size, mappable_size;
-	int ret = 0;
+	int ret;
 
 	prealloc_size = dev_priv->mm.gtt->stolen_size;
 	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
@@ -1204,7 +1204,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	ret = i915_gem_init_ringbuffer(dev);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret)
-		goto out;
+		return ret;
 
 	/* Try to set up FBC with a reasonable compressed buffer size */
 	if (I915_HAS_FBC(dev) && i915_powersave) {
@@ -1222,6 +1222,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	/* Allow hardware batchbuffers unless told otherwise. */
 	dev_priv->allow_batchbuffer = 1;
+	return 0;
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 	ret = intel_parse_bios(dev);
 	if (ret)
@@ -1236,7 +1243,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 */
 	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
 	if (ret && ret != -ENODEV)
-		goto cleanup_ringbuffer;
+		goto out;
 
 	intel_register_dsm_handler();
 
@@ -1253,10 +1260,40 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_modeset_init(dev);
 
-	ret = drm_irq_install(dev);
+	ret = i915_load_gem_init(dev);
 	if (ret)
 		goto cleanup_vga_switcheroo;
 
+	intel_modeset_gem_init(dev);
+
+	if (IS_IVYBRIDGE(dev)) {
+		/* Share pre & uninstall handlers with ILK/SNB */
+		dev->driver->irq_handler = ivybridge_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ivybridge_enable_vblank;
+		dev->driver->disable_vblank = ivybridge_disable_vblank;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev->driver->irq_handler = ironlake_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ironlake_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ironlake_enable_vblank;
+		dev->driver->disable_vblank = ironlake_disable_vblank;
+	} else {
+		dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+		dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+		dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+		dev->driver->irq_handler = i915_driver_irq_handler;
+		dev->driver->enable_vblank = i915_enable_vblank;
+		dev->driver->disable_vblank = i915_disable_vblank;
+	}
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto cleanup_gem;
+
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
@@ -1274,14 +1311,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 cleanup_irq:
 	drm_irq_uninstall(dev);
+cleanup_gem:
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
 cleanup_vga_switcheroo:
 	vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
-cleanup_ringbuffer:
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
-	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
 }
@@ -1982,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
@@ -2025,6 +2062,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
+	spin_lock_init(&dev_priv->rps_lock);
 
 	if (IS_MOBILE(dev) || !IS_GEN2(dev))
 		dev_priv->num_pipe = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 32d1b3e829c8..0defd4270594 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -52,9 +52,12 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_semaphores = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 
-unsigned int i915_enable_rc6 = 0;
+unsigned int i915_enable_rc6 = 1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
+unsigned int i915_enable_fbc = 0;
+module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
@@ -169,7 +172,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 0, /* disabled due to buggy hardware */
+	.has_fbc = 1,
 	.has_bsd_ring = 1,
 };
 
@@ -188,6 +191,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_blt_ring = 1,
 };
 
+static const struct intel_device_info intel_ivybridge_d_info = {
+	.is_ivybridge = 1, .gen = 7,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+};
+
 static const struct pci_device_id pciidlist[] = {	/* aka */
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),	/* I830_M */
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),	/* 845_G */
@@ -227,6 +245,11 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
 	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
 	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
+	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
 	{0, 0, 0}
 };
 
@@ -235,7 +258,9 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
 #define INTEL_PCH_DEVICE_ID_MASK	0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
 
 void intel_detect_pch (struct drm_device *dev)
 {
@@ -254,16 +279,23 @@ void intel_detect_pch (struct drm_device *dev)
 		int id;
 		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
 
-		if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_IBX;
+			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_CPT;
 			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+			/* PantherPoint is CPT compatible */
+			dev_priv->pch_type = PCH_CPT;
+			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
 		}
 	}
 		pci_dev_put(pch);
 	}
 }
 
-void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -279,12 +311,38 @@ void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 	udelay(10);
 }
 
-void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	/* Forcewake is atomic in case we get in here without the lock */
+	if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
+		__gen6_gt_force_wake_get(dev_priv);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	if (atomic_dec_and_test(&dev_priv->forcewake_count))
+		__gen6_gt_force_wake_put(dev_priv);
+}
+
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
 	int loop = 500;
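
The get/put pair above is reference counted, so nested users compose; only the first get wakes the GT and only the last put lets it sleep. The intended bracket around a register sequence looks like this (names are the ones added in this patch; dev, dev_priv, and rpstat are assumed to be in scope, as in the i915_cur_delayinfo hunk earlier):

	mutex_lock(&dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);	/* first user wakes the GT */

	rpstat = I915_READ(GEN6_RPSTAT1);	/* register in the GT power well */

	gen6_gt_force_wake_put(dev_priv);	/* last user lets it sleep again */
	mutex_unlock(&dev->struct_mutex);
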
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1c1b27c97e5c..ee660355ae68 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -188,7 +188,7 @@ struct drm_i915_error_state {
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 ring:4;
-		u32 agp_type:1;
+		u32 cache_level:2;
 	} *active_bo, *pinned_bo;
 	u32 active_bo_count, pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
@@ -203,12 +203,19 @@ struct drm_i915_display_funcs {
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	void (*update_wm)(struct drm_device *dev);
+	int (*crtc_mode_set)(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode,
+			     int x, int y,
+			     struct drm_framebuffer *old_fb);
+	void (*fdi_link_train)(struct drm_crtc *crtc);
+	void (*init_clock_gating)(struct drm_device *dev);
+	void (*init_pch_clock_gating)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
-	/* clock gating init */
 };
 
 struct intel_device_info {
@@ -223,6 +230,7 @@ struct intel_device_info {
 	u8 is_pineview : 1;
 	u8 is_broadwater : 1;
 	u8 is_crestline : 1;
+	u8 is_ivybridge : 1;
 	u8 has_fbc : 1;
 	u8 has_pipe_cxsr : 1;
 	u8 has_hotplug : 1;
@@ -242,6 +250,7 @@ enum no_fbc_reason {
 	FBC_BAD_PLANE, /* fbc not supported on plane */
 	FBC_NOT_TILED, /* buffer not tiled */
 	FBC_MULTIPLE_PIPES, /* more than one pipe active */
+	FBC_MODULE_PARAM,
 };
 
 enum intel_pch {
@@ -676,6 +685,10 @@ typedef struct drm_i915_private {
 
 	bool mchbar_need_disable;
 
+	struct work_struct rps_work;
+	spinlock_t rps_lock;
+	u32 pm_iir;
+
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
@@ -703,8 +716,16 @@ typedef struct drm_i915_private {
 	struct intel_fbdev *fbdev;
 
 	struct drm_property *broadcast_rgb_property;
+
+	atomic_t forcewake_count;
 } drm_i915_private_t;
 
+enum i915_cache_level {
+	I915_CACHE_NONE,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+ */
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
@@ -791,6 +812,8 @@ struct drm_i915_gem_object {
791 unsigned int pending_fenced_gpu_access:1; 812 unsigned int pending_fenced_gpu_access:1;
792 unsigned int fenced_gpu_access:1; 813 unsigned int fenced_gpu_access:1;
793 814
815 unsigned int cache_level:2;
816
794 struct page **pages; 817 struct page **pages;
795 818
796 /** 819 /**
@@ -827,8 +850,6 @@ struct drm_i915_gem_object {
827 /** Record of address bit 17 of each page at last unbind. */ 850 /** Record of address bit 17 of each page at last unbind. */
828 unsigned long *bit_17; 851 unsigned long *bit_17;
829 852
830 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
831 uint32_t agp_type;
832 853
833 /** 854 /**
834 * If present, while GEM_DOMAIN_CPU is in the read domain this array 855 * If present, while GEM_DOMAIN_CPU is in the read domain this array
@@ -915,13 +936,21 @@ enum intel_chip_family {
915#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 936#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
916#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 937#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
917#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 938#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
939#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
918#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 940#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
919 941
942/*
943 * The genX designation typically refers to the render engine, so render
944 * capability related checks should use IS_GEN, while display and other checks
945 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
946 * chips, etc.).
947 */
920#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 948#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
921#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 949#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
922#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 950#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
923#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 951#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
924#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 952#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
953#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
925 954
926#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 955#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
927#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 956#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
@@ -948,8 +977,8 @@ enum intel_chip_family {
948#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 977#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
949#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 978#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
950 979
951#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) 980#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
952#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) 981#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
953 982
954#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 983#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
955#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 984#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -967,6 +996,7 @@ extern unsigned int i915_lvds_downclock;
967extern unsigned int i915_panel_use_ssc; 996extern unsigned int i915_panel_use_ssc;
968extern int i915_vbt_sdvo_panel_type; 997extern int i915_vbt_sdvo_panel_type;
969extern unsigned int i915_enable_rc6; 998extern unsigned int i915_enable_rc6;
999extern unsigned int i915_enable_fbc;
970 1000
971extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1001extern int i915_suspend(struct drm_device *dev, pm_message_t state);
972extern int i915_resume(struct drm_device *dev); 1002extern int i915_resume(struct drm_device *dev);
@@ -1010,12 +1040,27 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
1010extern void i915_driver_irq_preinstall(struct drm_device * dev); 1040extern void i915_driver_irq_preinstall(struct drm_device * dev);
1011extern int i915_driver_irq_postinstall(struct drm_device *dev); 1041extern int i915_driver_irq_postinstall(struct drm_device *dev);
1012extern void i915_driver_irq_uninstall(struct drm_device * dev); 1042extern void i915_driver_irq_uninstall(struct drm_device * dev);
1043
1044extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
1045extern void ironlake_irq_preinstall(struct drm_device *dev);
1046extern int ironlake_irq_postinstall(struct drm_device *dev);
1047extern void ironlake_irq_uninstall(struct drm_device *dev);
1048
1049extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
1050extern void ivybridge_irq_preinstall(struct drm_device *dev);
1051extern int ivybridge_irq_postinstall(struct drm_device *dev);
1052extern void ivybridge_irq_uninstall(struct drm_device *dev);
1053
1013extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1054extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1014 struct drm_file *file_priv); 1055 struct drm_file *file_priv);
1015extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 1056extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1016 struct drm_file *file_priv); 1057 struct drm_file *file_priv);
1017extern int i915_enable_vblank(struct drm_device *dev, int crtc); 1058extern int i915_enable_vblank(struct drm_device *dev, int crtc);
1018extern void i915_disable_vblank(struct drm_device *dev, int crtc); 1059extern void i915_disable_vblank(struct drm_device *dev, int crtc);
1060extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
1061extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
1062extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
1063extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
1019extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 1064extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
1020extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); 1065extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
1021extern int i915_vblank_swap(struct drm_device *dev, void *data, 1066extern int i915_vblank_swap(struct drm_device *dev, void *data,
@@ -1265,6 +1310,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
1265 1310
1266/* modesetting */ 1311/* modesetting */
1267extern void intel_modeset_init(struct drm_device *dev); 1312extern void intel_modeset_init(struct drm_device *dev);
1313extern void intel_modeset_gem_init(struct drm_device *dev);
1268extern void intel_modeset_cleanup(struct drm_device *dev); 1314extern void intel_modeset_cleanup(struct drm_device *dev);
1269extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1315extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1270extern void i8xx_disable_fbc(struct drm_device *dev); 1316extern void i8xx_disable_fbc(struct drm_device *dev);
@@ -1312,13 +1358,34 @@ extern void intel_display_print_error_state(struct seq_file *m,
1312 LOCK_TEST_WITH_RETURN(dev, file); \ 1358 LOCK_TEST_WITH_RETURN(dev, file); \
1313} while (0) 1359} while (0)
1314 1360
1361/* On the SNB platform, before reading ring registers the forcewake bit
1362 * must be set to prevent the GT core from powering down and returning
1363 * stale values.
1364 */
1365void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1366void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1367void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1368
1369/* We give fast paths for the really cool registers */
1370#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1371 (((dev_priv)->info->gen >= 6) && \
1372 ((reg) < 0x40000) && \
1373 ((reg) != FORCEWAKE))
1315 1374
1316#define __i915_read(x, y) \ 1375#define __i915_read(x, y) \
1317static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1376static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1318 u##x val = read##y(dev_priv->regs + reg); \ 1377 u##x val = 0; \
1378 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1379 gen6_gt_force_wake_get(dev_priv); \
1380 val = read##y(dev_priv->regs + reg); \
1381 gen6_gt_force_wake_put(dev_priv); \
1382 } else { \
1383 val = read##y(dev_priv->regs + reg); \
1384 } \
1319 trace_i915_reg_rw(false, reg, val, sizeof(val)); \ 1385 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
1320 return val; \ 1386 return val; \
1321} 1387}
1388
1322__i915_read(8, b) 1389__i915_read(8, b)
1323__i915_read(16, w) 1390__i915_read(16, w)
1324__i915_read(32, l) 1391__i915_read(32, l)
@@ -1328,6 +1395,9 @@ __i915_read(64, q)
1328#define __i915_write(x, y) \ 1395#define __i915_write(x, y) \
1329static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ 1396static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1330 trace_i915_reg_rw(true, reg, val, sizeof(val)); \ 1397 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
1398 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1399 __gen6_gt_wait_for_fifo(dev_priv); \
1400 } \
1331 write##y(val, dev_priv->regs + reg); \ 1401 write##y(val, dev_priv->regs + reg); \
1332} 1402}
1333__i915_write(8, b) 1403__i915_write(8, b)
@@ -1356,33 +1426,4 @@ __i915_write(64, q)
1356#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 1426#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
1357 1427
1358 1428
1359/* On SNB platform, before reading ring registers forcewake bit
1360 * must be set to prevent GT core from power down and stale values being
1361 * returned.
1362 */
1363void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1364void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1365void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1366
1367static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
1368{
1369 u32 val;
1370
1371 if (dev_priv->info->gen >= 6) {
1372 __gen6_gt_force_wake_get(dev_priv);
1373 val = I915_READ(reg);
1374 __gen6_gt_force_wake_put(dev_priv);
1375 } else
1376 val = I915_READ(reg);
1377
1378 return val;
1379}
1380
1381static inline void i915_gt_write(struct drm_i915_private *dev_priv,
1382 u32 reg, u32 val)
1383{
1384 if (dev_priv->info->gen >= 6)
1385 __gen6_gt_wait_for_fifo(dev_priv);
1386 I915_WRITE(reg, val);
1387}
1388#endif 1429#endif
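The i915_drv.h changes above fold forcewake handling into the register accessors themselves: gen6+ reads of GT registers below 0x40000 are bracketed by gen6_gt_force_wake_get()/put(), writes first wait on the FIFO, and the ad-hoc i915_gt_read()/i915_gt_write() helpers at the bottom of the header are removed. The put side visible at the top of this diff decrements dev_priv->forcewake_count and only touches the hardware on the last reference; a minimal sketch of the matching get side, assuming the symmetric reference-counting scheme (an illustration, not the exact kernel code):

	void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
	{
		/* Only the 0 -> 1 transition pokes the hardware; nested
		 * callers just bump the count. __gen6_gt_force_wake_get()
		 * is assumed to be the low-level register write paired
		 * with the __gen6_gt_force_wake_put() shown above. */
		if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
			__gen6_gt_force_wake_get(dev_priv);
	}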
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7ce3f353af33..c6289034e29a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2673,6 +2673,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2673update: 2673update:
2674 obj->tiling_changed = false; 2674 obj->tiling_changed = false;
2675 switch (INTEL_INFO(dev)->gen) { 2675 switch (INTEL_INFO(dev)->gen) {
2676 case 7:
2676 case 6: 2677 case 6:
2677 ret = sandybridge_write_fence_reg(obj, pipelined); 2678 ret = sandybridge_write_fence_reg(obj, pipelined);
2678 break; 2679 break;
@@ -2706,6 +2707,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
2706 uint32_t fence_reg = reg - dev_priv->fence_regs; 2707 uint32_t fence_reg = reg - dev_priv->fence_regs;
2707 2708
2708 switch (INTEL_INFO(dev)->gen) { 2709 switch (INTEL_INFO(dev)->gen) {
2710 case 7:
2709 case 6: 2711 case 6:
2710 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); 2712 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2711 break; 2713 break;
@@ -2878,6 +2880,17 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2878 if (obj->pages == NULL) 2880 if (obj->pages == NULL)
2879 return; 2881 return;
2880 2882
2883 /* If the GPU is snooping the contents of the CPU cache,
2884 * we do not need to manually clear the CPU cache lines. However,
2885 * the caches are only snooped when the render cache is
2886 * flushed/invalidated. As we always have to emit invalidations
2887 * and flushes when moving into and out of the RENDER domain, correct
2888 * snooping behaviour occurs naturally as the result of our domain
2889 * tracking.
2890 */
2891 if (obj->cache_level != I915_CACHE_NONE)
2892 return;
2893
2881 trace_i915_gem_object_clflush(obj); 2894 trace_i915_gem_object_clflush(obj);
2882 2895
2883 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); 2896 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
@@ -3569,7 +3582,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3569 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3582 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3570 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3583 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3571 3584
3572 obj->agp_type = AGP_USER_MEMORY; 3585 obj->cache_level = I915_CACHE_NONE;
3573 obj->base.driver_private = NULL; 3586 obj->base.driver_private = NULL;
3574 obj->fence_reg = I915_FENCE_REG_NONE; 3587 obj->fence_reg = I915_FENCE_REG_NONE;
3575 INIT_LIST_HEAD(&obj->mm_list); 3588 INIT_LIST_HEAD(&obj->mm_list);
@@ -3845,25 +3858,10 @@ i915_gem_load(struct drm_device *dev)
3845 dev_priv->num_fence_regs = 8; 3858 dev_priv->num_fence_regs = 8;
3846 3859
3847 /* Initialize fence registers to zero */ 3860 /* Initialize fence registers to zero */
3848 switch (INTEL_INFO(dev)->gen) { 3861 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3849 case 6: 3862 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3850 for (i = 0; i < 16; i++)
3851 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
3852 break;
3853 case 5:
3854 case 4:
3855 for (i = 0; i < 16; i++)
3856 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
3857 break;
3858 case 3:
3859 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3860 for (i = 0; i < 8; i++)
3861 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
3862 case 2:
3863 for (i = 0; i < 8; i++)
3864 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
3865 break;
3866 } 3863 }
3864
3867 i915_gem_detect_bit_6_swizzle(dev); 3865 i915_gem_detect_bit_6_swizzle(dev);
3868 init_waitqueue_head(&dev_priv->pending_flip_queue); 3866 init_waitqueue_head(&dev_priv->pending_flip_queue);
3869 3867
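Two details in the i915_gem.c hunks are easy to miss: clflush is now skipped entirely for snooped objects, and fence-register initialization reuses the per-generation dispatch already present in i915_gem_clear_fence_reg() rather than duplicating the switch. The clflush decision, condensed into a sketch (the full function is in the hunk above):

	static void clflush_if_uncached(struct drm_i915_gem_object *obj)
	{
		if (obj->pages == NULL)
			return;
		/* LLC / LLC+MLC objects are snooped whenever the render
		 * cache is flushed, so only uncached (I915_CACHE_NONE)
		 * objects need their CPU cachelines flushed by hand. */
		if (obj->cache_level != I915_CACHE_NONE)
			return;
		drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
	}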
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b0abdc64aa9f..e46b645773cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -29,6 +29,26 @@
29#include "i915_trace.h" 29#include "i915_trace.h"
30#include "intel_drv.h" 30#include "intel_drv.h"
31 31
32/* XXX kill agp_type! */
33static unsigned int cache_level_to_agp_type(struct drm_device *dev,
34 enum i915_cache_level cache_level)
35{
36 switch (cache_level) {
37 case I915_CACHE_LLC_MLC:
38 if (INTEL_INFO(dev)->gen >= 6)
39 return AGP_USER_CACHED_MEMORY_LLC_MLC;
40 /* Older chipsets do not have this extra level of CPU
 41 * caching, so fall through and simply request the PTE
42 * as cached.
43 */
44 case I915_CACHE_LLC:
45 return AGP_USER_CACHED_MEMORY;
46 default:
47 case I915_CACHE_NONE:
48 return AGP_USER_MEMORY;
49 }
50}
51
32void i915_gem_restore_gtt_mappings(struct drm_device *dev) 52void i915_gem_restore_gtt_mappings(struct drm_device *dev)
33{ 53{
34 struct drm_i915_private *dev_priv = dev->dev_private; 54 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -39,6 +59,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
39 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 59 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
40 60
41 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 61 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
62 unsigned int agp_type =
63 cache_level_to_agp_type(dev, obj->cache_level);
64
42 i915_gem_clflush_object(obj); 65 i915_gem_clflush_object(obj);
43 66
44 if (dev_priv->mm.gtt->needs_dmar) { 67 if (dev_priv->mm.gtt->needs_dmar) {
@@ -46,15 +69,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
46 69
47 intel_gtt_insert_sg_entries(obj->sg_list, 70 intel_gtt_insert_sg_entries(obj->sg_list,
48 obj->num_sg, 71 obj->num_sg,
49 obj->gtt_space->start 72 obj->gtt_space->start >> PAGE_SHIFT,
50 >> PAGE_SHIFT, 73 agp_type);
51 obj->agp_type);
52 } else 74 } else
53 intel_gtt_insert_pages(obj->gtt_space->start 75 intel_gtt_insert_pages(obj->gtt_space->start
54 >> PAGE_SHIFT, 76 >> PAGE_SHIFT,
55 obj->base.size >> PAGE_SHIFT, 77 obj->base.size >> PAGE_SHIFT,
56 obj->pages, 78 obj->pages,
57 obj->agp_type); 79 agp_type);
58 } 80 }
59 81
60 intel_gtt_chipset_flush(); 82 intel_gtt_chipset_flush();
@@ -64,6 +86,7 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
64{ 86{
65 struct drm_device *dev = obj->base.dev; 87 struct drm_device *dev = obj->base.dev;
66 struct drm_i915_private *dev_priv = dev->dev_private; 88 struct drm_i915_private *dev_priv = dev->dev_private;
89 unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
67 int ret; 90 int ret;
68 91
69 if (dev_priv->mm.gtt->needs_dmar) { 92 if (dev_priv->mm.gtt->needs_dmar) {
@@ -77,12 +100,12 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
77 intel_gtt_insert_sg_entries(obj->sg_list, 100 intel_gtt_insert_sg_entries(obj->sg_list,
78 obj->num_sg, 101 obj->num_sg,
79 obj->gtt_space->start >> PAGE_SHIFT, 102 obj->gtt_space->start >> PAGE_SHIFT,
80 obj->agp_type); 103 agp_type);
81 } else 104 } else
82 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, 105 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
83 obj->base.size >> PAGE_SHIFT, 106 obj->base.size >> PAGE_SHIFT,
84 obj->pages, 107 obj->pages,
85 obj->agp_type); 108 agp_type);
86 109
87 return 0; 110 return 0;
88} 111}
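cache_level_to_agp_type() is now the single translation point between the new enum and the legacy AGP flags, which makes its behaviour easy to check in isolation. A standalone userspace rendition for illustration, with the AGP_* constants reduced to placeholder values (assumption: the kernel treats them as opaque flags, so the exact numbers do not matter here):

	#include <assert.h>

	enum i915_cache_level { I915_CACHE_NONE, I915_CACHE_LLC, I915_CACHE_LLC_MLC };
	enum { AGP_USER_MEMORY, AGP_USER_CACHED_MEMORY, AGP_USER_CACHED_MEMORY_LLC_MLC };

	static unsigned int map(int gen, enum i915_cache_level level)
	{
		switch (level) {
		case I915_CACHE_LLC_MLC:
			if (gen >= 6)
				return AGP_USER_CACHED_MEMORY_LLC_MLC;
			/* older chips lack the extra cache level: fall through */
		case I915_CACHE_LLC:
			return AGP_USER_CACHED_MEMORY;
		default:
			return AGP_USER_MEMORY;
		}
	}

	int main(void)
	{
		assert(map(6, I915_CACHE_LLC_MLC) == AGP_USER_CACHED_MEMORY_LLC_MLC);
		assert(map(5, I915_CACHE_LLC_MLC) == AGP_USER_CACHED_MEMORY);
		assert(map(6, I915_CACHE_NONE) == AGP_USER_MEMORY);
		return 0;
	}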
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 281ad3d6115d..82d70fd9e933 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -92,7 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
94 94
95 if (IS_GEN5(dev) || IS_GEN6(dev)) { 95 if (INTEL_INFO(dev)->gen >= 5) {
 96 /* On Ironlake, whatever the DRAM config, the GPU always does the 96 /* On Ironlake, whatever the DRAM config, the GPU always does the
97 * same swizzling setup. 97 * same swizzling setup.
98 */ 98 */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 188b497e5076..b79619a7b788 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -367,22 +367,30 @@ static void notify_ring(struct drm_device *dev,
367 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 367 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
368} 368}
369 369
370static void gen6_pm_irq_handler(struct drm_device *dev) 370static void gen6_pm_rps_work(struct work_struct *work)
371{ 371{
372 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 372 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
373 rps_work);
373 u8 new_delay = dev_priv->cur_delay; 374 u8 new_delay = dev_priv->cur_delay;
374 u32 pm_iir; 375 u32 pm_iir, pm_imr;
376
377 spin_lock_irq(&dev_priv->rps_lock);
378 pm_iir = dev_priv->pm_iir;
379 dev_priv->pm_iir = 0;
380 pm_imr = I915_READ(GEN6_PMIMR);
381 spin_unlock_irq(&dev_priv->rps_lock);
375 382
376 pm_iir = I915_READ(GEN6_PMIIR);
377 if (!pm_iir) 383 if (!pm_iir)
378 return; 384 return;
379 385
386 mutex_lock(&dev_priv->dev->struct_mutex);
380 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 387 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
381 if (dev_priv->cur_delay != dev_priv->max_delay) 388 if (dev_priv->cur_delay != dev_priv->max_delay)
382 new_delay = dev_priv->cur_delay + 1; 389 new_delay = dev_priv->cur_delay + 1;
383 if (new_delay > dev_priv->max_delay) 390 if (new_delay > dev_priv->max_delay)
384 new_delay = dev_priv->max_delay; 391 new_delay = dev_priv->max_delay;
385 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { 392 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
393 gen6_gt_force_wake_get(dev_priv);
386 if (dev_priv->cur_delay != dev_priv->min_delay) 394 if (dev_priv->cur_delay != dev_priv->min_delay)
387 new_delay = dev_priv->cur_delay - 1; 395 new_delay = dev_priv->cur_delay - 1;
388 if (new_delay < dev_priv->min_delay) { 396 if (new_delay < dev_priv->min_delay) {
@@ -396,13 +404,19 @@ static void gen6_pm_irq_handler(struct drm_device *dev)
396 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 404 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
397 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); 405 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
398 } 406 }
399 407 gen6_gt_force_wake_put(dev_priv);
400 } 408 }
401 409
402 gen6_set_rps(dev, new_delay); 410 gen6_set_rps(dev_priv->dev, new_delay);
403 dev_priv->cur_delay = new_delay; 411 dev_priv->cur_delay = new_delay;
404 412
405 I915_WRITE(GEN6_PMIIR, pm_iir); 413 /*
414 * rps_lock not held here because clearing is non-destructive. There is
415 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
416 * by holding struct_mutex for the duration of the write.
417 */
418 I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
419 mutex_unlock(&dev_priv->dev->struct_mutex);
406} 420}
407 421
408static void pch_irq_handler(struct drm_device *dev) 422static void pch_irq_handler(struct drm_device *dev)
@@ -448,8 +462,97 @@ static void pch_irq_handler(struct drm_device *dev)
448 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 462 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
449} 463}
450 464
451static irqreturn_t ironlake_irq_handler(struct drm_device *dev) 465irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
466{
467 struct drm_device *dev = (struct drm_device *) arg;
468 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
469 int ret = IRQ_NONE;
470 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
471 struct drm_i915_master_private *master_priv;
472
473 atomic_inc(&dev_priv->irq_received);
474
475 /* disable master interrupt before clearing iir */
476 de_ier = I915_READ(DEIER);
477 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
478 POSTING_READ(DEIER);
479
480 de_iir = I915_READ(DEIIR);
481 gt_iir = I915_READ(GTIIR);
482 pch_iir = I915_READ(SDEIIR);
483 pm_iir = I915_READ(GEN6_PMIIR);
484
485 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
486 goto done;
487
488 ret = IRQ_HANDLED;
489
490 if (dev->primary->master) {
491 master_priv = dev->primary->master->driver_priv;
492 if (master_priv->sarea_priv)
493 master_priv->sarea_priv->last_dispatch =
494 READ_BREADCRUMB(dev_priv);
495 }
496
497 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
498 notify_ring(dev, &dev_priv->ring[RCS]);
499 if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
500 notify_ring(dev, &dev_priv->ring[VCS]);
501 if (gt_iir & GT_BLT_USER_INTERRUPT)
502 notify_ring(dev, &dev_priv->ring[BCS]);
503
504 if (de_iir & DE_GSE_IVB)
505 intel_opregion_gse_intr(dev);
506
507 if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
508 intel_prepare_page_flip(dev, 0);
509 intel_finish_page_flip_plane(dev, 0);
510 }
511
512 if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
513 intel_prepare_page_flip(dev, 1);
514 intel_finish_page_flip_plane(dev, 1);
515 }
516
517 if (de_iir & DE_PIPEA_VBLANK_IVB)
518 drm_handle_vblank(dev, 0);
519
 520 if (de_iir & DE_PIPEB_VBLANK_IVB)
521 drm_handle_vblank(dev, 1);
522
523 /* check event from PCH */
524 if (de_iir & DE_PCH_EVENT_IVB) {
525 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
526 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
527 pch_irq_handler(dev);
528 }
529
530 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
531 unsigned long flags;
532 spin_lock_irqsave(&dev_priv->rps_lock, flags);
533 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
534 I915_WRITE(GEN6_PMIMR, pm_iir);
535 dev_priv->pm_iir |= pm_iir;
536 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
537 queue_work(dev_priv->wq, &dev_priv->rps_work);
538 }
539
 540 /* should clear PCH hotplug event before clearing CPU irq */
541 I915_WRITE(SDEIIR, pch_iir);
542 I915_WRITE(GTIIR, gt_iir);
543 I915_WRITE(DEIIR, de_iir);
544 I915_WRITE(GEN6_PMIIR, pm_iir);
545
546done:
547 I915_WRITE(DEIER, de_ier);
548 POSTING_READ(DEIER);
549
550 return ret;
551}
552
553irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
452{ 554{
555 struct drm_device *dev = (struct drm_device *) arg;
453 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 556 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
454 int ret = IRQ_NONE; 557 int ret = IRQ_NONE;
455 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 558 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
@@ -457,6 +560,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
457 struct drm_i915_master_private *master_priv; 560 struct drm_i915_master_private *master_priv;
458 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; 561 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
459 562
563 atomic_inc(&dev_priv->irq_received);
564
460 if (IS_GEN6(dev)) 565 if (IS_GEN6(dev))
461 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; 566 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
462 567
@@ -526,13 +631,30 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
526 i915_handle_rps_change(dev); 631 i915_handle_rps_change(dev);
527 } 632 }
528 633
529 if (IS_GEN6(dev)) 634 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
530 gen6_pm_irq_handler(dev); 635 /*
636 * IIR bits should never already be set because IMR should
637 * prevent an interrupt from being shown in IIR. The warning
 638 * flags a case where we've unsafely cleared
639 * dev_priv->pm_iir. Although missing an interrupt of the same
 640 * type is not a problem, it indicates a bug in the logic.
641 *
642 * The mask bit in IMR is cleared by rps_work.
643 */
644 unsigned long flags;
645 spin_lock_irqsave(&dev_priv->rps_lock, flags);
646 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
647 I915_WRITE(GEN6_PMIMR, pm_iir);
648 dev_priv->pm_iir |= pm_iir;
649 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
650 queue_work(dev_priv->wq, &dev_priv->rps_work);
651 }
531 652
 532 /* should clear PCH hotplug event before clearing CPU irq */ 653 /* should clear PCH hotplug event before clearing CPU irq */
533 I915_WRITE(SDEIIR, pch_iir); 654 I915_WRITE(SDEIIR, pch_iir);
534 I915_WRITE(GTIIR, gt_iir); 655 I915_WRITE(GTIIR, gt_iir);
535 I915_WRITE(DEIIR, de_iir); 656 I915_WRITE(DEIIR, de_iir);
657 I915_WRITE(GEN6_PMIIR, pm_iir);
536 658
537done: 659done:
538 I915_WRITE(DEIER, de_ier); 660 I915_WRITE(DEIER, de_ier);
@@ -676,7 +798,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
676 err->dirty = obj->dirty; 798 err->dirty = obj->dirty;
677 err->purgeable = obj->madv != I915_MADV_WILLNEED; 799 err->purgeable = obj->madv != I915_MADV_WILLNEED;
678 err->ring = obj->ring ? obj->ring->id : 0; 800 err->ring = obj->ring ? obj->ring->id : 0;
679 err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY; 801 err->cache_level = obj->cache_level;
680 802
681 if (++i == count) 803 if (++i == count)
682 break; 804 break;
@@ -1103,9 +1225,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1103 1225
1104 atomic_inc(&dev_priv->irq_received); 1226 atomic_inc(&dev_priv->irq_received);
1105 1227
1106 if (HAS_PCH_SPLIT(dev))
1107 return ironlake_irq_handler(dev);
1108
1109 iir = I915_READ(IIR); 1228 iir = I915_READ(IIR);
1110 1229
1111 if (INTEL_INFO(dev)->gen >= 4) 1230 if (INTEL_INFO(dev)->gen >= 4)
@@ -1344,10 +1463,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1344 return -EINVAL; 1463 return -EINVAL;
1345 1464
1346 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1465 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1347 if (HAS_PCH_SPLIT(dev)) 1466 if (INTEL_INFO(dev)->gen >= 4)
1348 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1349 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1350 else if (INTEL_INFO(dev)->gen >= 4)
1351 i915_enable_pipestat(dev_priv, pipe, 1467 i915_enable_pipestat(dev_priv, pipe,
1352 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1468 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1353 else 1469 else
@@ -1362,6 +1478,38 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1362 return 0; 1478 return 0;
1363} 1479}
1364 1480
1481int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1482{
1483 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1484 unsigned long irqflags;
1485
1486 if (!i915_pipe_enabled(dev, pipe))
1487 return -EINVAL;
1488
1489 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1490 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1491 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1492 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1493
1494 return 0;
1495}
1496
1497int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1498{
1499 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1500 unsigned long irqflags;
1501
1502 if (!i915_pipe_enabled(dev, pipe))
1503 return -EINVAL;
1504
1505 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1506 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1507 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1508 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1509
1510 return 0;
1511}
1512
1365/* Called from drm generic code, passed 'crtc' which 1513/* Called from drm generic code, passed 'crtc' which
1366 * we use as a pipe index 1514 * we use as a pipe index
1367 */ 1515 */
@@ -1375,13 +1523,31 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1375 I915_WRITE(INSTPM, 1523 I915_WRITE(INSTPM,
1376 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); 1524 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1377 1525
1378 if (HAS_PCH_SPLIT(dev)) 1526 i915_disable_pipestat(dev_priv, pipe,
1379 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1527 PIPE_VBLANK_INTERRUPT_ENABLE |
1380 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1528 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1381 else 1529 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1382 i915_disable_pipestat(dev_priv, pipe, 1530}
1383 PIPE_VBLANK_INTERRUPT_ENABLE | 1531
1384 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1532void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1533{
1534 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1535 unsigned long irqflags;
1536
1537 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1538 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1539 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1540 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1541}
1542
1543void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1544{
1545 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1546 unsigned long irqflags;
1547
1548 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1549 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1550 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
1385 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1551 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1386} 1552}
1387 1553
@@ -1562,10 +1728,17 @@ repeat:
1562 1728
1563/* drm_dma.h hooks 1729/* drm_dma.h hooks
1564*/ 1730*/
1565static void ironlake_irq_preinstall(struct drm_device *dev) 1731void ironlake_irq_preinstall(struct drm_device *dev)
1566{ 1732{
1567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1733 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1568 1734
1735 atomic_set(&dev_priv->irq_received, 0);
1736
1737 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1738 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1739 if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
1740 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
1741
1569 I915_WRITE(HWSTAM, 0xeffe); 1742 I915_WRITE(HWSTAM, 0xeffe);
1570 1743
1571 /* XXX hotplug from PCH */ 1744 /* XXX hotplug from PCH */
@@ -1585,7 +1758,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1585 POSTING_READ(SDEIER); 1758 POSTING_READ(SDEIER);
1586} 1759}
1587 1760
1588static int ironlake_irq_postinstall(struct drm_device *dev) 1761int ironlake_irq_postinstall(struct drm_device *dev)
1589{ 1762{
1590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1763 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 1591 /* enable the kinds of interrupts that are always enabled */ 1764 /* enable the kinds of interrupts that are always enabled */
@@ -1594,6 +1767,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1594 u32 render_irqs; 1767 u32 render_irqs;
1595 u32 hotplug_mask; 1768 u32 hotplug_mask;
1596 1769
1770 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1771 if (HAS_BSD(dev))
1772 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1773 if (HAS_BLT(dev))
1774 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1775
1776 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1597 dev_priv->irq_mask = ~display_mask; 1777 dev_priv->irq_mask = ~display_mask;
1598 1778
 1599 /* should always be able to generate an irq */ 1779 /* should always be able to generate an irq */
@@ -1650,6 +1830,56 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1650 return 0; 1830 return 0;
1651} 1831}
1652 1832
1833int ivybridge_irq_postinstall(struct drm_device *dev)
1834{
1835 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 1836 /* enable the kinds of interrupts that are always enabled */
1837 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
1838 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
1839 DE_PLANEB_FLIP_DONE_IVB;
1840 u32 render_irqs;
1841 u32 hotplug_mask;
1842
1843 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1844 if (HAS_BSD(dev))
1845 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1846 if (HAS_BLT(dev))
1847 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1848
1849 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1850 dev_priv->irq_mask = ~display_mask;
1851
 1852 /* should always be able to generate an irq */
1853 I915_WRITE(DEIIR, I915_READ(DEIIR));
1854 I915_WRITE(DEIMR, dev_priv->irq_mask);
1855 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
1856 DE_PIPEB_VBLANK_IVB);
1857 POSTING_READ(DEIER);
1858
1859 dev_priv->gt_irq_mask = ~0;
1860
1861 I915_WRITE(GTIIR, I915_READ(GTIIR));
1862 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1863
1864 render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
1865 GT_BLT_USER_INTERRUPT;
1866 I915_WRITE(GTIER, render_irqs);
1867 POSTING_READ(GTIER);
1868
1869 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1870 SDE_PORTB_HOTPLUG_CPT |
1871 SDE_PORTC_HOTPLUG_CPT |
1872 SDE_PORTD_HOTPLUG_CPT);
1873 dev_priv->pch_irq_mask = ~hotplug_mask;
1874
1875 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1876 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1877 I915_WRITE(SDEIER, hotplug_mask);
1878 POSTING_READ(SDEIER);
1879
1880 return 0;
1881}
1882
1653void i915_driver_irq_preinstall(struct drm_device * dev) 1883void i915_driver_irq_preinstall(struct drm_device * dev)
1654{ 1884{
1655 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1885 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1660,11 +1890,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1660 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1890 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1661 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1891 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1662 1892
1663 if (HAS_PCH_SPLIT(dev)) {
1664 ironlake_irq_preinstall(dev);
1665 return;
1666 }
1667
1668 if (I915_HAS_HOTPLUG(dev)) { 1893 if (I915_HAS_HOTPLUG(dev)) {
1669 I915_WRITE(PORT_HOTPLUG_EN, 0); 1894 I915_WRITE(PORT_HOTPLUG_EN, 0);
1670 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1895 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -1688,17 +1913,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1688 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1913 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1689 u32 error_mask; 1914 u32 error_mask;
1690 1915
1691 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1692 if (HAS_BSD(dev))
1693 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1694 if (HAS_BLT(dev))
1695 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1696
1697 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1916 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1698 1917
1699 if (HAS_PCH_SPLIT(dev))
1700 return ironlake_irq_postinstall(dev);
1701
1702 /* Unmask the interrupts that we always want on. */ 1918 /* Unmask the interrupts that we always want on. */
1703 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; 1919 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1704 1920
@@ -1767,9 +1983,15 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1767 return 0; 1983 return 0;
1768} 1984}
1769 1985
1770static void ironlake_irq_uninstall(struct drm_device *dev) 1986void ironlake_irq_uninstall(struct drm_device *dev)
1771{ 1987{
1772 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1988 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1989
1990 if (!dev_priv)
1991 return;
1992
1993 dev_priv->vblank_pipe = 0;
1994
1773 I915_WRITE(HWSTAM, 0xffffffff); 1995 I915_WRITE(HWSTAM, 0xffffffff);
1774 1996
1775 I915_WRITE(DEIMR, 0xffffffff); 1997 I915_WRITE(DEIMR, 0xffffffff);
@@ -1791,11 +2013,6 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1791 2013
1792 dev_priv->vblank_pipe = 0; 2014 dev_priv->vblank_pipe = 0;
1793 2015
1794 if (HAS_PCH_SPLIT(dev)) {
1795 ironlake_irq_uninstall(dev);
1796 return;
1797 }
1798
1799 if (I915_HAS_HOTPLUG(dev)) { 2016 if (I915_HAS_HOTPLUG(dev)) {
1800 I915_WRITE(PORT_HOTPLUG_EN, 0); 2017 I915_WRITE(PORT_HOTPLUG_EN, 0);
1801 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2018 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
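The PM-interrupt rework above gives RPS handling the classic top-half/bottom-half shape: the hard IRQ masks the bits it saw, stashes them under rps_lock, and queues rps_work; the worker later drains the stash, adjusts the RPS delay, and unmasks. The two halves, condensed into a sketch (both appear in full in the hunks above):

	/* top half: ironlake/ivybridge IRQ handler, hard IRQ context */
	spin_lock_irqsave(&dev_priv->rps_lock, flags);
	I915_WRITE(GEN6_PMIMR, pm_iir);		/* mask: no repeats until handled */
	dev_priv->pm_iir |= pm_iir;		/* stash for the worker */
	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
	queue_work(dev_priv->wq, &dev_priv->rps_work);

	/* bottom half: gen6_pm_rps_work, process context */
	spin_lock_irq(&dev_priv->rps_lock);
	pm_iir = dev_priv->pm_iir;		/* drain the stash */
	dev_priv->pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* ... pick new_delay, call gen6_set_rps() ... */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);	/* unmask what was handled */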
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f39ac3a0fa93..2f967af8e62e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -291,6 +291,9 @@
291#define RING_MAX_IDLE(base) ((base)+0x54) 291#define RING_MAX_IDLE(base) ((base)+0x54)
292#define RING_HWS_PGA(base) ((base)+0x80) 292#define RING_HWS_PGA(base) ((base)+0x80)
293#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 293#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
294#define RENDER_HWS_PGA_GEN7 (0x04080)
295#define BSD_HWS_PGA_GEN7 (0x04180)
296#define BLT_HWS_PGA_GEN7 (0x04280)
294#define RING_ACTHD(base) ((base)+0x74) 297#define RING_ACTHD(base) ((base)+0x74)
295#define RING_NOPID(base) ((base)+0x94) 298#define RING_NOPID(base) ((base)+0x94)
296#define RING_IMR(base) ((base)+0xa8) 299#define RING_IMR(base) ((base)+0xa8)
@@ -2778,6 +2781,19 @@
2778#define DE_PIPEA_VSYNC (1 << 3) 2781#define DE_PIPEA_VSYNC (1 << 3)
2779#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) 2782#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
2780 2783
2784/* More Ivybridge lolz */
2785#define DE_ERR_DEBUG_IVB (1<<30)
2786#define DE_GSE_IVB (1<<29)
2787#define DE_PCH_EVENT_IVB (1<<28)
2788#define DE_DP_A_HOTPLUG_IVB (1<<27)
2789#define DE_AUX_CHANNEL_A_IVB (1<<26)
2790#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
2791#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
2792#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
2793#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
2794#define DE_PIPEB_VBLANK_IVB (1<<5)
2795#define DE_PIPEA_VBLANK_IVB (1<<0)
2796
2781#define DEISR 0x44000 2797#define DEISR 0x44000
2782#define DEIMR 0x44004 2798#define DEIMR 0x44004
2783#define DEIIR 0x44008 2799#define DEIIR 0x44008
@@ -2809,6 +2825,7 @@
2809#define ILK_eDP_A_DISABLE (1<<24) 2825#define ILK_eDP_A_DISABLE (1<<24)
2810#define ILK_DESKTOP (1<<23) 2826#define ILK_DESKTOP (1<<23)
2811#define ILK_DSPCLK_GATE 0x42020 2827#define ILK_DSPCLK_GATE 0x42020
2828#define IVB_VRHUNIT_CLK_GATE (1<<28)
2812#define ILK_DPARB_CLK_GATE (1<<5) 2829#define ILK_DPARB_CLK_GATE (1<<5)
2813#define ILK_DPFD_CLK_GATE (1<<7) 2830#define ILK_DPFD_CLK_GATE (1<<7)
2814 2831
@@ -3057,6 +3074,9 @@
3057#define TRANS_6BPC (2<<5) 3074#define TRANS_6BPC (2<<5)
3058#define TRANS_12BPC (3<<5) 3075#define TRANS_12BPC (3<<5)
3059 3076
3077#define SOUTH_CHICKEN2 0xc2004
3078#define DPLS_EDP_PPS_FIX_DIS (1<<0)
3079
3060#define _FDI_RXA_CHICKEN 0xc200c 3080#define _FDI_RXA_CHICKEN 0xc200c
3061#define _FDI_RXB_CHICKEN 0xc2010 3081#define _FDI_RXB_CHICKEN 0xc2010
3062#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) 3082#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
@@ -3104,7 +3124,15 @@
3104#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) 3124#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
3105/* Ironlake: hardwired to 1 */ 3125/* Ironlake: hardwired to 1 */
3106#define FDI_TX_PLL_ENABLE (1<<14) 3126#define FDI_TX_PLL_ENABLE (1<<14)
3127
3128/* Ivybridge has different bits for lolz */
3129#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
3130#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
3131#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
3132#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
3133
3107/* both Tx and Rx */ 3134/* both Tx and Rx */
3135#define FDI_LINK_TRAIN_AUTO (1<<10)
3108#define FDI_SCRAMBLING_ENABLE (0<<7) 3136#define FDI_SCRAMBLING_ENABLE (0<<7)
3109#define FDI_SCRAMBLING_DISABLE (1<<7) 3137#define FDI_SCRAMBLING_DISABLE (1<<7)
3110 3138
@@ -3114,6 +3142,8 @@
3114#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) 3142#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
3115#define FDI_RX_ENABLE (1<<31) 3143#define FDI_RX_ENABLE (1<<31)
3116/* train, dp width same as FDI_TX */ 3144/* train, dp width same as FDI_TX */
3145#define FDI_FS_ERRC_ENABLE (1<<27)
3146#define FDI_FE_ERRC_ENABLE (1<<26)
3117#define FDI_DP_PORT_WIDTH_X8 (7<<19) 3147#define FDI_DP_PORT_WIDTH_X8 (7<<19)
3118#define FDI_8BPC (0<<16) 3148#define FDI_8BPC (0<<16)
3119#define FDI_10BPC (1<<16) 3149#define FDI_10BPC (1<<16)
@@ -3386,7 +3416,7 @@
3386#define GEN6_PMINTRMSK 0xA168 3416#define GEN6_PMINTRMSK 0xA168
3387 3417
3388#define GEN6_PMISR 0x44020 3418#define GEN6_PMISR 0x44020
3389#define GEN6_PMIMR 0x44024 3419#define GEN6_PMIMR 0x44024 /* rps_lock */
3390#define GEN6_PMIIR 0x44028 3420#define GEN6_PMIIR 0x44028
3391#define GEN6_PMIER 0x4402C 3421#define GEN6_PMIER 0x4402C
3392#define GEN6_PM_MBOX_EVENT (1<<25) 3422#define GEN6_PM_MBOX_EVENT (1<<25)
@@ -3396,6 +3426,9 @@
3396#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) 3426#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
3397#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) 3427#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
3398#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) 3428#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
3429#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
3430 GEN6_PM_RP_DOWN_THRESHOLD | \
3431 GEN6_PM_RP_DOWN_TIMEOUT)
3399 3432
3400#define GEN6_PCODE_MAILBOX 0x138124 3433#define GEN6_PCODE_MAILBOX 0x138124
3401#define GEN6_PCODE_READY (1<<31) 3434#define GEN6_PCODE_READY (1<<31)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index da474153a0a2..60a94d2b5264 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -863,8 +863,7 @@ int i915_restore_state(struct drm_device *dev)
863 I915_WRITE(IMR, dev_priv->saveIMR); 863 I915_WRITE(IMR, dev_priv->saveIMR);
864 } 864 }
865 865
866 /* Clock gating state */ 866 intel_init_clock_gating(dev);
867 intel_enable_clock_gating(dev);
868 867
869 if (IS_IRONLAKE_M(dev)) { 868 if (IS_IRONLAKE_M(dev)) {
870 ironlake_enable_drps(dev); 869 ironlake_enable_drps(dev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fb5b4d426ae0..927442a11925 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -214,9 +214,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
214 i915_lvds_downclock) { 214 i915_lvds_downclock) {
215 dev_priv->lvds_downclock_avail = 1; 215 dev_priv->lvds_downclock_avail = 1;
216 dev_priv->lvds_downclock = temp_downclock; 216 dev_priv->lvds_downclock = temp_downclock;
217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", 217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
218 "Normal Clock %dKHz, downclock %dKHz\n", 218 "Normal Clock %dKHz, downclock %dKHz\n",
219 temp_downclock, panel_fixed_mode->clock); 219 temp_downclock, panel_fixed_mode->clock);
220 } 220 }
221 return; 221 return;
222} 222}
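The intel_bios.c hunk is a one-character fix with real consequences: with the comma, the first string alone was the format (and it contains no conversions), so the second literal and both integers were passed as unused variadic arguments and the clock values were never printed. A standalone illustration, with printf standing in for DRM_DEBUG_KMS and made-up values:

	#include <stdio.h>

	int main(void)
	{
		int normal = 65000, down = 40000;

		/* buggy (comma): the format string has no conversions, so the
		 * message tail and both values are silently discarded
		 * (compilers warn about the extra arguments) */
		printf("LVDS downclock is found in VBT. ",
		       "Normal Clock %dKHz, downclock %dKHz\n", normal, down);

		/* fixed (no comma): adjacent string literals concatenate
		 * into one format string at compile time */
		printf("LVDS downclock is found in VBT. "
		       "Normal Clock %dKHz, downclock %dKHz\n", normal, down);
		return 0;
	}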
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index d03fc05b39c0..e93f93cc7e78 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -305,13 +305,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
305} 305}
306 306
307static enum drm_connector_status 307static enum drm_connector_status
308intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) 308intel_crt_load_detect(struct intel_crt *crt)
309{ 309{
310 struct drm_encoder *encoder = &crt->base.base; 310 struct drm_device *dev = crt->base.base.dev;
311 struct drm_device *dev = encoder->dev;
312 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = dev->dev_private;
313 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 312 uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
314 uint32_t pipe = intel_crtc->pipe;
315 uint32_t save_bclrpat; 313 uint32_t save_bclrpat;
316 uint32_t save_vtotal; 314 uint32_t save_vtotal;
317 uint32_t vtotal, vactive; 315 uint32_t vtotal, vactive;
@@ -432,7 +430,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
432 struct drm_device *dev = connector->dev; 430 struct drm_device *dev = connector->dev;
433 struct intel_crt *crt = intel_attached_crt(connector); 431 struct intel_crt *crt = intel_attached_crt(connector);
434 struct drm_crtc *crtc; 432 struct drm_crtc *crtc;
435 int dpms_mode;
436 enum drm_connector_status status; 433 enum drm_connector_status status;
437 434
438 if (I915_HAS_HOTPLUG(dev)) { 435 if (I915_HAS_HOTPLUG(dev)) {
@@ -454,17 +451,18 @@ intel_crt_detect(struct drm_connector *connector, bool force)
454 /* for pre-945g platforms use load detect */ 451 /* for pre-945g platforms use load detect */
455 crtc = crt->base.base.crtc; 452 crtc = crt->base.base.crtc;
456 if (crtc && crtc->enabled) { 453 if (crtc && crtc->enabled) {
457 status = intel_crt_load_detect(crtc, crt); 454 status = intel_crt_load_detect(crt);
458 } else { 455 } else {
459 crtc = intel_get_load_detect_pipe(&crt->base, connector, 456 struct intel_load_detect_pipe tmp;
460 NULL, &dpms_mode); 457
461 if (crtc) { 458 if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
459 &tmp)) {
462 if (intel_crt_detect_ddc(connector)) 460 if (intel_crt_detect_ddc(connector))
463 status = connector_status_connected; 461 status = connector_status_connected;
464 else 462 else
465 status = intel_crt_load_detect(crtc, crt); 463 status = intel_crt_load_detect(crt);
466 intel_release_load_detect_pipe(&crt->base, 464 intel_release_load_detect_pipe(&crt->base, connector,
467 connector, dpms_mode); 465 &tmp);
468 } else 466 } else
469 status = connector_status_unknown; 467 status = connector_status_unknown;
470 } 468 }
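The intel_crt.c hunk follows an API change made elsewhere in this series: intel_get_load_detect_pipe() now returns a bool and records everything needed for restore in a caller-provided struct intel_load_detect_pipe, so the old crtc and dpms_mode out-parameters go away and intel_crt_load_detect() derives its pipe from crt->base.base.crtc itself. The resulting calling convention, reduced to its shape (the struct's fields are defined elsewhere in the series and treated as opaque here):

	struct intel_load_detect_pipe tmp;

	if (intel_get_load_detect_pipe(&crt->base, connector, NULL, &tmp)) {
		status = intel_crt_detect_ddc(connector)
			? connector_status_connected
			: intel_crt_load_detect(crt);
		intel_release_load_detect_pipe(&crt->base, connector, &tmp);
	} else {
		status = connector_status_unknown;
	}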
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2166ee071ddb..f553ddfdc168 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -76,255 +76,6 @@ struct intel_limit {
76 int, int, intel_clock_t *); 76 int, int, intel_clock_t *);
77}; 77};
78 78
79#define I8XX_DOT_MIN 25000
80#define I8XX_DOT_MAX 350000
81#define I8XX_VCO_MIN 930000
82#define I8XX_VCO_MAX 1400000
83#define I8XX_N_MIN 3
84#define I8XX_N_MAX 16
85#define I8XX_M_MIN 96
86#define I8XX_M_MAX 140
87#define I8XX_M1_MIN 18
88#define I8XX_M1_MAX 26
89#define I8XX_M2_MIN 6
90#define I8XX_M2_MAX 16
91#define I8XX_P_MIN 4
92#define I8XX_P_MAX 128
93#define I8XX_P1_MIN 2
94#define I8XX_P1_MAX 33
95#define I8XX_P1_LVDS_MIN 1
96#define I8XX_P1_LVDS_MAX 6
97#define I8XX_P2_SLOW 4
98#define I8XX_P2_FAST 2
99#define I8XX_P2_LVDS_SLOW 14
100#define I8XX_P2_LVDS_FAST 7
101#define I8XX_P2_SLOW_LIMIT 165000
102
103#define I9XX_DOT_MIN 20000
104#define I9XX_DOT_MAX 400000
105#define I9XX_VCO_MIN 1400000
106#define I9XX_VCO_MAX 2800000
107#define PINEVIEW_VCO_MIN 1700000
108#define PINEVIEW_VCO_MAX 3500000
109#define I9XX_N_MIN 1
110#define I9XX_N_MAX 6
111/* Pineview's Ncounter is a ring counter */
112#define PINEVIEW_N_MIN 3
113#define PINEVIEW_N_MAX 6
114#define I9XX_M_MIN 70
115#define I9XX_M_MAX 120
116#define PINEVIEW_M_MIN 2
117#define PINEVIEW_M_MAX 256
118#define I9XX_M1_MIN 10
119#define I9XX_M1_MAX 22
120#define I9XX_M2_MIN 5
121#define I9XX_M2_MAX 9
122/* Pineview M1 is reserved, and must be 0 */
123#define PINEVIEW_M1_MIN 0
124#define PINEVIEW_M1_MAX 0
125#define PINEVIEW_M2_MIN 0
126#define PINEVIEW_M2_MAX 254
127#define I9XX_P_SDVO_DAC_MIN 5
128#define I9XX_P_SDVO_DAC_MAX 80
129#define I9XX_P_LVDS_MIN 7
130#define I9XX_P_LVDS_MAX 98
131#define PINEVIEW_P_LVDS_MIN 7
132#define PINEVIEW_P_LVDS_MAX 112
133#define I9XX_P1_MIN 1
134#define I9XX_P1_MAX 8
135#define I9XX_P2_SDVO_DAC_SLOW 10
136#define I9XX_P2_SDVO_DAC_FAST 5
137#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
138#define I9XX_P2_LVDS_SLOW 14
139#define I9XX_P2_LVDS_FAST 7
140#define I9XX_P2_LVDS_SLOW_LIMIT 112000
141
142/*The parameter is for SDVO on G4x platform*/
143#define G4X_DOT_SDVO_MIN 25000
144#define G4X_DOT_SDVO_MAX 270000
145#define G4X_VCO_MIN 1750000
146#define G4X_VCO_MAX 3500000
147#define G4X_N_SDVO_MIN 1
148#define G4X_N_SDVO_MAX 4
149#define G4X_M_SDVO_MIN 104
150#define G4X_M_SDVO_MAX 138
151#define G4X_M1_SDVO_MIN 17
152#define G4X_M1_SDVO_MAX 23
153#define G4X_M2_SDVO_MIN 5
154#define G4X_M2_SDVO_MAX 11
155#define G4X_P_SDVO_MIN 10
156#define G4X_P_SDVO_MAX 30
157#define G4X_P1_SDVO_MIN 1
158#define G4X_P1_SDVO_MAX 3
159#define G4X_P2_SDVO_SLOW 10
160#define G4X_P2_SDVO_FAST 10
161#define G4X_P2_SDVO_LIMIT 270000
162
163/*The parameter is for HDMI_DAC on G4x platform*/
164#define G4X_DOT_HDMI_DAC_MIN 22000
165#define G4X_DOT_HDMI_DAC_MAX 400000
166#define G4X_N_HDMI_DAC_MIN 1
167#define G4X_N_HDMI_DAC_MAX 4
168#define G4X_M_HDMI_DAC_MIN 104
169#define G4X_M_HDMI_DAC_MAX 138
170#define G4X_M1_HDMI_DAC_MIN 16
171#define G4X_M1_HDMI_DAC_MAX 23
172#define G4X_M2_HDMI_DAC_MIN 5
173#define G4X_M2_HDMI_DAC_MAX 11
174#define G4X_P_HDMI_DAC_MIN 5
175#define G4X_P_HDMI_DAC_MAX 80
176#define G4X_P1_HDMI_DAC_MIN 1
177#define G4X_P1_HDMI_DAC_MAX 8
178#define G4X_P2_HDMI_DAC_SLOW 10
179#define G4X_P2_HDMI_DAC_FAST 5
180#define G4X_P2_HDMI_DAC_LIMIT 165000
181
182/*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/
183#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
184#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
185#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
186#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
187#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
188#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
189#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
190#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
191#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
192#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
193#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
194#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
195#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
196#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
197#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
198#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
199#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
200
201/*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/
202#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
203#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
204#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
205#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
206#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
207#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
208#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
209#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
210#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
211#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
212#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
213#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
214#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
215#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
216#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
218#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
219
220/*The parameter is for DISPLAY PORT on G4x platform*/
221#define G4X_DOT_DISPLAY_PORT_MIN 161670
222#define G4X_DOT_DISPLAY_PORT_MAX 227000
223#define G4X_N_DISPLAY_PORT_MIN 1
224#define G4X_N_DISPLAY_PORT_MAX 2
225#define G4X_M_DISPLAY_PORT_MIN 97
226#define G4X_M_DISPLAY_PORT_MAX 108
227#define G4X_M1_DISPLAY_PORT_MIN 0x10
228#define G4X_M1_DISPLAY_PORT_MAX 0x12
229#define G4X_M2_DISPLAY_PORT_MIN 0x05
230#define G4X_M2_DISPLAY_PORT_MAX 0x06
231#define G4X_P_DISPLAY_PORT_MIN 10
232#define G4X_P_DISPLAY_PORT_MAX 20
233#define G4X_P1_DISPLAY_PORT_MIN 1
234#define G4X_P1_DISPLAY_PORT_MAX 2
235#define G4X_P2_DISPLAY_PORT_SLOW 10
236#define G4X_P2_DISPLAY_PORT_FAST 10
237#define G4X_P2_DISPLAY_PORT_LIMIT 0
238
239/* Ironlake / Sandybridge */
240/* as we calculate clock using (register_value + 2) for
241 N/M1/M2, so here the range value for them is (actual_value-2).
242 */
243#define IRONLAKE_DOT_MIN 25000
244#define IRONLAKE_DOT_MAX 350000
245#define IRONLAKE_VCO_MIN 1760000
246#define IRONLAKE_VCO_MAX 3510000
247#define IRONLAKE_M1_MIN 12
248#define IRONLAKE_M1_MAX 22
249#define IRONLAKE_M2_MIN 5
250#define IRONLAKE_M2_MAX 9
251#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
252
253/* We have parameter ranges for different type of outputs. */
254
255/* DAC & HDMI Refclk 120Mhz */
256#define IRONLAKE_DAC_N_MIN 1
257#define IRONLAKE_DAC_N_MAX 5
258#define IRONLAKE_DAC_M_MIN 79
259#define IRONLAKE_DAC_M_MAX 127
260#define IRONLAKE_DAC_P_MIN 5
261#define IRONLAKE_DAC_P_MAX 80
262#define IRONLAKE_DAC_P1_MIN 1
263#define IRONLAKE_DAC_P1_MAX 8
264#define IRONLAKE_DAC_P2_SLOW 10
265#define IRONLAKE_DAC_P2_FAST 5
266
267/* LVDS single-channel 120Mhz refclk */
268#define IRONLAKE_LVDS_S_N_MIN 1
269#define IRONLAKE_LVDS_S_N_MAX 3
270#define IRONLAKE_LVDS_S_M_MIN 79
271#define IRONLAKE_LVDS_S_M_MAX 118
272#define IRONLAKE_LVDS_S_P_MIN 28
273#define IRONLAKE_LVDS_S_P_MAX 112
274#define IRONLAKE_LVDS_S_P1_MIN 2
275#define IRONLAKE_LVDS_S_P1_MAX 8
276#define IRONLAKE_LVDS_S_P2_SLOW 14
277#define IRONLAKE_LVDS_S_P2_FAST 14
278
279/* LVDS dual-channel 120Mhz refclk */
280#define IRONLAKE_LVDS_D_N_MIN 1
281#define IRONLAKE_LVDS_D_N_MAX 3
282#define IRONLAKE_LVDS_D_M_MIN 79
283#define IRONLAKE_LVDS_D_M_MAX 127
284#define IRONLAKE_LVDS_D_P_MIN 14
285#define IRONLAKE_LVDS_D_P_MAX 56
286#define IRONLAKE_LVDS_D_P1_MIN 2
287#define IRONLAKE_LVDS_D_P1_MAX 8
288#define IRONLAKE_LVDS_D_P2_SLOW 7
289#define IRONLAKE_LVDS_D_P2_FAST 7
290
291/* LVDS single-channel 100Mhz refclk */
292#define IRONLAKE_LVDS_S_SSC_N_MIN 1
293#define IRONLAKE_LVDS_S_SSC_N_MAX 2
294#define IRONLAKE_LVDS_S_SSC_M_MIN 79
295#define IRONLAKE_LVDS_S_SSC_M_MAX 126
296#define IRONLAKE_LVDS_S_SSC_P_MIN 28
297#define IRONLAKE_LVDS_S_SSC_P_MAX 112
298#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
299#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
300#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
301#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
302
303/* LVDS dual-channel 100Mhz refclk */
304#define IRONLAKE_LVDS_D_SSC_N_MIN 1
305#define IRONLAKE_LVDS_D_SSC_N_MAX 3
306#define IRONLAKE_LVDS_D_SSC_M_MIN 79
307#define IRONLAKE_LVDS_D_SSC_M_MAX 126
308#define IRONLAKE_LVDS_D_SSC_P_MIN 14
309#define IRONLAKE_LVDS_D_SSC_P_MAX 42
310#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
311#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
312#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
313#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
314
315/* DisplayPort */
316#define IRONLAKE_DP_N_MIN 1
317#define IRONLAKE_DP_N_MAX 2
318#define IRONLAKE_DP_M_MIN 81
319#define IRONLAKE_DP_M_MAX 90
320#define IRONLAKE_DP_P_MIN 10
321#define IRONLAKE_DP_P_MAX 20
322#define IRONLAKE_DP_P2_FAST 10
323#define IRONLAKE_DP_P2_SLOW 10
324#define IRONLAKE_DP_P2_LIMIT 0
325#define IRONLAKE_DP_P1_MIN 1
326#define IRONLAKE_DP_P1_MAX 2
327
328/* FDI */ 79/* FDI */
329#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 80#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
330 81
@@ -353,292 +104,253 @@ intel_fdi_link_freq(struct drm_device *dev)
353} 104}
354 105
355static const intel_limit_t intel_limits_i8xx_dvo = { 106static const intel_limit_t intel_limits_i8xx_dvo = {
356 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 107 .dot = { .min = 25000, .max = 350000 },
357 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 108 .vco = { .min = 930000, .max = 1400000 },
358 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 109 .n = { .min = 3, .max = 16 },
359 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 110 .m = { .min = 96, .max = 140 },
360 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 111 .m1 = { .min = 18, .max = 26 },
361 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 112 .m2 = { .min = 6, .max = 16 },
362 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 113 .p = { .min = 4, .max = 128 },
363 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 114 .p1 = { .min = 2, .max = 33 },
364 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 115 .p2 = { .dot_limit = 165000,
365 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 116 .p2_slow = 4, .p2_fast = 2 },
366 .find_pll = intel_find_best_PLL, 117 .find_pll = intel_find_best_PLL,
367}; 118};
368 119
369static const intel_limit_t intel_limits_i8xx_lvds = { 120static const intel_limit_t intel_limits_i8xx_lvds = {
370 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 121 .dot = { .min = 25000, .max = 350000 },
371 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 122 .vco = { .min = 930000, .max = 1400000 },
372 .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, 123 .n = { .min = 3, .max = 16 },
373 .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, 124 .m = { .min = 96, .max = 140 },
374 .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, 125 .m1 = { .min = 18, .max = 26 },
375 .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, 126 .m2 = { .min = 6, .max = 16 },
376 .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, 127 .p = { .min = 4, .max = 128 },
377 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 128 .p1 = { .min = 1, .max = 6 },
378 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 129 .p2 = { .dot_limit = 165000,
379 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 130 .p2_slow = 14, .p2_fast = 7 },
380 .find_pll = intel_find_best_PLL, 131 .find_pll = intel_find_best_PLL,
381}; 132};
382 133
383static const intel_limit_t intel_limits_i9xx_sdvo = { 134static const intel_limit_t intel_limits_i9xx_sdvo = {
384 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 135 .dot = { .min = 20000, .max = 400000 },
385 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 136 .vco = { .min = 1400000, .max = 2800000 },
386 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 137 .n = { .min = 1, .max = 6 },
387 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 138 .m = { .min = 70, .max = 120 },
388 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 139 .m1 = { .min = 10, .max = 22 },
389 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 140 .m2 = { .min = 5, .max = 9 },
390 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 141 .p = { .min = 5, .max = 80 },
391 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 142 .p1 = { .min = 1, .max = 8 },
392 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 143 .p2 = { .dot_limit = 200000,
393 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 144 .p2_slow = 10, .p2_fast = 5 },
394 .find_pll = intel_find_best_PLL, 145 .find_pll = intel_find_best_PLL,
395}; 146};
396 147
397static const intel_limit_t intel_limits_i9xx_lvds = { 148static const intel_limit_t intel_limits_i9xx_lvds = {
398 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 149 .dot = { .min = 20000, .max = 400000 },
399 .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, 150 .vco = { .min = 1400000, .max = 2800000 },
400 .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, 151 .n = { .min = 1, .max = 6 },
401 .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, 152 .m = { .min = 70, .max = 120 },
402 .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, 153 .m1 = { .min = 10, .max = 22 },
403 .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, 154 .m2 = { .min = 5, .max = 9 },
404 .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, 155 .p = { .min = 7, .max = 98 },
405 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 156 .p1 = { .min = 1, .max = 8 },
406 /* The single-channel range is 25-112MHz, and dual-channel 157 .p2 = { .dot_limit = 112000,
407 * is 80-224MHz. Prefer single channel as much as possible. 158 .p2_slow = 14, .p2_fast = 7 },
408 */
409 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
410 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
411 .find_pll = intel_find_best_PLL, 159 .find_pll = intel_find_best_PLL,
412}; 160};
413 161
414 /* The parameters and functions below are for the G4X chipset family. */ 162
415static const intel_limit_t intel_limits_g4x_sdvo = { 163static const intel_limit_t intel_limits_g4x_sdvo = {
416 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, 164 .dot = { .min = 25000, .max = 270000 },
417 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 165 .vco = { .min = 1750000, .max = 3500000},
418 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, 166 .n = { .min = 1, .max = 4 },
419 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, 167 .m = { .min = 104, .max = 138 },
420 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, 168 .m1 = { .min = 17, .max = 23 },
421 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, 169 .m2 = { .min = 5, .max = 11 },
422 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, 170 .p = { .min = 10, .max = 30 },
423 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, 171 .p1 = { .min = 1, .max = 3},
424 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, 172 .p2 = { .dot_limit = 270000,
425 .p2_slow = G4X_P2_SDVO_SLOW, 173 .p2_slow = 10,
426 .p2_fast = G4X_P2_SDVO_FAST 174 .p2_fast = 10
427 }, 175 },
428 .find_pll = intel_g4x_find_best_PLL, 176 .find_pll = intel_g4x_find_best_PLL,
429}; 177};
430 178
431static const intel_limit_t intel_limits_g4x_hdmi = { 179static const intel_limit_t intel_limits_g4x_hdmi = {
432 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, 180 .dot = { .min = 22000, .max = 400000 },
433 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, 181 .vco = { .min = 1750000, .max = 3500000},
434 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, 182 .n = { .min = 1, .max = 4 },
435 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, 183 .m = { .min = 104, .max = 138 },
436 .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, 184 .m1 = { .min = 16, .max = 23 },
437 .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, 185 .m2 = { .min = 5, .max = 11 },
438 .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, 186 .p = { .min = 5, .max = 80 },
439 .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, 187 .p1 = { .min = 1, .max = 8},
440 .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, 188 .p2 = { .dot_limit = 165000,
441 .p2_slow = G4X_P2_HDMI_DAC_SLOW, 189 .p2_slow = 10, .p2_fast = 5 },
442 .p2_fast = G4X_P2_HDMI_DAC_FAST
443 },
444 .find_pll = intel_g4x_find_best_PLL, 190 .find_pll = intel_g4x_find_best_PLL,
445}; 191};
446 192
447static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 193static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
448 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, 194 .dot = { .min = 20000, .max = 115000 },
449 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, 195 .vco = { .min = 1750000, .max = 3500000 },
450 .vco = { .min = G4X_VCO_MIN, 196 .n = { .min = 1, .max = 3 },
451 .max = G4X_VCO_MAX }, 197 .m = { .min = 104, .max = 138 },
452 .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, 198 .m1 = { .min = 17, .max = 23 },
453 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, 199 .m2 = { .min = 5, .max = 11 },
454 .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, 200 .p = { .min = 28, .max = 112 },
455 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, 201 .p1 = { .min = 2, .max = 8 },
456 .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, 202 .p2 = { .dot_limit = 0,
457 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, 203 .p2_slow = 14, .p2_fast = 14
458 .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
459 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
460 .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
461 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
462 .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
463 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
464 .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
465 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
466 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
467 }, 204 },
468 .find_pll = intel_g4x_find_best_PLL, 205 .find_pll = intel_g4x_find_best_PLL,
469}; 206};
470 207
471static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 208static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
472 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, 209 .dot = { .min = 80000, .max = 224000 },
473 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, 210 .vco = { .min = 1750000, .max = 3500000 },
474 .vco = { .min = G4X_VCO_MIN, 211 .n = { .min = 1, .max = 3 },
475 .max = G4X_VCO_MAX }, 212 .m = { .min = 104, .max = 138 },
476 .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, 213 .m1 = { .min = 17, .max = 23 },
477 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, 214 .m2 = { .min = 5, .max = 11 },
478 .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, 215 .p = { .min = 14, .max = 42 },
479 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, 216 .p1 = { .min = 2, .max = 6 },
480 .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, 217 .p2 = { .dot_limit = 0,
481 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, 218 .p2_slow = 7, .p2_fast = 7
482 .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
483 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
484 .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
485 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
486 .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
487 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
488 .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
489 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
490 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
491 }, 219 },
492 .find_pll = intel_g4x_find_best_PLL, 220 .find_pll = intel_g4x_find_best_PLL,
493}; 221};
494 222
495static const intel_limit_t intel_limits_g4x_display_port = { 223static const intel_limit_t intel_limits_g4x_display_port = {
496 .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, 224 .dot = { .min = 161670, .max = 227000 },
497 .max = G4X_DOT_DISPLAY_PORT_MAX }, 225 .vco = { .min = 1750000, .max = 3500000},
498 .vco = { .min = G4X_VCO_MIN, 226 .n = { .min = 1, .max = 2 },
499 .max = G4X_VCO_MAX}, 227 .m = { .min = 97, .max = 108 },
500 .n = { .min = G4X_N_DISPLAY_PORT_MIN, 228 .m1 = { .min = 0x10, .max = 0x12 },
501 .max = G4X_N_DISPLAY_PORT_MAX }, 229 .m2 = { .min = 0x05, .max = 0x06 },
502 .m = { .min = G4X_M_DISPLAY_PORT_MIN, 230 .p = { .min = 10, .max = 20 },
503 .max = G4X_M_DISPLAY_PORT_MAX }, 231 .p1 = { .min = 1, .max = 2},
504 .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, 232 .p2 = { .dot_limit = 0,
505 .max = G4X_M1_DISPLAY_PORT_MAX }, 233 .p2_slow = 10, .p2_fast = 10 },
506 .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
507 .max = G4X_M2_DISPLAY_PORT_MAX },
508 .p = { .min = G4X_P_DISPLAY_PORT_MIN,
509 .max = G4X_P_DISPLAY_PORT_MAX },
510 .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
511 .max = G4X_P1_DISPLAY_PORT_MAX},
512 .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
513 .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
514 .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
515 .find_pll = intel_find_pll_g4x_dp, 234 .find_pll = intel_find_pll_g4x_dp,
516}; 235};
517 236
518static const intel_limit_t intel_limits_pineview_sdvo = { 237static const intel_limit_t intel_limits_pineview_sdvo = {
519 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 238 .dot = { .min = 20000, .max = 400000},
520 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, 239 .vco = { .min = 1700000, .max = 3500000 },
521 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, 240 /* Pineview's N counter is a ring counter */
522 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, 241 .n = { .min = 3, .max = 6 },
523 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, 242 .m = { .min = 2, .max = 256 },
524 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, 243 /* Pineview only has one combined m divider, which we treat as m2. */
525 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 244 .m1 = { .min = 0, .max = 0 },
526 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 245 .m2 = { .min = 0, .max = 254 },
527 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 246 .p = { .min = 5, .max = 80 },
528 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 247 .p1 = { .min = 1, .max = 8 },
248 .p2 = { .dot_limit = 200000,
249 .p2_slow = 10, .p2_fast = 5 },
529 .find_pll = intel_find_best_PLL, 250 .find_pll = intel_find_best_PLL,
530}; 251};
531 252
532static const intel_limit_t intel_limits_pineview_lvds = { 253static const intel_limit_t intel_limits_pineview_lvds = {
533 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 254 .dot = { .min = 20000, .max = 400000 },
534 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, 255 .vco = { .min = 1700000, .max = 3500000 },
535 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, 256 .n = { .min = 3, .max = 6 },
536 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, 257 .m = { .min = 2, .max = 256 },
537 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, 258 .m1 = { .min = 0, .max = 0 },
538 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, 259 .m2 = { .min = 0, .max = 254 },
539 .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX }, 260 .p = { .min = 7, .max = 112 },
540 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 261 .p1 = { .min = 1, .max = 8 },
541 /* Pineview only supports single-channel mode. */ 262 .p2 = { .dot_limit = 112000,
542 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 263 .p2_slow = 14, .p2_fast = 14 },
543 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
544 .find_pll = intel_find_best_PLL, 264 .find_pll = intel_find_best_PLL,
545}; 265};
546 266
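Per the comments in the Pineview tables above, both M dividers are folded into a single field treated as m2, so the effective M term differs from the i9xx formula. A hedged sketch of the two cases (helper names are illustrative; the real evaluation helpers sit outside this hunk):

	static int i9xx_effective_m(const intel_clock_t *clock)
	{
		return 5 * (clock->m1 + 2) + (clock->m2 + 2);	/* cascaded m1/m2 */
	}

	static int pineview_effective_m(const intel_clock_t *clock)
	{
		return clock->m2 + 2;	/* single combined divider */
	}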
267/* Ironlake / Sandybridge
268 *
269 * We calculate clock using (register_value + 2) for N/M1/M2, so here
270 * the range value for them is (actual_value - 2).
271 */
547static const intel_limit_t intel_limits_ironlake_dac = { 272static const intel_limit_t intel_limits_ironlake_dac = {
548 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 273 .dot = { .min = 25000, .max = 350000 },
549 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 274 .vco = { .min = 1760000, .max = 3510000 },
550 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, 275 .n = { .min = 1, .max = 5 },
551 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, 276 .m = { .min = 79, .max = 127 },
552 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 277 .m1 = { .min = 12, .max = 22 },
553 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 278 .m2 = { .min = 5, .max = 9 },
554 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, 279 .p = { .min = 5, .max = 80 },
555 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, 280 .p1 = { .min = 1, .max = 8 },
556 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 281 .p2 = { .dot_limit = 225000,
557 .p2_slow = IRONLAKE_DAC_P2_SLOW, 282 .p2_slow = 10, .p2_fast = 5 },
558 .p2_fast = IRONLAKE_DAC_P2_FAST },
559 .find_pll = intel_g4x_find_best_PLL, 283 .find_pll = intel_g4x_find_best_PLL,
560}; 284};
561 285
562static const intel_limit_t intel_limits_ironlake_single_lvds = { 286static const intel_limit_t intel_limits_ironlake_single_lvds = {
563 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 287 .dot = { .min = 25000, .max = 350000 },
564 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 288 .vco = { .min = 1760000, .max = 3510000 },
565 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, 289 .n = { .min = 1, .max = 3 },
566 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, 290 .m = { .min = 79, .max = 118 },
567 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 291 .m1 = { .min = 12, .max = 22 },
568 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 292 .m2 = { .min = 5, .max = 9 },
569 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, 293 .p = { .min = 28, .max = 112 },
570 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, 294 .p1 = { .min = 2, .max = 8 },
571 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 295 .p2 = { .dot_limit = 225000,
572 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, 296 .p2_slow = 14, .p2_fast = 14 },
573 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
574 .find_pll = intel_g4x_find_best_PLL, 297 .find_pll = intel_g4x_find_best_PLL,
575}; 298};
576 299
577static const intel_limit_t intel_limits_ironlake_dual_lvds = { 300static const intel_limit_t intel_limits_ironlake_dual_lvds = {
578 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 301 .dot = { .min = 25000, .max = 350000 },
579 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 302 .vco = { .min = 1760000, .max = 3510000 },
580 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, 303 .n = { .min = 1, .max = 3 },
581 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, 304 .m = { .min = 79, .max = 127 },
582 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 305 .m1 = { .min = 12, .max = 22 },
583 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 306 .m2 = { .min = 5, .max = 9 },
584 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, 307 .p = { .min = 14, .max = 56 },
585 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, 308 .p1 = { .min = 2, .max = 8 },
586 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 309 .p2 = { .dot_limit = 225000,
587 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, 310 .p2_slow = 7, .p2_fast = 7 },
588 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
589 .find_pll = intel_g4x_find_best_PLL, 311 .find_pll = intel_g4x_find_best_PLL,
590}; 312};
591 313
 314/* LVDS 100MHz refclk limits. */
592static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 315static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
593 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 316 .dot = { .min = 25000, .max = 350000 },
594 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 317 .vco = { .min = 1760000, .max = 3510000 },
595 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, 318 .n = { .min = 1, .max = 2 },
596 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, 319 .m = { .min = 79, .max = 126 },
597 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 320 .m1 = { .min = 12, .max = 22 },
598 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 321 .m2 = { .min = 5, .max = 9 },
599 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, 322 .p = { .min = 28, .max = 112 },
600 .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX }, 323 .p1 = { .min = 2, .max = 8 },
601 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 324 .p2 = { .dot_limit = 225000,
602 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, 325 .p2_slow = 14, .p2_fast = 14 },
603 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
604 .find_pll = intel_g4x_find_best_PLL, 326 .find_pll = intel_g4x_find_best_PLL,
605}; 327};
606 328
607static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 329static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
608 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 330 .dot = { .min = 25000, .max = 350000 },
609 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 331 .vco = { .min = 1760000, .max = 3510000 },
610 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, 332 .n = { .min = 1, .max = 3 },
611 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, 333 .m = { .min = 79, .max = 126 },
612 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 334 .m1 = { .min = 12, .max = 22 },
613 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 335 .m2 = { .min = 5, .max = 9 },
614 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, 336 .p = { .min = 14, .max = 42 },
615 .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX }, 337 .p1 = { .min = 2, .max = 6 },
616 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 338 .p2 = { .dot_limit = 225000,
617 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, 339 .p2_slow = 7, .p2_fast = 7 },
618 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
619 .find_pll = intel_g4x_find_best_PLL, 340 .find_pll = intel_g4x_find_best_PLL,
620}; 341};
621 342
622static const intel_limit_t intel_limits_ironlake_display_port = { 343static const intel_limit_t intel_limits_ironlake_display_port = {
623 .dot = { .min = IRONLAKE_DOT_MIN, 344 .dot = { .min = 25000, .max = 350000 },
624 .max = IRONLAKE_DOT_MAX }, 345 .vco = { .min = 1760000, .max = 3510000},
625 .vco = { .min = IRONLAKE_VCO_MIN, 346 .n = { .min = 1, .max = 2 },
626 .max = IRONLAKE_VCO_MAX}, 347 .m = { .min = 81, .max = 90 },
627 .n = { .min = IRONLAKE_DP_N_MIN, 348 .m1 = { .min = 12, .max = 22 },
628 .max = IRONLAKE_DP_N_MAX }, 349 .m2 = { .min = 5, .max = 9 },
629 .m = { .min = IRONLAKE_DP_M_MIN, 350 .p = { .min = 10, .max = 20 },
630 .max = IRONLAKE_DP_M_MAX }, 351 .p1 = { .min = 1, .max = 2},
631 .m1 = { .min = IRONLAKE_M1_MIN, 352 .p2 = { .dot_limit = 0,
632 .max = IRONLAKE_M1_MAX }, 353 .p2_slow = 10, .p2_fast = 10 },
633 .m2 = { .min = IRONLAKE_M2_MIN,
634 .max = IRONLAKE_M2_MAX },
635 .p = { .min = IRONLAKE_DP_P_MIN,
636 .max = IRONLAKE_DP_P_MAX },
637 .p1 = { .min = IRONLAKE_DP_P1_MIN,
638 .max = IRONLAKE_DP_P1_MAX},
639 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
640 .p2_slow = IRONLAKE_DP_P2_SLOW,
641 .p2_fast = IRONLAKE_DP_P2_FAST },
642 .find_pll = intel_find_pll_ironlake_dp, 354 .find_pll = intel_find_pll_ironlake_dp,
643}; 355};
644 356
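How one of the limit tables above gets picked: a sketch modeled on this era's intel_ironlake_limit(), keyed off the output type and whether the 100 MHz SSC refclk is in use. The shape is assumed; the actual selector lives outside this hunk.

	static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
							 int refclk)
	{
		struct drm_device *dev = crtc->dev;
		struct drm_i915_private *dev_priv = dev->dev_private;
		const intel_limit_t *limit;

		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
			if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
			    LVDS_CLKB_POWER_UP) {
				/* LVDS dual channel */
				if (refclk == 100000)
					limit = &intel_limits_ironlake_dual_lvds_100m;
				else
					limit = &intel_limits_ironlake_dual_lvds;
			} else {
				if (refclk == 100000)
					limit = &intel_limits_ironlake_single_lvds_100m;
				else
					limit = &intel_limits_ironlake_single_lvds;
			}
		} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
			   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
			limit = &intel_limits_ironlake_display_port;
		else
			limit = &intel_limits_ironlake_dac;

		return limit;
	}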
@@ -1828,7 +1540,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1828 u32 blt_ecoskpd; 1540 u32 blt_ecoskpd;
1829 1541
1830 /* Make sure blitter notifies FBC of writes */ 1542 /* Make sure blitter notifies FBC of writes */
1831 __gen6_gt_force_wake_get(dev_priv); 1543 gen6_gt_force_wake_get(dev_priv);
1832 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); 1544 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1833 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 1545 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1834 GEN6_BLITTER_LOCK_SHIFT; 1546 GEN6_BLITTER_LOCK_SHIFT;
@@ -1839,7 +1551,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1839 GEN6_BLITTER_LOCK_SHIFT); 1551 GEN6_BLITTER_LOCK_SHIFT);
1840 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1552 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1841 POSTING_READ(GEN6_BLITTER_ECOSKPD); 1553 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1842 __gen6_gt_force_wake_put(dev_priv); 1554 gen6_gt_force_wake_put(dev_priv);
1843} 1555}
1844 1556
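The switch from __gen6_gt_force_wake_get() to the underscore-less wrappers suggests reference-counted forcewake. A hedged sketch of the wrapper shape this implies; the real helpers live in i915_drv.c, outside this diff, and the forcewake_count field is an assumption:

	/* Assumed shape: only touch the hardware on the 0 <-> 1 transitions. */
	void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
	{
		WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
		if (dev_priv->forcewake_count++ == 0)
			__gen6_gt_force_wake_get(dev_priv);
	}

	void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
	{
		WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
		if (--dev_priv->forcewake_count == 0)
			__gen6_gt_force_wake_put(dev_priv);
	}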
1845static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1557static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -2019,6 +1731,11 @@ static void intel_update_fbc(struct drm_device *dev)
2019 intel_fb = to_intel_framebuffer(fb); 1731 intel_fb = to_intel_framebuffer(fb);
2020 obj = intel_fb->obj; 1732 obj = intel_fb->obj;
2021 1733
1734 if (!i915_enable_fbc) {
1735 DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
1736 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1737 goto out_disable;
1738 }
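The new i915_enable_fbc check relies on a module parameter declared elsewhere in the series; a hedged sketch of that plumbing (it would belong in i915_drv.c, and the permission bits are assumptions):

	int i915_enable_fbc __read_mostly = 0;
	module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
	MODULE_PARM_DESC(i915_enable_fbc,
			 "Enable frame buffer compression for power savings "
			 "(default: false)");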
2022 if (intel_fb->obj->base.size > dev_priv->cfb_size) { 1739 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
2023 DRM_DEBUG_KMS("framebuffer too large, disabling " 1740 DRM_DEBUG_KMS("framebuffer too large, disabling "
2024 "compression\n"); 1741 "compression\n");
@@ -2339,8 +2056,13 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2339 /* enable normal train */ 2056 /* enable normal train */
2340 reg = FDI_TX_CTL(pipe); 2057 reg = FDI_TX_CTL(pipe);
2341 temp = I915_READ(reg); 2058 temp = I915_READ(reg);
2342 temp &= ~FDI_LINK_TRAIN_NONE; 2059 if (IS_IVYBRIDGE(dev)) {
2343 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 2060 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2061 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2062 } else {
2063 temp &= ~FDI_LINK_TRAIN_NONE;
2064 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2065 }
2344 I915_WRITE(reg, temp); 2066 I915_WRITE(reg, temp);
2345 2067
2346 reg = FDI_RX_CTL(pipe); 2068 reg = FDI_RX_CTL(pipe);
@@ -2357,6 +2079,11 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2357 /* wait one idle pattern time */ 2079 /* wait one idle pattern time */
2358 POSTING_READ(reg); 2080 POSTING_READ(reg);
2359 udelay(1000); 2081 udelay(1000);
2082
2083 /* IVB wants error correction enabled */
2084 if (IS_IVYBRIDGE(dev))
2085 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2086 FDI_FE_ERRC_ENABLE);
2360} 2087}
2361 2088
2362/* The FDI link training functions for ILK/Ibexpeak. */ 2089/* The FDI link training functions for ILK/Ibexpeak. */
@@ -2584,7 +2311,116 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2584 DRM_DEBUG_KMS("FDI train done.\n"); 2311 DRM_DEBUG_KMS("FDI train done.\n");
2585} 2312}
2586 2313
2587static void ironlake_fdi_enable(struct drm_crtc *crtc) 2314/* Manual link training for Ivy Bridge A0 parts */
2315static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2316{
2317 struct drm_device *dev = crtc->dev;
2318 struct drm_i915_private *dev_priv = dev->dev_private;
2319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2320 int pipe = intel_crtc->pipe;
2321 u32 reg, temp, i;
2322
2323 /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock bits
2324 for the train result */
2325 reg = FDI_RX_IMR(pipe);
2326 temp = I915_READ(reg);
2327 temp &= ~FDI_RX_SYMBOL_LOCK;
2328 temp &= ~FDI_RX_BIT_LOCK;
2329 I915_WRITE(reg, temp);
2330
2331 POSTING_READ(reg);
2332 udelay(150);
2333
2334 /* enable CPU FDI TX and PCH FDI RX */
2335 reg = FDI_TX_CTL(pipe);
2336 temp = I915_READ(reg);
2337 temp &= ~(7 << 19);
2338 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2339 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2340 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2341 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2342 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2343 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2344
2345 reg = FDI_RX_CTL(pipe);
2346 temp = I915_READ(reg);
2347 temp &= ~FDI_LINK_TRAIN_AUTO;
2348 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2349 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2350 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2351
2352 POSTING_READ(reg);
2353 udelay(150);
2354
2355 for (i = 0; i < 4; i++) {
2356 reg = FDI_TX_CTL(pipe);
2357 temp = I915_READ(reg);
2358 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2359 temp |= snb_b_fdi_train_param[i];
2360 I915_WRITE(reg, temp);
2361
2362 POSTING_READ(reg);
2363 udelay(500);
2364
2365 reg = FDI_RX_IIR(pipe);
2366 temp = I915_READ(reg);
2367 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2368
2369 if (temp & FDI_RX_BIT_LOCK ||
2370 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2371 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2372 DRM_DEBUG_KMS("FDI train 1 done.\n");
2373 break;
2374 }
2375 }
2376 if (i == 4)
2377 DRM_ERROR("FDI train 1 fail!\n");
2378
2379 /* Train 2 */
2380 reg = FDI_TX_CTL(pipe);
2381 temp = I915_READ(reg);
2382 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2383 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2384 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2385 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2386 I915_WRITE(reg, temp);
2387
2388 reg = FDI_RX_CTL(pipe);
2389 temp = I915_READ(reg);
2390 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2391 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2392 I915_WRITE(reg, temp);
2393
2394 POSTING_READ(reg);
2395 udelay(150);
2396
2397 for (i = 0; i < 4; i++) {
2398 reg = FDI_TX_CTL(pipe);
2399 temp = I915_READ(reg);
2400 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2401 temp |= snb_b_fdi_train_param[i];
2402 I915_WRITE(reg, temp);
2403
2404 POSTING_READ(reg);
2405 udelay(500);
2406
2407 reg = FDI_RX_IIR(pipe);
2408 temp = I915_READ(reg);
2409 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2410
2411 if (temp & FDI_RX_SYMBOL_LOCK) {
2412 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2413 DRM_DEBUG_KMS("FDI train 2 done.\n");
2414 break;
2415 }
2416 }
2417 if (i == 4)
2418 DRM_ERROR("FDI train 2 fail!\n");
2419
2420 DRM_DEBUG_KMS("FDI train done.\n");
2421}
2422
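Both training phases above follow the same pattern: program a voltage-swing/emphasis candidate, then poll FDI_RX_IIR for the requested lock bit. A condensed sketch of that pattern (the snb_b_fdi_train_param table and register names come from the hunk; this helper itself is illustrative only):

	/* Illustrative only: one training phase = try each vswing/emphasis
	 * setting until the requested lock bit shows up in FDI_RX_IIR. */
	static bool fdi_try_train_patterns(struct drm_device *dev, int pipe,
					   u32 lock_bit)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		u32 reg, temp;
		int i;

		for (i = 0; i < 4; i++) {
			reg = FDI_TX_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
			temp |= snb_b_fdi_train_param[i];
			I915_WRITE(reg, temp);

			POSTING_READ(reg);
			udelay(500);

			temp = I915_READ(FDI_RX_IIR(pipe));
			if (temp & lock_bit) {
				/* write 1 to clear the sticky lock bit */
				I915_WRITE(FDI_RX_IIR(pipe), temp | lock_bit);
				return true;
			}
		}
		return false;
	}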
2423static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2588{ 2424{
2589 struct drm_device *dev = crtc->dev; 2425 struct drm_device *dev = crtc->dev;
2590 struct drm_i915_private *dev_priv = dev->dev_private; 2426 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2757,10 +2593,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2757 u32 reg, temp; 2593 u32 reg, temp;
2758 2594
2759 /* For PCH output, training FDI link */ 2595 /* For PCH output, training FDI link */
2760 if (IS_GEN6(dev)) 2596 dev_priv->display.fdi_link_train(crtc);
2761 gen6_fdi_link_train(crtc);
2762 else
2763 ironlake_fdi_link_train(crtc);
2764 2597
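The open-coded generation check becomes a call through dev_priv->display.fdi_link_train; a hedged sketch of the init-time hookup this relies on (the actual assignments live in intel_init_display(), outside this diff):

	/* Assumed wiring at driver init, one trainer per generation. */
	if (IS_GEN5(dev))
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	else if (IS_GEN6(dev))
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	else if (IS_IVYBRIDGE(dev))
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;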
2765 intel_enable_pch_pll(dev_priv, pipe); 2598 intel_enable_pch_pll(dev_priv, pipe);
2766 2599
@@ -2850,7 +2683,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2850 is_pch_port = intel_crtc_driving_pch(crtc); 2683 is_pch_port = intel_crtc_driving_pch(crtc);
2851 2684
2852 if (is_pch_port) 2685 if (is_pch_port)
2853 ironlake_fdi_enable(crtc); 2686 ironlake_fdi_pll_enable(crtc);
2854 else 2687 else
2855 ironlake_fdi_disable(crtc); 2688 ironlake_fdi_disable(crtc);
2856 2689
@@ -2873,7 +2706,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2873 ironlake_pch_enable(crtc); 2706 ironlake_pch_enable(crtc);
2874 2707
2875 intel_crtc_load_lut(crtc); 2708 intel_crtc_load_lut(crtc);
2709
2710 mutex_lock(&dev->struct_mutex);
2876 intel_update_fbc(dev); 2711 intel_update_fbc(dev);
2712 mutex_unlock(&dev->struct_mutex);
2713
2877 intel_crtc_update_cursor(crtc, true); 2714 intel_crtc_update_cursor(crtc, true);
2878} 2715}
2879 2716
@@ -2969,8 +2806,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2969 2806
2970 intel_crtc->active = false; 2807 intel_crtc->active = false;
2971 intel_update_watermarks(dev); 2808 intel_update_watermarks(dev);
2809
2810 mutex_lock(&dev->struct_mutex);
2972 intel_update_fbc(dev); 2811 intel_update_fbc(dev);
2973 intel_clear_scanline_wait(dev); 2812 intel_clear_scanline_wait(dev);
2813 mutex_unlock(&dev->struct_mutex);
2974} 2814}
2975 2815
2976static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) 2816static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3497,11 +3337,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3497 1000; 3337 1000;
3498 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); 3338 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3499 3339
3500 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); 3340 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3501 3341
3502 wm_size = fifo_size - (entries_required + wm->guard_size); 3342 wm_size = fifo_size - (entries_required + wm->guard_size);
3503 3343
3504 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); 3344 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3505 3345
3506 /* Don't promote wm_size to unsigned... */ 3346 /* Don't promote wm_size to unsigned... */
3507 if (wm_size > (long)wm->max_wm) 3347 if (wm_size > (long)wm->max_wm)
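A worked example of the computation above, with illustrative inputs; the start of intel_calculate_wm() sits outside this hunk, so the pixel-size and latency factors are assumptions:

	/* 148.5 MHz dot clock, 4 bytes/pixel, ~400 ns of memory latency:
	 * bytes fetched during the latency window, rounded up to cachelines. */
	unsigned long entries_required = 148500 / 1000 * 4 * 400 / 1000; /* ~236 bytes */
	entries_required = DIV_ROUND_UP(entries_required, 64);	/* 4 cachelines */
	long wm_size = 512 - (entries_required + 2);	/* fifo 512, guard 2 -> 506 */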
@@ -3823,13 +3663,13 @@ static bool g4x_check_srwm(struct drm_device *dev,
3823 display_wm, cursor_wm); 3663 display_wm, cursor_wm);
3824 3664
3825 if (display_wm > display->max_wm) { 3665 if (display_wm > display->max_wm) {
3826 DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n", 3666 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3827 display_wm, display->max_wm); 3667 display_wm, display->max_wm);
3828 return false; 3668 return false;
3829 } 3669 }
3830 3670
3831 if (cursor_wm > cursor->max_wm) { 3671 if (cursor_wm > cursor->max_wm) {
3832 DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n", 3672 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
3833 cursor_wm, cursor->max_wm); 3673 cursor_wm, cursor->max_wm);
3834 return false; 3674 return false;
3835 } 3675 }
@@ -4516,34 +4356,28 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4516 return dev_priv->lvds_use_ssc && i915_panel_use_ssc; 4356 return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
4517} 4357}
4518 4358
4519static int intel_crtc_mode_set(struct drm_crtc *crtc, 4359static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4520 struct drm_display_mode *mode, 4360 struct drm_display_mode *mode,
4521 struct drm_display_mode *adjusted_mode, 4361 struct drm_display_mode *adjusted_mode,
4522 int x, int y, 4362 int x, int y,
4523 struct drm_framebuffer *old_fb) 4363 struct drm_framebuffer *old_fb)
4524{ 4364{
4525 struct drm_device *dev = crtc->dev; 4365 struct drm_device *dev = crtc->dev;
4526 struct drm_i915_private *dev_priv = dev->dev_private; 4366 struct drm_i915_private *dev_priv = dev->dev_private;
4527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4367 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4528 int pipe = intel_crtc->pipe; 4368 int pipe = intel_crtc->pipe;
4529 int plane = intel_crtc->plane; 4369 int plane = intel_crtc->plane;
4530 u32 fp_reg, dpll_reg;
4531 int refclk, num_connectors = 0; 4370 int refclk, num_connectors = 0;
4532 intel_clock_t clock, reduced_clock; 4371 intel_clock_t clock, reduced_clock;
4533 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; 4372 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4534 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 4373 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4535 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 4374 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4536 struct intel_encoder *has_edp_encoder = NULL;
4537 struct drm_mode_config *mode_config = &dev->mode_config; 4375 struct drm_mode_config *mode_config = &dev->mode_config;
4538 struct intel_encoder *encoder; 4376 struct intel_encoder *encoder;
4539 const intel_limit_t *limit; 4377 const intel_limit_t *limit;
4540 int ret; 4378 int ret;
4541 struct fdi_m_n m_n = {0}; 4379 u32 temp;
4542 u32 reg, temp;
4543 u32 lvds_sync = 0; 4380 u32 lvds_sync = 0;
4544 int target_clock;
4545
4546 drm_vblank_pre_modeset(dev, pipe);
4547 4381
4548 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 4382 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4549 if (encoder->base.crtc != crtc) 4383 if (encoder->base.crtc != crtc)
@@ -4571,9 +4405,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4571 case INTEL_OUTPUT_DISPLAYPORT: 4405 case INTEL_OUTPUT_DISPLAYPORT:
4572 is_dp = true; 4406 is_dp = true;
4573 break; 4407 break;
4574 case INTEL_OUTPUT_EDP:
4575 has_edp_encoder = encoder;
4576 break;
4577 } 4408 }
4578 4409
4579 num_connectors++; 4410 num_connectors++;
@@ -4585,9 +4416,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4585 refclk / 1000); 4416 refclk / 1000);
4586 } else if (!IS_GEN2(dev)) { 4417 } else if (!IS_GEN2(dev)) {
4587 refclk = 96000; 4418 refclk = 96000;
4588 if (HAS_PCH_SPLIT(dev) &&
4589 (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
4590 refclk = 120000; /* 120MHz refclk */
4591 } else { 4419 } else {
4592 refclk = 48000; 4420 refclk = 48000;
4593 } 4421 }
@@ -4601,7 +4429,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4601 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 4429 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4602 if (!ok) { 4430 if (!ok) {
4603 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 4431 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4604 drm_vblank_post_modeset(dev, pipe);
4605 return -EINVAL; 4432 return -EINVAL;
4606 } 4433 }
4607 4434
@@ -4645,143 +4472,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4645 } 4472 }
4646 } 4473 }
4647 4474
4648 /* FDI link */
4649 if (HAS_PCH_SPLIT(dev)) {
4650 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4651 int lane = 0, link_bw, bpp;
4652 /* CPU eDP doesn't require FDI link, so just set DP M/N
4653 according to current link config */
4654 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4655 target_clock = mode->clock;
4656 intel_edp_link_config(has_edp_encoder,
4657 &lane, &link_bw);
4658 } else {
4659 /* [e]DP over FDI requires target mode clock
4660 instead of link clock */
4661 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4662 target_clock = mode->clock;
4663 else
4664 target_clock = adjusted_mode->clock;
4665
4666 /* FDI is a binary signal running at ~2.7GHz, encoding
4667 * each output octet as 10 bits. The actual frequency
4668 * is stored as a divider into a 100MHz clock, and the
4669 * mode pixel clock is stored in units of 1KHz.
4670 * Hence the bw of each lane in terms of the mode signal
4671 * is:
4672 */
4673 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4674 }
4675
4676 /* determine panel color depth */
4677 temp = I915_READ(PIPECONF(pipe));
4678 temp &= ~PIPE_BPC_MASK;
4679 if (is_lvds) {
4680 /* the BPC will be 6 if it is an 18-bit LVDS panel */
4681 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
4682 temp |= PIPE_8BPC;
4683 else
4684 temp |= PIPE_6BPC;
4685 } else if (has_edp_encoder) {
4686 switch (dev_priv->edp.bpp/3) {
4687 case 8:
4688 temp |= PIPE_8BPC;
4689 break;
4690 case 10:
4691 temp |= PIPE_10BPC;
4692 break;
4693 case 6:
4694 temp |= PIPE_6BPC;
4695 break;
4696 case 12:
4697 temp |= PIPE_12BPC;
4698 break;
4699 }
4700 } else
4701 temp |= PIPE_8BPC;
4702 I915_WRITE(PIPECONF(pipe), temp);
4703
4704 switch (temp & PIPE_BPC_MASK) {
4705 case PIPE_8BPC:
4706 bpp = 24;
4707 break;
4708 case PIPE_10BPC:
4709 bpp = 30;
4710 break;
4711 case PIPE_6BPC:
4712 bpp = 18;
4713 break;
4714 case PIPE_12BPC:
4715 bpp = 36;
4716 break;
4717 default:
4718 DRM_ERROR("unknown pipe bpc value\n");
4719 bpp = 24;
4720 }
4721
4722 if (!lane) {
4723 /*
4724 * Account for spread spectrum to avoid
4725 * oversubscribing the link. Max center spread
4726 * is 2.5%; use 5% for safety's sake.
4727 */
4728 u32 bps = target_clock * bpp * 21 / 20;
4729 lane = bps / (link_bw * 8) + 1;
4730 }
4731
4732 intel_crtc->fdi_lanes = lane;
4733
4734 if (pixel_multiplier > 1)
4735 link_bw *= pixel_multiplier;
4736 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
4737 }
4738
4739 /* Ironlake: try to set up the display ref clock before DPLL
4740 * enabling. This is only under the driver's control after
4741 * PCH B stepping; earlier chipset steppings ignore
4742 * this setting.
4743 */
4744 if (HAS_PCH_SPLIT(dev)) {
4745 temp = I915_READ(PCH_DREF_CONTROL);
4746 /* Always enable nonspread source */
4747 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4748 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4749 temp &= ~DREF_SSC_SOURCE_MASK;
4750 temp |= DREF_SSC_SOURCE_ENABLE;
4751 I915_WRITE(PCH_DREF_CONTROL, temp);
4752
4753 POSTING_READ(PCH_DREF_CONTROL);
4754 udelay(200);
4755
4756 if (has_edp_encoder) {
4757 if (intel_panel_use_ssc(dev_priv)) {
4758 temp |= DREF_SSC1_ENABLE;
4759 I915_WRITE(PCH_DREF_CONTROL, temp);
4760
4761 POSTING_READ(PCH_DREF_CONTROL);
4762 udelay(200);
4763 }
4764 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4765
4766 /* Enable CPU source on CPU attached eDP */
4767 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4768 if (intel_panel_use_ssc(dev_priv))
4769 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4770 else
4771 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4772 } else {
4773 /* Enable SSC on PCH eDP if needed */
4774 if (intel_panel_use_ssc(dev_priv)) {
4775 DRM_ERROR("enabling SSC on PCH\n");
4776 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4777 }
4778 }
4779 I915_WRITE(PCH_DREF_CONTROL, temp);
4780 POSTING_READ(PCH_DREF_CONTROL);
4781 udelay(200);
4782 }
4783 }
4784
4785 if (IS_PINEVIEW(dev)) { 4475 if (IS_PINEVIEW(dev)) {
4786 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 4476 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
4787 if (has_reduced_clock) 4477 if (has_reduced_clock)
@@ -4794,25 +4484,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4794 reduced_clock.m2; 4484 reduced_clock.m2;
4795 } 4485 }
4796 4486
4797 /* Enable autotuning of the PLL clock (if permissible) */ 4487 dpll = DPLL_VGA_MODE_DIS;
4798 if (HAS_PCH_SPLIT(dev)) {
4799 int factor = 21;
4800
4801 if (is_lvds) {
4802 if ((intel_panel_use_ssc(dev_priv) &&
4803 dev_priv->lvds_ssc_freq == 100) ||
4804 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4805 factor = 25;
4806 } else if (is_sdvo && is_tv)
4807 factor = 20;
4808
4809 if (clock.m1 < factor * clock.n)
4810 fp |= FP_CB_TUNE;
4811 }
4812
4813 dpll = 0;
4814 if (!HAS_PCH_SPLIT(dev))
4815 dpll = DPLL_VGA_MODE_DIS;
4816 4488
4817 if (!IS_GEN2(dev)) { 4489 if (!IS_GEN2(dev)) {
4818 if (is_lvds) 4490 if (is_lvds)
@@ -4824,12 +4496,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4824 if (pixel_multiplier > 1) { 4496 if (pixel_multiplier > 1) {
4825 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4497 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4826 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 4498 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4827 else if (HAS_PCH_SPLIT(dev))
4828 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4829 } 4499 }
4830 dpll |= DPLL_DVO_HIGH_SPEED; 4500 dpll |= DPLL_DVO_HIGH_SPEED;
4831 } 4501 }
4832 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4502 if (is_dp)
4833 dpll |= DPLL_DVO_HIGH_SPEED; 4503 dpll |= DPLL_DVO_HIGH_SPEED;
4834 4504
4835 /* compute bitmask from p1 value */ 4505 /* compute bitmask from p1 value */
@@ -4837,9 +4507,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4837 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 4507 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4838 else { 4508 else {
4839 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4509 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4840 /* also FPA1 */
4841 if (HAS_PCH_SPLIT(dev))
4842 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4843 if (IS_G4X(dev) && has_reduced_clock) 4510 if (IS_G4X(dev) && has_reduced_clock)
4844 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 4511 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4845 } 4512 }
@@ -4857,7 +4524,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4857 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 4524 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4858 break; 4525 break;
4859 } 4526 }
4860 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 4527 if (INTEL_INFO(dev)->gen >= 4)
4861 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 4528 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4862 } else { 4529 } else {
4863 if (is_lvds) { 4530 if (is_lvds) {
@@ -4891,12 +4558,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4891 4558
4892 /* Ironlake's plane is forced to pipe, bit 24 is to 4559 /* Ironlake's plane is forced to pipe, bit 24 is to
4893 enable color space conversion */ 4560 enable color space conversion */
4894 if (!HAS_PCH_SPLIT(dev)) { 4561 if (pipe == 0)
4895 if (pipe == 0) 4562 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4896 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4563 else
4897 else 4564 dspcntr |= DISPPLANE_SEL_PIPE_B;
4898 dspcntr |= DISPPLANE_SEL_PIPE_B;
4899 }
4900 4565
4901 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4566 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4902 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4567 /* Enable pixel doubling when the dot clock is > 90% of the (display)
@@ -4912,27 +4577,506 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4912 pipeconf &= ~PIPECONF_DOUBLE_WIDE; 4577 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4913 } 4578 }
4914 4579
4915 if (!HAS_PCH_SPLIT(dev)) 4580 dpll |= DPLL_VCO_ENABLE;
4916 dpll |= DPLL_VCO_ENABLE;
4917 4581
4918 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 4582 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4919 drm_mode_debug_printmodeline(mode); 4583 drm_mode_debug_printmodeline(mode);
4920 4584
4921 /* assign to Ironlake registers */ 4585 I915_WRITE(FP0(pipe), fp);
4922 if (HAS_PCH_SPLIT(dev)) { 4586 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4923 fp_reg = PCH_FP0(pipe); 4587
4924 dpll_reg = PCH_DPLL(pipe); 4588 POSTING_READ(DPLL(pipe));
4589 udelay(150);
4590
4591 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4592 * This is an exception to the general rule that mode_set doesn't turn
4593 * things on.
4594 */
4595 if (is_lvds) {
4596 temp = I915_READ(LVDS);
4597 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4598 if (pipe == 1) {
4599 temp |= LVDS_PIPEB_SELECT;
4600 } else {
4601 temp &= ~LVDS_PIPEB_SELECT;
4602 }
4603 /* set the corresponding LVDS_BORDER bit */
4604 temp |= dev_priv->lvds_border_bits;
4605 /* Set the B0-B3 data pairs corresponding to whether we're going to
4606 * set the DPLLs for dual-channel mode or not.
4607 */
4608 if (clock.p2 == 7)
4609 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4610 else
4611 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4612
4613 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4614 * appropriately here, but we need to look more thoroughly into how
4615 * panels behave in the two modes.
4616 */
4617 /* set the dithering flag on LVDS as needed */
4618 if (INTEL_INFO(dev)->gen >= 4) {
4619 if (dev_priv->lvds_dither)
4620 temp |= LVDS_ENABLE_DITHER;
4621 else
4622 temp &= ~LVDS_ENABLE_DITHER;
4623 }
4624 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4625 lvds_sync |= LVDS_HSYNC_POLARITY;
4626 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4627 lvds_sync |= LVDS_VSYNC_POLARITY;
4628 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4629 != lvds_sync) {
4630 char flags[2] = "-+";
4631 DRM_INFO("Changing LVDS panel from "
4632 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4633 flags[!(temp & LVDS_HSYNC_POLARITY)],
4634 flags[!(temp & LVDS_VSYNC_POLARITY)],
4635 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
4636 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
4637 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4638 temp |= lvds_sync;
4639 }
4640 I915_WRITE(LVDS, temp);
4641 }
4642
4643 if (is_dp) {
4644 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4645 }
4646
4647 I915_WRITE(DPLL(pipe), dpll);
4648
4649 /* Wait for the clocks to stabilize. */
4650 POSTING_READ(DPLL(pipe));
4651 udelay(150);
4652
4653 if (INTEL_INFO(dev)->gen >= 4) {
4654 temp = 0;
4655 if (is_sdvo) {
4656 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4657 if (temp > 1)
4658 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4659 else
4660 temp = 0;
4661 }
4662 I915_WRITE(DPLL_MD(pipe), temp);
4663 } else {
4664 /* The pixel multiplier can only be updated once the
4665 * DPLL is enabled and the clocks are stable.
4666 *
4667 * So write it again.
4668 */
4669 I915_WRITE(DPLL(pipe), dpll);
4670 }
4671
4672 intel_crtc->lowfreq_avail = false;
4673 if (is_lvds && has_reduced_clock && i915_powersave) {
4674 I915_WRITE(FP1(pipe), fp2);
4675 intel_crtc->lowfreq_avail = true;
4676 if (HAS_PIPE_CXSR(dev)) {
4677 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4678 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4679 }
4680 } else {
4681 I915_WRITE(FP1(pipe), fp);
4682 if (HAS_PIPE_CXSR(dev)) {
4683 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4684 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4685 }
4686 }
4687
4688 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4689 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4690 /* the chip adds 2 halflines automatically */
4691 adjusted_mode->crtc_vdisplay -= 1;
4692 adjusted_mode->crtc_vtotal -= 1;
4693 adjusted_mode->crtc_vblank_start -= 1;
4694 adjusted_mode->crtc_vblank_end -= 1;
4695 adjusted_mode->crtc_vsync_end -= 1;
4696 adjusted_mode->crtc_vsync_start -= 1;
4697 } else
4698 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
4699
4700 I915_WRITE(HTOTAL(pipe),
4701 (adjusted_mode->crtc_hdisplay - 1) |
4702 ((adjusted_mode->crtc_htotal - 1) << 16));
4703 I915_WRITE(HBLANK(pipe),
4704 (adjusted_mode->crtc_hblank_start - 1) |
4705 ((adjusted_mode->crtc_hblank_end - 1) << 16));
4706 I915_WRITE(HSYNC(pipe),
4707 (adjusted_mode->crtc_hsync_start - 1) |
4708 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4709
4710 I915_WRITE(VTOTAL(pipe),
4711 (adjusted_mode->crtc_vdisplay - 1) |
4712 ((adjusted_mode->crtc_vtotal - 1) << 16));
4713 I915_WRITE(VBLANK(pipe),
4714 (adjusted_mode->crtc_vblank_start - 1) |
4715 ((adjusted_mode->crtc_vblank_end - 1) << 16));
4716 I915_WRITE(VSYNC(pipe),
4717 (adjusted_mode->crtc_vsync_start - 1) |
4718 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4719
4720 /* pipesrc and dspsize control the size that is scaled from,
4721 * which should always be the user's requested size.
4722 */
4723 I915_WRITE(DSPSIZE(plane),
4724 ((mode->vdisplay - 1) << 16) |
4725 (mode->hdisplay - 1));
4726 I915_WRITE(DSPPOS(plane), 0);
4727 I915_WRITE(PIPESRC(pipe),
4728 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4729
4730 I915_WRITE(PIPECONF(pipe), pipeconf);
4731 POSTING_READ(PIPECONF(pipe));
4732 intel_enable_pipe(dev_priv, pipe, false);
4733
4734 intel_wait_for_vblank(dev, pipe);
4735
4736 I915_WRITE(DSPCNTR(plane), dspcntr);
4737 POSTING_READ(DSPCNTR(plane));
4738
4739 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4740
4741 intel_update_watermarks(dev);
4742
4743 return ret;
4744}
4745
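With intel_crtc_mode_set() split into the i9xx variant above and the Ironlake variant below, a caller is expected to pick one at init time; a hedged sketch of that hookup (the real assignment sits in intel_init_display(), outside this diff):

	if (HAS_PCH_SPLIT(dev))
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
	else
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;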
4746static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4747 struct drm_display_mode *mode,
4748 struct drm_display_mode *adjusted_mode,
4749 int x, int y,
4750 struct drm_framebuffer *old_fb)
4751{
4752 struct drm_device *dev = crtc->dev;
4753 struct drm_i915_private *dev_priv = dev->dev_private;
4754 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4755 int pipe = intel_crtc->pipe;
4756 int plane = intel_crtc->plane;
4757 int refclk, num_connectors = 0;
4758 intel_clock_t clock, reduced_clock;
4759 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4760 bool ok, has_reduced_clock = false, is_sdvo = false;
4761 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4762 struct intel_encoder *has_edp_encoder = NULL;
4763 struct drm_mode_config *mode_config = &dev->mode_config;
4764 struct intel_encoder *encoder;
4765 const intel_limit_t *limit;
4766 int ret;
4767 struct fdi_m_n m_n = {0};
4768 u32 temp;
4769 u32 lvds_sync = 0;
4770 int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
4771
4772 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4773 if (encoder->base.crtc != crtc)
4774 continue;
4775
4776 switch (encoder->type) {
4777 case INTEL_OUTPUT_LVDS:
4778 is_lvds = true;
4779 break;
4780 case INTEL_OUTPUT_SDVO:
4781 case INTEL_OUTPUT_HDMI:
4782 is_sdvo = true;
4783 if (encoder->needs_tv_clock)
4784 is_tv = true;
4785 break;
4786 case INTEL_OUTPUT_TVOUT:
4787 is_tv = true;
4788 break;
4789 case INTEL_OUTPUT_ANALOG:
4790 is_crt = true;
4791 break;
4792 case INTEL_OUTPUT_DISPLAYPORT:
4793 is_dp = true;
4794 break;
4795 case INTEL_OUTPUT_EDP:
4796 has_edp_encoder = encoder;
4797 break;
4798 }
4799
4800 num_connectors++;
4801 }
4802
4803 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4804 refclk = dev_priv->lvds_ssc_freq * 1000;
4805 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4806 refclk / 1000);
4807 } else {
4808 refclk = 96000;
4809 if (!has_edp_encoder ||
4810 intel_encoder_is_pch_edp(&has_edp_encoder->base))
4811 refclk = 120000; /* 120MHz refclk */
4812 }
4813
4814 /*
4815 * Returns a set of divisors for the desired target clock with the given
4816 * refclk, or FALSE. The returned values represent the clock equation:
4817 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4818 */
4819 limit = intel_limit(crtc, refclk);
4820 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4821 if (!ok) {
4822 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4823 return -EINVAL;
4824 }
4825
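The equation from the comment, written out for a single candidate divisor set (a sketch only; the real evaluation happens inside the find_pll callbacks):

	static int ironlake_dot_clock(int refclk, const intel_clock_t *clock)
	{
		/* refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2, clocks in kHz */
		int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
		int vco = refclk * m / (clock->n + 2);

		return vco / (clock->p1 * clock->p2);
	}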
4826 /* Ensure that the cursor is valid for the new mode before changing... */
4827 intel_crtc_update_cursor(crtc, true);
4828
4829 if (is_lvds && dev_priv->lvds_downclock_avail) {
4830 has_reduced_clock = limit->find_pll(limit, crtc,
4831 dev_priv->lvds_downclock,
4832 refclk,
4833 &reduced_clock);
4834 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4835 /*
4836 * If the different P is found, it means that we can't
4837 * switch the display clock by using the FP0/FP1.
4838 * In such case we will disable the LVDS downclock
4839 * feature.
4840 */
4841 DRM_DEBUG_KMS("Different P is found for "
4842 "LVDS clock/downclock\n");
4843 has_reduced_clock = 0;
4844 }
4845 }
4846 /* SDVO TV has fixed PLL values that depend on its clock range;
4847 this mirrors the VBIOS setting. */
4848 if (is_sdvo && is_tv) {
4849 if (adjusted_mode->clock >= 100000
4850 && adjusted_mode->clock < 140500) {
4851 clock.p1 = 2;
4852 clock.p2 = 10;
4853 clock.n = 3;
4854 clock.m1 = 16;
4855 clock.m2 = 8;
4856 } else if (adjusted_mode->clock >= 140500
4857 && adjusted_mode->clock <= 200000) {
4858 clock.p1 = 1;
4859 clock.p2 = 10;
4860 clock.n = 6;
4861 clock.m1 = 12;
4862 clock.m2 = 8;
4863 }
4864 }
4865
4866 /* FDI link */
4867 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4868 lane = 0;
4869 /* CPU eDP doesn't require FDI link, so just set DP M/N
4870 according to current link config */
4871 if (has_edp_encoder &&
4872 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4873 target_clock = mode->clock;
4874 intel_edp_link_config(has_edp_encoder,
4875 &lane, &link_bw);
4925 } else { 4876 } else {
4926 fp_reg = FP0(pipe); 4877 /* [e]DP over FDI requires target mode clock
4927 dpll_reg = DPLL(pipe); 4878 instead of link clock */
4879 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4880 target_clock = mode->clock;
4881 else
4882 target_clock = adjusted_mode->clock;
4883
4884 /* FDI is a binary signal running at ~2.7GHz, encoding
4885 * each output octet as 10 bits. The actual frequency
4886 * is stored as a divider into a 100MHz clock, and the
4887 * mode pixel clock is stored in units of 1KHz.
4888 * Hence the bw of each lane in terms of the mode signal
4889 * is:
4890 */
4891 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4892 }
4893
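Concrete numbers for the comment above, assuming intel_fdi_link_freq() reports 27 for the 2.7 GHz link: 2.7 Gb/s per lane, 8b/10b encoded, leaves 270 M octets/s of payload per lane, i.e.:

	link_bw = 27 * MHz(100) / KHz(1) / 10;	/* = 270000, in mode->clock (kHz) units */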
4894 /* determine panel color depth */
4895 temp = I915_READ(PIPECONF(pipe));
4896 temp &= ~PIPE_BPC_MASK;
4897 if (is_lvds) {
4898 /* the BPC will be 6 if it is an 18-bit LVDS panel */
4899 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
4900 temp |= PIPE_8BPC;
4901 else
4902 temp |= PIPE_6BPC;
4903 } else if (has_edp_encoder) {
4904 switch (dev_priv->edp.bpp/3) {
4905 case 8:
4906 temp |= PIPE_8BPC;
4907 break;
4908 case 10:
4909 temp |= PIPE_10BPC;
4910 break;
4911 case 6:
4912 temp |= PIPE_6BPC;
4913 break;
4914 case 12:
4915 temp |= PIPE_12BPC;
4916 break;
4917 }
4918 } else
4919 temp |= PIPE_8BPC;
4920 I915_WRITE(PIPECONF(pipe), temp);
4921
4922 switch (temp & PIPE_BPC_MASK) {
4923 case PIPE_8BPC:
4924 bpp = 24;
4925 break;
4926 case PIPE_10BPC:
4927 bpp = 30;
4928 break;
4929 case PIPE_6BPC:
4930 bpp = 18;
4931 break;
4932 case PIPE_12BPC:
4933 bpp = 36;
4934 break;
4935 default:
4936 DRM_ERROR("unknown pipe bpc value\n");
4937 bpp = 24;
4938 }
4939
4940 if (!lane) {
4941 /*
4942 * Account for spread spectrum to avoid
4943 * oversubscribing the link. Max center spread
4944 * is 2.5%; use 5% for safety's sake.
4945 */
4946 u32 bps = target_clock * bpp * 21 / 20;
4947 lane = bps / (link_bw * 8) + 1;
4928 } 4948 }
4929 4949
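A worked example of the lane computation, with an assumed 154,000 kHz target clock (roughly a 1920x1200 panel) at 24 bpp on the 270,000 kHz per-lane link from above:

	u32 bps = 154000 * 24 * 21 / 20;	/* 3880800 kb/s incl. 5% SS margin */
	lane = bps / (270000 * 8) + 1;		/* 3880800 / 2160000 -> 1, +1 = 2 lanes */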
4950 intel_crtc->fdi_lanes = lane;
4951
4952 if (pixel_multiplier > 1)
4953 link_bw *= pixel_multiplier;
4954 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
4955
4956 /* Ironlake: try to set up the display ref clock before DPLL
4957 * enabling. This is only under the driver's control after
4958 * PCH B stepping; earlier chipset steppings ignore
4959 * this setting.
4960 */
4961 temp = I915_READ(PCH_DREF_CONTROL);
4962 /* Always enable nonspread source */
4963 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4964 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4965 temp &= ~DREF_SSC_SOURCE_MASK;
4966 temp |= DREF_SSC_SOURCE_ENABLE;
4967 I915_WRITE(PCH_DREF_CONTROL, temp);
4968
4969 POSTING_READ(PCH_DREF_CONTROL);
4970 udelay(200);
4971
4972 if (has_edp_encoder) {
4973 if (intel_panel_use_ssc(dev_priv)) {
4974 temp |= DREF_SSC1_ENABLE;
4975 I915_WRITE(PCH_DREF_CONTROL, temp);
4976
4977 POSTING_READ(PCH_DREF_CONTROL);
4978 udelay(200);
4979 }
4980 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4981
4982 /* Enable CPU source on CPU attached eDP */
4983 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4984 if (intel_panel_use_ssc(dev_priv))
4985 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4986 else
4987 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4988 } else {
4989 /* Enable SSC on PCH eDP if needed */
4990 if (intel_panel_use_ssc(dev_priv)) {
4991 DRM_DEBUG_KMS("enabling SSC on PCH\n");
4992 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4993 }
4994 }
4995 I915_WRITE(PCH_DREF_CONTROL, temp);
4996 POSTING_READ(PCH_DREF_CONTROL);
4997 udelay(200);
4998 }
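
Every PCH_DREF_CONTROL update in the sequence above follows the same write, posting read, 200 us settle rhythm. A minimal sketch of that pattern as a wrapper; pch_dref_write_and_settle is a hypothetical name, the driver open-codes the sequence instead:

/* Hypothetical helper capturing the settle pattern used above. */
static void pch_dref_write_and_settle(struct drm_i915_private *dev_priv,
				      u32 val)
{
	I915_WRITE(PCH_DREF_CONTROL, val);
	POSTING_READ(PCH_DREF_CONTROL);  /* flush the write to the hardware */
	udelay(200);                     /* give the reference clock time to settle */
}
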
4999
5000 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5001 if (has_reduced_clock)
5002 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5003 reduced_clock.m2;
5004
5005 /* Enable autotuning of the PLL clock (if permissible) */
5006 factor = 21;
5007 if (is_lvds) {
5008 if ((intel_panel_use_ssc(dev_priv) &&
5009 dev_priv->lvds_ssc_freq == 100) ||
5010 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5011 factor = 25;
5012 } else if (is_sdvo && is_tv)
5013 factor = 20;
5014
5015 if (clock.m1 < factor * clock.n)
5016 fp |= FP_CB_TUNE;
5017
5018 dpll = 0;
5019
5020 if (is_lvds)
5021 dpll |= DPLLB_MODE_LVDS;
5022 else
5023 dpll |= DPLLB_MODE_DAC_SERIAL;
5024 if (is_sdvo) {
5025 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5026 if (pixel_multiplier > 1) {
5027 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5028 }
5029 dpll |= DPLL_DVO_HIGH_SPEED;
5030 }
5031 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5032 dpll |= DPLL_DVO_HIGH_SPEED;
5033
5034 /* compute bitmask from p1 value */
5035 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5036 /* also FPA1 */
5037 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5038
5039 switch (clock.p2) {
5040 case 5:
5041 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5042 break;
5043 case 7:
5044 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5045 break;
5046 case 10:
5047 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5048 break;
5049 case 14:
5050 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5051 break;
5052 }
5053
5054 if (is_sdvo && is_tv)
5055 dpll |= PLL_REF_INPUT_TVCLKINBC;
5056 else if (is_tv)
5057 /* XXX: just matching BIOS for now */
5058 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5059 dpll |= 3;
5060 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5061 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5062 else
5063 dpll |= PLL_REF_INPUT_DREFCLK;
5064
5065 /* setup pipeconf */
5066 pipeconf = I915_READ(PIPECONF(pipe));
5067
5068 /* Set up the display plane register */
5069 dspcntr = DISPPLANE_GAMMA_ENABLE;
5070
5071 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5072 drm_mode_debug_printmodeline(mode);
5073
4930 /* PCH eDP needs FDI, but CPU eDP does not */ 5074 /* PCH eDP needs FDI, but CPU eDP does not */
4931 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5075 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4932 I915_WRITE(fp_reg, fp); 5076 I915_WRITE(PCH_FP0(pipe), fp);
4933 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 5077 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4934 5078
4935 POSTING_READ(dpll_reg); 5079 POSTING_READ(PCH_DPLL(pipe));
4936 udelay(150); 5080 udelay(150);
4937 } 5081 }
4938 5082
@@ -4964,11 +5108,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4964 * things on. 5108 * things on.
4965 */ 5109 */
4966 if (is_lvds) { 5110 if (is_lvds) {
4967 reg = LVDS; 5111 temp = I915_READ(PCH_LVDS);
4968 if (HAS_PCH_SPLIT(dev))
4969 reg = PCH_LVDS;
4970
4971 temp = I915_READ(reg);
4972 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 5112 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4973 if (pipe == 1) { 5113 if (pipe == 1) {
4974 if (HAS_PCH_CPT(dev)) 5114 if (HAS_PCH_CPT(dev))
@@ -4995,13 +5135,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4995 * appropriately here, but we need to look more thoroughly into how 5135 * appropriately here, but we need to look more thoroughly into how
4996 * panels behave in the two modes. 5136 * panels behave in the two modes.
4997 */ 5137 */
4998 /* set the dithering flag on non-PCH LVDS as needed */
4999 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
5000 if (dev_priv->lvds_dither)
5001 temp |= LVDS_ENABLE_DITHER;
5002 else
5003 temp &= ~LVDS_ENABLE_DITHER;
5004 }
5005 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5138 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5006 lvds_sync |= LVDS_HSYNC_POLARITY; 5139 lvds_sync |= LVDS_HSYNC_POLARITY;
5007 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 5140 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
@@ -5018,22 +5151,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5018 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 5151 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5019 temp |= lvds_sync; 5152 temp |= lvds_sync;
5020 } 5153 }
5021 I915_WRITE(reg, temp); 5154 I915_WRITE(PCH_LVDS, temp);
5022 } 5155 }
5023 5156
5024 /* set the dithering flag and clear for anything other than a panel. */ 5157 /* set the dithering flag and clear for anything other than a panel. */
5025 if (HAS_PCH_SPLIT(dev)) { 5158 pipeconf &= ~PIPECONF_DITHER_EN;
5026 pipeconf &= ~PIPECONF_DITHER_EN; 5159 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5027 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5160 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
5028 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { 5161 pipeconf |= PIPECONF_DITHER_EN;
5029 pipeconf |= PIPECONF_DITHER_EN; 5162 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5030 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5031 }
5032 } 5163 }
5033 5164
5034 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5165 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5035 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5166 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5036 } else if (HAS_PCH_SPLIT(dev)) { 5167 } else {
5037 /* For non-DP output, clear any trans DP clock recovery setting. */ 5168 /* For non-DP output, clear any trans DP clock recovery setting. */
5038 I915_WRITE(TRANSDATA_M1(pipe), 0); 5169 I915_WRITE(TRANSDATA_M1(pipe), 0);
5039 I915_WRITE(TRANSDATA_N1(pipe), 0); 5170 I915_WRITE(TRANSDATA_N1(pipe), 0);
@@ -5041,43 +5172,32 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5041 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5172 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5042 } 5173 }
5043 5174
5044 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5175 if (!has_edp_encoder ||
5045 I915_WRITE(dpll_reg, dpll); 5176 intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5177 I915_WRITE(PCH_DPLL(pipe), dpll);
5046 5178
5047 /* Wait for the clocks to stabilize. */ 5179 /* Wait for the clocks to stabilize. */
5048 POSTING_READ(dpll_reg); 5180 POSTING_READ(PCH_DPLL(pipe));
5049 udelay(150); 5181 udelay(150);
5050 5182
5051 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 5183 /* The pixel multiplier can only be updated once the
5052 temp = 0; 5184 * DPLL is enabled and the clocks are stable.
5053 if (is_sdvo) { 5185 *
5054 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 5186 * So write it again.
5055 if (temp > 1) 5187 */
5056 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5188 I915_WRITE(PCH_DPLL(pipe), dpll);
5057 else
5058 temp = 0;
5059 }
5060 I915_WRITE(DPLL_MD(pipe), temp);
5061 } else {
5062 /* The pixel multiplier can only be updated once the
5063 * DPLL is enabled and the clocks are stable.
5064 *
5065 * So write it again.
5066 */
5067 I915_WRITE(dpll_reg, dpll);
5068 }
5069 } 5189 }
5070 5190
5071 intel_crtc->lowfreq_avail = false; 5191 intel_crtc->lowfreq_avail = false;
5072 if (is_lvds && has_reduced_clock && i915_powersave) { 5192 if (is_lvds && has_reduced_clock && i915_powersave) {
5073 I915_WRITE(fp_reg + 4, fp2); 5193 I915_WRITE(PCH_FP1(pipe), fp2);
5074 intel_crtc->lowfreq_avail = true; 5194 intel_crtc->lowfreq_avail = true;
5075 if (HAS_PIPE_CXSR(dev)) { 5195 if (HAS_PIPE_CXSR(dev)) {
5076 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 5196 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5077 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 5197 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5078 } 5198 }
5079 } else { 5199 } else {
5080 I915_WRITE(fp_reg + 4, fp); 5200 I915_WRITE(PCH_FP1(pipe), fp);
5081 if (HAS_PIPE_CXSR(dev)) { 5201 if (HAS_PIPE_CXSR(dev)) {
5082 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 5202 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5083 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 5203 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -5116,33 +5236,24 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5116 (adjusted_mode->crtc_vsync_start - 1) | 5236 (adjusted_mode->crtc_vsync_start - 1) |
5117 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 5237 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5118 5238
5119 /* pipesrc and dspsize control the size that is scaled from, 5239 /* pipesrc controls the size that is scaled from, which should
5120 * which should always be the user's requested size. 5240 * always be the user's requested size.
5121 */ 5241 */
5122 if (!HAS_PCH_SPLIT(dev)) {
5123 I915_WRITE(DSPSIZE(plane),
5124 ((mode->vdisplay - 1) << 16) |
5125 (mode->hdisplay - 1));
5126 I915_WRITE(DSPPOS(plane), 0);
5127 }
5128 I915_WRITE(PIPESRC(pipe), 5242 I915_WRITE(PIPESRC(pipe),
5129 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 5243 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5130 5244
5131 if (HAS_PCH_SPLIT(dev)) { 5245 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5132 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 5246 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5133 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); 5247 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5134 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 5248 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5135 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5136 5249
5137 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5250 if (has_edp_encoder &&
5138 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5251 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5139 } 5252 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5140 } 5253 }
5141 5254
5142 I915_WRITE(PIPECONF(pipe), pipeconf); 5255 I915_WRITE(PIPECONF(pipe), pipeconf);
5143 POSTING_READ(PIPECONF(pipe)); 5256 POSTING_READ(PIPECONF(pipe));
5144 if (!HAS_PCH_SPLIT(dev))
5145 intel_enable_pipe(dev_priv, pipe, false);
5146 5257
5147 intel_wait_for_vblank(dev, pipe); 5258 intel_wait_for_vblank(dev, pipe);
5148 5259
@@ -5161,6 +5272,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5161 5272
5162 intel_update_watermarks(dev); 5273 intel_update_watermarks(dev);
5163 5274
5275 return ret;
5276}
5277
5278static int intel_crtc_mode_set(struct drm_crtc *crtc,
5279 struct drm_display_mode *mode,
5280 struct drm_display_mode *adjusted_mode,
5281 int x, int y,
5282 struct drm_framebuffer *old_fb)
5283{
5284 struct drm_device *dev = crtc->dev;
5285 struct drm_i915_private *dev_priv = dev->dev_private;
5286 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5287 int pipe = intel_crtc->pipe;
5288 int ret;
5289
5290 drm_vblank_pre_modeset(dev, pipe);
5291
5292 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5293 x, y, old_fb);
5294
5164 drm_vblank_post_modeset(dev, pipe); 5295 drm_vblank_post_modeset(dev, pipe);
5165 5296
5166 return ret; 5297 return ret;
@@ -5483,43 +5614,140 @@ static struct drm_display_mode load_detect_mode = {
5483 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 5614 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
5484}; 5615};
5485 5616
5486struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 5617static struct drm_framebuffer *
5487 struct drm_connector *connector, 5618intel_framebuffer_create(struct drm_device *dev,
5488 struct drm_display_mode *mode, 5619 struct drm_mode_fb_cmd *mode_cmd,
5489 int *dpms_mode) 5620 struct drm_i915_gem_object *obj)
5621{
5622 struct intel_framebuffer *intel_fb;
5623 int ret;
5624
5625 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
5626 if (!intel_fb) {
5627 drm_gem_object_unreference_unlocked(&obj->base);
5628 return ERR_PTR(-ENOMEM);
5629 }
5630
5631 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5632 if (ret) {
5633 drm_gem_object_unreference_unlocked(&obj->base);
5634 kfree(intel_fb);
5635 return ERR_PTR(ret);
5636 }
5637
5638 return &intel_fb->base;
5639}
5640
5641static u32
5642intel_framebuffer_pitch_for_width(int width, int bpp)
5643{
5644 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5645 return ALIGN(pitch, 64);
5646}
5647
5648static u32
5649intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5650{
5651 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5652 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5653}
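
As a quick check of the two helpers above, a worked example with assumed values, using the same kernel macros: the 1024x768 load-detect mode at 32 bpp.

u32 pitch = DIV_ROUND_UP(1024 * 32, 8);   /* 4096 bytes per scanline */
pitch = ALIGN(pitch, 64);                 /* already 64-byte aligned */
u32 size = ALIGN(pitch * 768, PAGE_SIZE); /* 3145728 bytes = 768 pages */

So a temporary load-detect framebuffer for that mode costs 3 MiB of GTT space.
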
5654
5655static struct drm_framebuffer *
5656intel_framebuffer_create_for_mode(struct drm_device *dev,
5657 struct drm_display_mode *mode,
5658 int depth, int bpp)
5659{
5660 struct drm_i915_gem_object *obj;
5661 struct drm_mode_fb_cmd mode_cmd;
5662
5663 obj = i915_gem_alloc_object(dev,
5664 intel_framebuffer_size_for_mode(mode, bpp));
5665 if (obj == NULL)
5666 return ERR_PTR(-ENOMEM);
5667
5668 mode_cmd.width = mode->hdisplay;
5669 mode_cmd.height = mode->vdisplay;
5670 mode_cmd.depth = depth;
5671 mode_cmd.bpp = bpp;
5672 mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
5673
5674 return intel_framebuffer_create(dev, &mode_cmd, obj);
5675}
5676
5677static struct drm_framebuffer *
5678mode_fits_in_fbdev(struct drm_device *dev,
5679 struct drm_display_mode *mode)
5680{
5681 struct drm_i915_private *dev_priv = dev->dev_private;
5682 struct drm_i915_gem_object *obj;
5683 struct drm_framebuffer *fb;
5684
5685 if (dev_priv->fbdev == NULL)
5686 return NULL;
5687
5688 obj = dev_priv->fbdev->ifb.obj;
5689 if (obj == NULL)
5690 return NULL;
5691
5692 fb = &dev_priv->fbdev->ifb.base;
5693 if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
5694 fb->bits_per_pixel))
5695 return NULL;
5696
5697 if (obj->base.size < mode->vdisplay * fb->pitch)
5698 return NULL;
5699
5700 return fb;
5701}
5702
5703bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5704 struct drm_connector *connector,
5705 struct drm_display_mode *mode,
5706 struct intel_load_detect_pipe *old)
5490{ 5707{
5491 struct intel_crtc *intel_crtc; 5708 struct intel_crtc *intel_crtc;
5492 struct drm_crtc *possible_crtc; 5709 struct drm_crtc *possible_crtc;
5493 struct drm_crtc *supported_crtc =NULL;
5494 struct drm_encoder *encoder = &intel_encoder->base; 5710 struct drm_encoder *encoder = &intel_encoder->base;
5495 struct drm_crtc *crtc = NULL; 5711 struct drm_crtc *crtc = NULL;
5496 struct drm_device *dev = encoder->dev; 5712 struct drm_device *dev = encoder->dev;
5497 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 5713 struct drm_framebuffer *old_fb;
5498 struct drm_crtc_helper_funcs *crtc_funcs;
5499 int i = -1; 5714 int i = -1;
5500 5715
5716 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5717 connector->base.id, drm_get_connector_name(connector),
5718 encoder->base.id, drm_get_encoder_name(encoder));
5719
5501 /* 5720 /*
5502 * Algorithm gets a little messy: 5721 * Algorithm gets a little messy:
5722 *
5503 * - if the connector already has an assigned crtc, use it (but make 5723 * - if the connector already has an assigned crtc, use it (but make
5504 * sure it's on first) 5724 * sure it's on first)
5725 *
5505 * - try to find the first unused crtc that can drive this connector, 5726 * - try to find the first unused crtc that can drive this connector,
5506 * and use that if we find one 5727 * and use that if we find one
5507 * - if there are no unused crtcs available, try to use the first
5508 * one we found that supports the connector
5509 */ 5728 */
5510 5729
5511 /* See if we already have a CRTC for this connector */ 5730 /* See if we already have a CRTC for this connector */
5512 if (encoder->crtc) { 5731 if (encoder->crtc) {
5513 crtc = encoder->crtc; 5732 crtc = encoder->crtc;
5514 /* Make sure the crtc and connector are running */ 5733
5515 intel_crtc = to_intel_crtc(crtc); 5734 intel_crtc = to_intel_crtc(crtc);
5516 *dpms_mode = intel_crtc->dpms_mode; 5735 old->dpms_mode = intel_crtc->dpms_mode;
5736 old->load_detect_temp = false;
5737
5738 /* Make sure the crtc and connector are running */
5517 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { 5739 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
5740 struct drm_encoder_helper_funcs *encoder_funcs;
5741 struct drm_crtc_helper_funcs *crtc_funcs;
5742
5518 crtc_funcs = crtc->helper_private; 5743 crtc_funcs = crtc->helper_private;
5519 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); 5744 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
5745
5746 encoder_funcs = encoder->helper_private;
5520 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 5747 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
5521 } 5748 }
5522 return crtc; 5749
5750 return true;
5523 } 5751 }
5524 5752
5525 /* Find an unused one (if possible) */ 5753 /* Find an unused one (if possible) */
@@ -5531,46 +5759,66 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5531 crtc = possible_crtc; 5759 crtc = possible_crtc;
5532 break; 5760 break;
5533 } 5761 }
5534 if (!supported_crtc)
5535 supported_crtc = possible_crtc;
5536 } 5762 }
5537 5763
5538 /* 5764 /*
5539 * If we didn't find an unused CRTC, don't use any. 5765 * If we didn't find an unused CRTC, don't use any.
5540 */ 5766 */
5541 if (!crtc) { 5767 if (!crtc) {
5542 return NULL; 5768 DRM_DEBUG_KMS("no pipe available for load-detect\n");
5769 return false;
5543 } 5770 }
5544 5771
5545 encoder->crtc = crtc; 5772 encoder->crtc = crtc;
5546 connector->encoder = encoder; 5773 connector->encoder = encoder;
5547 intel_encoder->load_detect_temp = true;
5548 5774
5549 intel_crtc = to_intel_crtc(crtc); 5775 intel_crtc = to_intel_crtc(crtc);
5550 *dpms_mode = intel_crtc->dpms_mode; 5776 old->dpms_mode = intel_crtc->dpms_mode;
5777 old->load_detect_temp = true;
5778 old->release_fb = NULL;
5551 5779
5552 if (!crtc->enabled) { 5780 if (!mode)
5553 if (!mode) 5781 mode = &load_detect_mode;
5554 mode = &load_detect_mode;
5555 drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
5556 } else {
5557 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
5558 crtc_funcs = crtc->helper_private;
5559 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
5560 }
5561 5782
5562 /* Add this connector to the crtc */ 5783 old_fb = crtc->fb;
5563 encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); 5784
5564 encoder_funcs->commit(encoder); 5785 /* We need a framebuffer large enough to accommodate all accesses
5786 * that the plane may generate whilst we perform load detection.
5787 * We cannot rely on the fbcon being present (we may be called
5788 * during its initialisation to detect all boot displays, or it
5789 * may not even exist), nor on it being large enough to satisfy
5790 * the requested mode.
5791 */
5792 crtc->fb = mode_fits_in_fbdev(dev, mode);
5793 if (crtc->fb == NULL) {
5794 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
5795 crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
5796 old->release_fb = crtc->fb;
5797 } else
5798 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5799 if (IS_ERR(crtc->fb)) {
5800 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5801 crtc->fb = old_fb;
5802 return false;
5803 }
5804
5805 if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
5806 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
5807 if (old->release_fb)
5808 old->release_fb->funcs->destroy(old->release_fb);
5809 crtc->fb = old_fb;
5810 return false;
5565 } 5811 }
5812
5566 /* let the connector get through one full cycle before testing */ 5813 /* let the connector get through one full cycle before testing */
5567 intel_wait_for_vblank(dev, intel_crtc->pipe); 5814 intel_wait_for_vblank(dev, intel_crtc->pipe);
5568 5815
5569 return crtc; 5816 return true;
5570} 5817}
5571 5818
5572void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 5819void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
5573 struct drm_connector *connector, int dpms_mode) 5820 struct drm_connector *connector,
5821 struct intel_load_detect_pipe *old)
5574{ 5822{
5575 struct drm_encoder *encoder = &intel_encoder->base; 5823 struct drm_encoder *encoder = &intel_encoder->base;
5576 struct drm_device *dev = encoder->dev; 5824 struct drm_device *dev = encoder->dev;
@@ -5578,19 +5826,24 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
5578 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 5826 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5579 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 5827 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
5580 5828
5581 if (intel_encoder->load_detect_temp) { 5829 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5582 encoder->crtc = NULL; 5830 connector->base.id, drm_get_connector_name(connector),
5831 encoder->base.id, drm_get_encoder_name(encoder));
5832
5833 if (old->load_detect_temp) {
5583 connector->encoder = NULL; 5834 connector->encoder = NULL;
5584 intel_encoder->load_detect_temp = false;
5585 crtc->enabled = drm_helper_crtc_in_use(crtc);
5586 drm_helper_disable_unused_functions(dev); 5835 drm_helper_disable_unused_functions(dev);
5836
5837 if (old->release_fb)
5838 old->release_fb->funcs->destroy(old->release_fb);
5839
5840 return;
5587 } 5841 }
5588 5842
5589 /* Switch crtc and encoder back off if necessary */ 5843 /* Switch crtc and encoder back off if necessary */
5590 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { 5844 if (old->dpms_mode != DRM_MODE_DPMS_ON) {
5591 if (encoder->crtc == crtc) 5845 encoder_funcs->dpms(encoder, old->dpms_mode);
5592 encoder_funcs->dpms(encoder, dpms_mode); 5846 crtc_funcs->dpms(crtc, old->dpms_mode);
5593 crtc_funcs->dpms(crtc, dpms_mode);
5594 } 5847 }
5595} 5848}
5596 5849
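
Taken together, the get/release pair is now a bracketed API: whatever intel_get_load_detect_pipe() borrows is recorded in a caller-owned struct intel_load_detect_pipe and undone by intel_release_load_detect_pipe(). A hedged sketch of a caller, where do_hardware_load_detect() is a placeholder for the encoder's actual probe:

struct intel_load_detect_pipe tmp;
enum drm_connector_status status = connector_status_disconnected;

if (intel_get_load_detect_pipe(intel_encoder, connector, NULL, &tmp)) {
	/* the pipe is running with a real framebuffer; safe to probe */
	status = do_hardware_load_detect(intel_encoder);
	intel_release_load_detect_pipe(intel_encoder, connector, &tmp);
}
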
@@ -6185,6 +6438,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6185 break; 6438 break;
6186 6439
6187 case 6: 6440 case 6:
6441 case 7:
6188 OUT_RING(MI_DISPLAY_FLIP | 6442 OUT_RING(MI_DISPLAY_FLIP |
6189 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 6443 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6190 OUT_RING(fb->pitch | obj->tiling_mode); 6444 OUT_RING(fb->pitch | obj->tiling_mode);
@@ -6504,6 +6758,9 @@ static void intel_setup_outputs(struct drm_device *dev)
6504 } 6758 }
6505 6759
6506 intel_panel_setup_backlight(dev); 6760 intel_panel_setup_backlight(dev);
6761
6762 /* disable all the possible outputs/crtcs before entering KMS mode */
6763 drm_helper_disable_unused_functions(dev);
6507} 6764}
6508 6765
6509static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 6766static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -6571,27 +6828,12 @@ intel_user_framebuffer_create(struct drm_device *dev,
6571 struct drm_mode_fb_cmd *mode_cmd) 6828 struct drm_mode_fb_cmd *mode_cmd)
6572{ 6829{
6573 struct drm_i915_gem_object *obj; 6830 struct drm_i915_gem_object *obj;
6574 struct intel_framebuffer *intel_fb;
6575 int ret;
6576 6831
6577 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); 6832 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
6578 if (&obj->base == NULL) 6833 if (&obj->base == NULL)
6579 return ERR_PTR(-ENOENT); 6834 return ERR_PTR(-ENOENT);
6580 6835
6581 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6836 return intel_framebuffer_create(dev, mode_cmd, obj);
6582 if (!intel_fb) {
6583 drm_gem_object_unreference_unlocked(&obj->base);
6584 return ERR_PTR(-ENOMEM);
6585 }
6586
6587 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6588 if (ret) {
6589 drm_gem_object_unreference_unlocked(&obj->base);
6590 kfree(intel_fb);
6591 return ERR_PTR(ret);
6592 }
6593
6594 return &intel_fb->base;
6595} 6837}
6596 6838
6597static const struct drm_mode_config_funcs intel_mode_funcs = { 6839static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -6605,13 +6847,14 @@ intel_alloc_context_page(struct drm_device *dev)
6605 struct drm_i915_gem_object *ctx; 6847 struct drm_i915_gem_object *ctx;
6606 int ret; 6848 int ret;
6607 6849
6850 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
6851
6608 ctx = i915_gem_alloc_object(dev, 4096); 6852 ctx = i915_gem_alloc_object(dev, 4096);
6609 if (!ctx) { 6853 if (!ctx) {
6610 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 6854 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
6611 return NULL; 6855 return NULL;
6612 } 6856 }
6613 6857
6614 mutex_lock(&dev->struct_mutex);
6615 ret = i915_gem_object_pin(ctx, 4096, true); 6858 ret = i915_gem_object_pin(ctx, 4096, true);
6616 if (ret) { 6859 if (ret) {
6617 DRM_ERROR("failed to pin power context: %d\n", ret); 6860 DRM_ERROR("failed to pin power context: %d\n", ret);
@@ -6623,7 +6866,6 @@ intel_alloc_context_page(struct drm_device *dev)
6623 DRM_ERROR("failed to set-domain on power context: %d\n", ret); 6866 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
6624 goto err_unpin; 6867 goto err_unpin;
6625 } 6868 }
6626 mutex_unlock(&dev->struct_mutex);
6627 6869
6628 return ctx; 6870 return ctx;
6629 6871
@@ -6758,6 +7000,11 @@ void gen6_disable_rps(struct drm_device *dev)
6758 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 7000 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
6759 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 7001 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
6760 I915_WRITE(GEN6_PMIER, 0); 7002 I915_WRITE(GEN6_PMIER, 0);
7003
7004 spin_lock_irq(&dev_priv->rps_lock);
7005 dev_priv->pm_iir = 0;
7006 spin_unlock_irq(&dev_priv->rps_lock);
7007
6761 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 7008 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
6762} 7009}
6763 7010
@@ -6851,7 +7098,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6851{ 7098{
6852 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 7099 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6853 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 7100 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
6854 u32 pcu_mbox; 7101 u32 pcu_mbox, rc6_mask = 0;
6855 int cur_freq, min_freq, max_freq; 7102 int cur_freq, min_freq, max_freq;
6856 int i; 7103 int i;
6857 7104
@@ -6862,7 +7109,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6862 * userspace... 7109 * userspace...
6863 */ 7110 */
6864 I915_WRITE(GEN6_RC_STATE, 0); 7111 I915_WRITE(GEN6_RC_STATE, 0);
6865 __gen6_gt_force_wake_get(dev_priv); 7112 mutex_lock(&dev_priv->dev->struct_mutex);
7113 gen6_gt_force_wake_get(dev_priv);
6866 7114
6867 /* disable the counters and set deterministic thresholds */ 7115 /* disable the counters and set deterministic thresholds */
6868 I915_WRITE(GEN6_RC_CONTROL, 0); 7116 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6882,9 +7130,12 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6882 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 7130 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
6883 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 7131 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
6884 7132
7133 if (i915_enable_rc6)
7134 rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
7135 GEN6_RC_CTL_RC6_ENABLE;
7136
6885 I915_WRITE(GEN6_RC_CONTROL, 7137 I915_WRITE(GEN6_RC_CONTROL,
6886 GEN6_RC_CTL_RC6p_ENABLE | 7138 rc6_mask |
6887 GEN6_RC_CTL_RC6_ENABLE |
6888 GEN6_RC_CTL_EI_MODE(1) | 7139 GEN6_RC_CTL_EI_MODE(1) |
6889 GEN6_RC_CTL_HW_ENABLE); 7140 GEN6_RC_CTL_HW_ENABLE);
6890 7141
@@ -6956,168 +7207,237 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6956 GEN6_PM_RP_DOWN_THRESHOLD | 7207 GEN6_PM_RP_DOWN_THRESHOLD |
6957 GEN6_PM_RP_UP_EI_EXPIRED | 7208 GEN6_PM_RP_UP_EI_EXPIRED |
6958 GEN6_PM_RP_DOWN_EI_EXPIRED); 7209 GEN6_PM_RP_DOWN_EI_EXPIRED);
7210 spin_lock_irq(&dev_priv->rps_lock);
7211 WARN_ON(dev_priv->pm_iir != 0);
6959 I915_WRITE(GEN6_PMIMR, 0); 7212 I915_WRITE(GEN6_PMIMR, 0);
7213 spin_unlock_irq(&dev_priv->rps_lock);
6960 /* enable all PM interrupts */ 7214 /* enable all PM interrupts */
6961 I915_WRITE(GEN6_PMINTRMSK, 0); 7215 I915_WRITE(GEN6_PMINTRMSK, 0);
6962 7216
6963 __gen6_gt_force_wake_put(dev_priv); 7217 gen6_gt_force_wake_put(dev_priv);
7218 mutex_unlock(&dev_priv->dev->struct_mutex);
6964} 7219}
6965 7220
6966void intel_enable_clock_gating(struct drm_device *dev) 7221static void ironlake_init_clock_gating(struct drm_device *dev)
7222{
7223 struct drm_i915_private *dev_priv = dev->dev_private;
7224 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
7225
7226 /* Required for FBC */
7227 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
7228 DPFCRUNIT_CLOCK_GATE_DISABLE |
7229 DPFDUNIT_CLOCK_GATE_DISABLE;
7230 /* Required for CxSR */
7231 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
7232
7233 I915_WRITE(PCH_3DCGDIS0,
7234 MARIUNIT_CLOCK_GATE_DISABLE |
7235 SVSMUNIT_CLOCK_GATE_DISABLE);
7236 I915_WRITE(PCH_3DCGDIS1,
7237 VFMUNIT_CLOCK_GATE_DISABLE);
7238
7239 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
7240
7241 /*
7242 * According to the spec the following bits should be set in
7243 * order to enable memory self-refresh:
7244 * bits 22/21 of 0x42004
7245 * bit 5 of 0x42020
7246 * bit 15 of 0x45000
7247 */
7248 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7249 (I915_READ(ILK_DISPLAY_CHICKEN2) |
7250 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7251 I915_WRITE(ILK_DSPCLK_GATE,
7252 (I915_READ(ILK_DSPCLK_GATE) |
7253 ILK_DPARB_CLK_GATE));
7254 I915_WRITE(DISP_ARB_CTL,
7255 (I915_READ(DISP_ARB_CTL) |
7256 DISP_FBC_WM_DIS));
7257 I915_WRITE(WM3_LP_ILK, 0);
7258 I915_WRITE(WM2_LP_ILK, 0);
7259 I915_WRITE(WM1_LP_ILK, 0);
7260
7261 /*
7262 * According to the hardware documentation, the following bits
7263 * should be set unconditionally in order to enable FBC:
7264 * bit 22 of 0x42000
7265 * bit 22 of 0x42004
7266 * bits 7, 8 and 9 of 0x42020.
7267 */
7268 if (IS_IRONLAKE_M(dev)) {
7269 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7270 I915_READ(ILK_DISPLAY_CHICKEN1) |
7271 ILK_FBCQ_DIS);
7272 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7273 I915_READ(ILK_DISPLAY_CHICKEN2) |
7274 ILK_DPARB_GATE);
7275 I915_WRITE(ILK_DSPCLK_GATE,
7276 I915_READ(ILK_DSPCLK_GATE) |
7277 ILK_DPFC_DIS1 |
7278 ILK_DPFC_DIS2 |
7279 ILK_CLK_FBC);
7280 }
7281
7282 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7283 I915_READ(ILK_DISPLAY_CHICKEN2) |
7284 ILK_ELPIN_409_SELECT);
7285 I915_WRITE(_3D_CHICKEN2,
7286 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
7287 _3D_CHICKEN2_WM_READ_PIPELINED);
7288}
7289
7290static void gen6_init_clock_gating(struct drm_device *dev)
6967{ 7291{
6968 struct drm_i915_private *dev_priv = dev->dev_private; 7292 struct drm_i915_private *dev_priv = dev->dev_private;
6969 int pipe; 7293 int pipe;
7294 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
7295
7296 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
7297
7298 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7299 I915_READ(ILK_DISPLAY_CHICKEN2) |
7300 ILK_ELPIN_409_SELECT);
7301
7302 I915_WRITE(WM3_LP_ILK, 0);
7303 I915_WRITE(WM2_LP_ILK, 0);
7304 I915_WRITE(WM1_LP_ILK, 0);
6970 7305
6971 /* 7306 /*
6972 * Disable clock gating reported to work incorrectly according to the 7307 * According to the spec the following bits should be
6973 * specs, but enable as much else as we can. 7308 * set in order to enable memory self-refresh and fbc:
7309 * bits 21 and 22 of 0x42000
7310 * bits 21 and 22 of 0x42004
7311 * bits 5 and 7 of 0x42020
7312 * bit 14 of 0x70180
7313 * bit 14 of 0x71180
6974 */ 7314 */
6975 if (HAS_PCH_SPLIT(dev)) { 7315 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6976 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 7316 I915_READ(ILK_DISPLAY_CHICKEN1) |
7317 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
7318 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7319 I915_READ(ILK_DISPLAY_CHICKEN2) |
7320 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7321 I915_WRITE(ILK_DSPCLK_GATE,
7322 I915_READ(ILK_DSPCLK_GATE) |
7323 ILK_DPARB_CLK_GATE |
7324 ILK_DPFD_CLK_GATE);
6977 7325
6978 if (IS_GEN5(dev)) { 7326 for_each_pipe(pipe)
6979 /* Required for FBC */ 7327 I915_WRITE(DSPCNTR(pipe),
6980 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | 7328 I915_READ(DSPCNTR(pipe)) |
6981 DPFCRUNIT_CLOCK_GATE_DISABLE | 7329 DISPPLANE_TRICKLE_FEED_DISABLE);
6982 DPFDUNIT_CLOCK_GATE_DISABLE; 7330}
6983 /* Required for CxSR */
6984 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
6985
6986 I915_WRITE(PCH_3DCGDIS0,
6987 MARIUNIT_CLOCK_GATE_DISABLE |
6988 SVSMUNIT_CLOCK_GATE_DISABLE);
6989 I915_WRITE(PCH_3DCGDIS1,
6990 VFMUNIT_CLOCK_GATE_DISABLE);
6991 }
6992 7331
6993 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 7332static void ivybridge_init_clock_gating(struct drm_device *dev)
7333{
7334 struct drm_i915_private *dev_priv = dev->dev_private;
7335 int pipe;
7336 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
6994 7337
6995 /* 7338 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
6996 * On Ibex Peak and Cougar Point, we need to disable clock
6997 * gating for the panel power sequencer or it will fail to
6998 * start up when no ports are active.
6999 */
7000 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
7001 7339
7002 /* 7340 I915_WRITE(WM3_LP_ILK, 0);
7003 * According to the spec the following bits should be set in 7341 I915_WRITE(WM2_LP_ILK, 0);
7004 * order to enable memory self-refresh 7342 I915_WRITE(WM1_LP_ILK, 0);
7005 * The bit 22/21 of 0x42004
7006 * The bit 5 of 0x42020
7007 * The bit 15 of 0x45000
7008 */
7009 if (IS_GEN5(dev)) {
7010 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7011 (I915_READ(ILK_DISPLAY_CHICKEN2) |
7012 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
7013 I915_WRITE(ILK_DSPCLK_GATE,
7014 (I915_READ(ILK_DSPCLK_GATE) |
7015 ILK_DPARB_CLK_GATE));
7016 I915_WRITE(DISP_ARB_CTL,
7017 (I915_READ(DISP_ARB_CTL) |
7018 DISP_FBC_WM_DIS));
7019 I915_WRITE(WM3_LP_ILK, 0);
7020 I915_WRITE(WM2_LP_ILK, 0);
7021 I915_WRITE(WM1_LP_ILK, 0);
7022 }
7023 /*
7024 * Based on the document from hardware guys the following bits
7025 * should be set unconditionally in order to enable FBC.
7026 * The bit 22 of 0x42000
7027 * The bit 22 of 0x42004
7028 * The bit 7,8,9 of 0x42020.
7029 */
7030 if (IS_IRONLAKE_M(dev)) {
7031 I915_WRITE(ILK_DISPLAY_CHICKEN1,
7032 I915_READ(ILK_DISPLAY_CHICKEN1) |
7033 ILK_FBCQ_DIS);
7034 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7035 I915_READ(ILK_DISPLAY_CHICKEN2) |
7036 ILK_DPARB_GATE);
7037 I915_WRITE(ILK_DSPCLK_GATE,
7038 I915_READ(ILK_DSPCLK_GATE) |
7039 ILK_DPFC_DIS1 |
7040 ILK_DPFC_DIS2 |
7041 ILK_CLK_FBC);
7042 }
7043 7343
7044 I915_WRITE(ILK_DISPLAY_CHICKEN2, 7344 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
7045 I915_READ(ILK_DISPLAY_CHICKEN2) |
7046 ILK_ELPIN_409_SELECT);
7047 7345
7048 if (IS_GEN5(dev)) { 7346 for_each_pipe(pipe)
7049 I915_WRITE(_3D_CHICKEN2, 7347 I915_WRITE(DSPCNTR(pipe),
7050 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 7348 I915_READ(DSPCNTR(pipe)) |
7051 _3D_CHICKEN2_WM_READ_PIPELINED); 7349 DISPPLANE_TRICKLE_FEED_DISABLE);
7052 } 7350}
7053 7351
7054 if (IS_GEN6(dev)) { 7352static void g4x_init_clock_gating(struct drm_device *dev)
7055 I915_WRITE(WM3_LP_ILK, 0); 7353{
7056 I915_WRITE(WM2_LP_ILK, 0); 7354 struct drm_i915_private *dev_priv = dev->dev_private;
7057 I915_WRITE(WM1_LP_ILK, 0); 7355 uint32_t dspclk_gate;
7058 7356
7059 /* 7357 I915_WRITE(RENCLK_GATE_D1, 0);
7060 * According to the spec the following bits should be 7358 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7061 * set in order to enable memory self-refresh and fbc: 7359 GS_UNIT_CLOCK_GATE_DISABLE |
7062 * The bit21 and bit22 of 0x42000 7360 CL_UNIT_CLOCK_GATE_DISABLE);
7063 * The bit21 and bit22 of 0x42004 7361 I915_WRITE(RAMCLK_GATE_D, 0);
7064 * The bit5 and bit7 of 0x42020 7362 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7065 * The bit14 of 0x70180 7363 OVRUNIT_CLOCK_GATE_DISABLE |
7066 * The bit14 of 0x71180 7364 OVCUNIT_CLOCK_GATE_DISABLE;
7067 */ 7365 if (IS_GM45(dev))
7068 I915_WRITE(ILK_DISPLAY_CHICKEN1, 7366 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7069 I915_READ(ILK_DISPLAY_CHICKEN1) | 7367 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7070 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 7368}
7071 I915_WRITE(ILK_DISPLAY_CHICKEN2,
7072 I915_READ(ILK_DISPLAY_CHICKEN2) |
7073 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
7074 I915_WRITE(ILK_DSPCLK_GATE,
7075 I915_READ(ILK_DSPCLK_GATE) |
7076 ILK_DPARB_CLK_GATE |
7077 ILK_DPFD_CLK_GATE);
7078
7079 for_each_pipe(pipe)
7080 I915_WRITE(DSPCNTR(pipe),
7081 I915_READ(DSPCNTR(pipe)) |
7082 DISPPLANE_TRICKLE_FEED_DISABLE);
7083 }
7084 } else if (IS_G4X(dev)) {
7085 uint32_t dspclk_gate;
7086 I915_WRITE(RENCLK_GATE_D1, 0);
7087 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7088 GS_UNIT_CLOCK_GATE_DISABLE |
7089 CL_UNIT_CLOCK_GATE_DISABLE);
7090 I915_WRITE(RAMCLK_GATE_D, 0);
7091 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7092 OVRUNIT_CLOCK_GATE_DISABLE |
7093 OVCUNIT_CLOCK_GATE_DISABLE;
7094 if (IS_GM45(dev))
7095 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7096 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7097 } else if (IS_CRESTLINE(dev)) {
7098 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7099 I915_WRITE(RENCLK_GATE_D2, 0);
7100 I915_WRITE(DSPCLK_GATE_D, 0);
7101 I915_WRITE(RAMCLK_GATE_D, 0);
7102 I915_WRITE16(DEUC, 0);
7103 } else if (IS_BROADWATER(dev)) {
7104 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7105 I965_RCC_CLOCK_GATE_DISABLE |
7106 I965_RCPB_CLOCK_GATE_DISABLE |
7107 I965_ISC_CLOCK_GATE_DISABLE |
7108 I965_FBC_CLOCK_GATE_DISABLE);
7109 I915_WRITE(RENCLK_GATE_D2, 0);
7110 } else if (IS_GEN3(dev)) {
7111 u32 dstate = I915_READ(D_STATE);
7112 7369
7113 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 7370static void crestline_init_clock_gating(struct drm_device *dev)
7114 DSTATE_DOT_CLOCK_GATING; 7371{
7115 I915_WRITE(D_STATE, dstate); 7372 struct drm_i915_private *dev_priv = dev->dev_private;
7116 } else if (IS_I85X(dev) || IS_I865G(dev)) { 7373
7117 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7374 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7118 } else if (IS_I830(dev)) { 7375 I915_WRITE(RENCLK_GATE_D2, 0);
7119 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 7376 I915_WRITE(DSPCLK_GATE_D, 0);
7120 } 7377 I915_WRITE(RAMCLK_GATE_D, 0);
7378 I915_WRITE16(DEUC, 0);
7379}
7380
7381static void broadwater_init_clock_gating(struct drm_device *dev)
7382{
7383 struct drm_i915_private *dev_priv = dev->dev_private;
7384
7385 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7386 I965_RCC_CLOCK_GATE_DISABLE |
7387 I965_RCPB_CLOCK_GATE_DISABLE |
7388 I965_ISC_CLOCK_GATE_DISABLE |
7389 I965_FBC_CLOCK_GATE_DISABLE);
7390 I915_WRITE(RENCLK_GATE_D2, 0);
7391}
7392
7393static void gen3_init_clock_gating(struct drm_device *dev)
7394{
7395 struct drm_i915_private *dev_priv = dev->dev_private;
7396 u32 dstate = I915_READ(D_STATE);
7397
7398 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7399 DSTATE_DOT_CLOCK_GATING;
7400 I915_WRITE(D_STATE, dstate);
7401}
7402
7403static void i85x_init_clock_gating(struct drm_device *dev)
7404{
7405 struct drm_i915_private *dev_priv = dev->dev_private;
7406
7407 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7408}
7409
7410static void i830_init_clock_gating(struct drm_device *dev)
7411{
7412 struct drm_i915_private *dev_priv = dev->dev_private;
7413
7414 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7415}
7416
7417static void ibx_init_clock_gating(struct drm_device *dev)
7418{
7419 struct drm_i915_private *dev_priv = dev->dev_private;
7420
7421 /*
7422 * On Ibex Peak and Cougar Point, we need to disable clock
7423 * gating for the panel power sequencer or it will fail to
7424 * start up when no ports are active.
7425 */
7426 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
7427}
7428
7429static void cpt_init_clock_gating(struct drm_device *dev)
7430{
7431 struct drm_i915_private *dev_priv = dev->dev_private;
7432
7433 /*
7434 * On Ibex Peak and Cougar Point, we need to disable clock
7435 * gating for the panel power sequencer or it will fail to
7436 * start up when no ports are active.
7437 */
7438 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
7439 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
7440 DPLS_EDP_PPS_FIX_DIS);
7121} 7441}
7122 7442
7123static void ironlake_teardown_rc6(struct drm_device *dev) 7443static void ironlake_teardown_rc6(struct drm_device *dev)
@@ -7187,9 +7507,12 @@ void ironlake_enable_rc6(struct drm_device *dev)
7187 if (!i915_enable_rc6) 7507 if (!i915_enable_rc6)
7188 return; 7508 return;
7189 7509
7510 mutex_lock(&dev->struct_mutex);
7190 ret = ironlake_setup_rc6(dev); 7511 ret = ironlake_setup_rc6(dev);
7191 if (ret) 7512 if (ret) {
7513 mutex_unlock(&dev->struct_mutex);
7192 return; 7514 return;
7515 }
7193 7516
7194 /* 7517 /*
7195 * GPU can automatically power down the render unit if given a page 7518 * GPU can automatically power down the render unit if given a page
@@ -7198,6 +7521,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
7198 ret = BEGIN_LP_RING(6); 7521 ret = BEGIN_LP_RING(6);
7199 if (ret) { 7522 if (ret) {
7200 ironlake_teardown_rc6(dev); 7523 ironlake_teardown_rc6(dev);
7524 mutex_unlock(&dev->struct_mutex);
7201 return; 7525 return;
7202 } 7526 }
7203 7527
@@ -7213,10 +7537,33 @@ void ironlake_enable_rc6(struct drm_device *dev)
7213 OUT_RING(MI_FLUSH); 7537 OUT_RING(MI_FLUSH);
7214 ADVANCE_LP_RING(); 7538 ADVANCE_LP_RING();
7215 7539
7540 /*
7541 * Wait for the command parser to advance past MI_SET_CONTEXT. The
7542 * HW does an implicit flush; combined with the MI_FLUSH above, it
7543 * should be safe to assume that renderctx is valid.
7544 */
7545 ret = intel_wait_ring_idle(LP_RING(dev_priv));
7546 if (ret) {
7547 DRM_ERROR("failed to enable ironlake power savings\n");
7548 ironlake_teardown_rc6(dev);
7549 mutex_unlock(&dev->struct_mutex);
7550 return;
7551 }
7552
7216 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 7553 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
7217 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 7554 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
7555 mutex_unlock(&dev->struct_mutex);
7218} 7556}
7219 7557
7558void intel_init_clock_gating(struct drm_device *dev)
7559{
7560 struct drm_i915_private *dev_priv = dev->dev_private;
7561
7562 dev_priv->display.init_clock_gating(dev);
7563
7564 if (dev_priv->display.init_pch_clock_gating)
7565 dev_priv->display.init_pch_clock_gating(dev);
7566}
7220 7567
7221/* Set up chip specific display functions */ 7568/* Set up chip specific display functions */
7222static void intel_init_display(struct drm_device *dev) 7569static void intel_init_display(struct drm_device *dev)
@@ -7224,10 +7571,13 @@ static void intel_init_display(struct drm_device *dev)
7224 struct drm_i915_private *dev_priv = dev->dev_private; 7571 struct drm_i915_private *dev_priv = dev->dev_private;
7225 7572
7226 /* We always want a DPMS function */ 7573 /* We always want a DPMS function */
7227 if (HAS_PCH_SPLIT(dev)) 7574 if (HAS_PCH_SPLIT(dev)) {
7228 dev_priv->display.dpms = ironlake_crtc_dpms; 7575 dev_priv->display.dpms = ironlake_crtc_dpms;
7229 else 7576 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
7577 } else {
7230 dev_priv->display.dpms = i9xx_crtc_dpms; 7578 dev_priv->display.dpms = i9xx_crtc_dpms;
7579 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
7580 }
7231 7581
7232 if (I915_HAS_FBC(dev)) { 7582 if (I915_HAS_FBC(dev)) {
7233 if (HAS_PCH_SPLIT(dev)) { 7583 if (HAS_PCH_SPLIT(dev)) {
@@ -7271,6 +7621,11 @@ static void intel_init_display(struct drm_device *dev)
7271 7621
7272 /* For FIFO watermark updates */ 7622 /* For FIFO watermark updates */
7273 if (HAS_PCH_SPLIT(dev)) { 7623 if (HAS_PCH_SPLIT(dev)) {
7624 if (HAS_PCH_IBX(dev))
7625 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
7626 else if (HAS_PCH_CPT(dev))
7627 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
7628
7274 if (IS_GEN5(dev)) { 7629 if (IS_GEN5(dev)) {
7275 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 7630 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
7276 dev_priv->display.update_wm = ironlake_update_wm; 7631 dev_priv->display.update_wm = ironlake_update_wm;
@@ -7279,6 +7634,8 @@ static void intel_init_display(struct drm_device *dev)
7279 "Disable CxSR\n"); 7634 "Disable CxSR\n");
7280 dev_priv->display.update_wm = NULL; 7635 dev_priv->display.update_wm = NULL;
7281 } 7636 }
7637 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
7638 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7282 } else if (IS_GEN6(dev)) { 7639 } else if (IS_GEN6(dev)) {
7283 if (SNB_READ_WM0_LATENCY()) { 7640 if (SNB_READ_WM0_LATENCY()) {
7284 dev_priv->display.update_wm = sandybridge_update_wm; 7641 dev_priv->display.update_wm = sandybridge_update_wm;
@@ -7287,6 +7644,20 @@ static void intel_init_display(struct drm_device *dev)
7287 "Disable CxSR\n"); 7644 "Disable CxSR\n");
7288 dev_priv->display.update_wm = NULL; 7645 dev_priv->display.update_wm = NULL;
7289 } 7646 }
7647 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
7648 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7649 } else if (IS_IVYBRIDGE(dev)) {
7650 /* FIXME: detect B0+ stepping and use auto training */
7651 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
7652 if (SNB_READ_WM0_LATENCY()) {
7653 dev_priv->display.update_wm = sandybridge_update_wm;
7654 } else {
7655 DRM_DEBUG_KMS("Failed to read display plane latency. "
7656 "Disable CxSR\n");
7657 dev_priv->display.update_wm = NULL;
7658 }
7659 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7660
7290 } else 7661 } else
7291 dev_priv->display.update_wm = NULL; 7662 dev_priv->display.update_wm = NULL;
7292 } else if (IS_PINEVIEW(dev)) { 7663 } else if (IS_PINEVIEW(dev)) {
@@ -7304,18 +7675,30 @@ static void intel_init_display(struct drm_device *dev)
7304 dev_priv->display.update_wm = NULL; 7675 dev_priv->display.update_wm = NULL;
7305 } else 7676 } else
7306 dev_priv->display.update_wm = pineview_update_wm; 7677 dev_priv->display.update_wm = pineview_update_wm;
7307 } else if (IS_G4X(dev)) 7678 } else if (IS_G4X(dev)) {
7308 dev_priv->display.update_wm = g4x_update_wm; 7679 dev_priv->display.update_wm = g4x_update_wm;
7309 else if (IS_GEN4(dev)) 7680 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7681 } else if (IS_GEN4(dev)) {
7310 dev_priv->display.update_wm = i965_update_wm; 7682 dev_priv->display.update_wm = i965_update_wm;
7311 else if (IS_GEN3(dev)) { 7683 if (IS_CRESTLINE(dev))
7684 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7685 else if (IS_BROADWATER(dev))
7686 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7687 } else if (IS_GEN3(dev)) {
7312 dev_priv->display.update_wm = i9xx_update_wm; 7688 dev_priv->display.update_wm = i9xx_update_wm;
7313 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 7689 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7690 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7691 } else if (IS_I865G(dev)) {
7692 dev_priv->display.update_wm = i830_update_wm;
7693 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7694 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7314 } else if (IS_I85X(dev)) { 7695 } else if (IS_I85X(dev)) {
7315 dev_priv->display.update_wm = i9xx_update_wm; 7696 dev_priv->display.update_wm = i9xx_update_wm;
7316 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 7697 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
7698 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7317 } else { 7699 } else {
7318 dev_priv->display.update_wm = i830_update_wm; 7700 dev_priv->display.update_wm = i830_update_wm;
7701 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7319 if (IS_845G(dev)) 7702 if (IS_845G(dev))
7320 dev_priv->display.get_fifo_size = i845_get_fifo_size; 7703 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7321 else 7704 else
@@ -7441,12 +7824,11 @@ void intel_modeset_init(struct drm_device *dev)
7441 intel_crtc_init(dev, i); 7824 intel_crtc_init(dev, i);
7442 } 7825 }
7443 7826
7444 intel_setup_outputs(dev);
7445
7446 intel_enable_clock_gating(dev);
7447
7448 /* Just disable it once at startup */ 7827 /* Just disable it once at startup */
7449 i915_disable_vga(dev); 7828 i915_disable_vga(dev);
7829 intel_setup_outputs(dev);
7830
7831 intel_init_clock_gating(dev);
7450 7832
7451 if (IS_IRONLAKE_M(dev)) { 7833 if (IS_IRONLAKE_M(dev)) {
7452 ironlake_enable_drps(dev); 7834 ironlake_enable_drps(dev);
@@ -7456,12 +7838,15 @@ void intel_modeset_init(struct drm_device *dev)
7456 if (IS_GEN6(dev)) 7838 if (IS_GEN6(dev))
7457 gen6_enable_rps(dev_priv); 7839 gen6_enable_rps(dev_priv);
7458 7840
7459 if (IS_IRONLAKE_M(dev))
7460 ironlake_enable_rc6(dev);
7461
7462 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 7841 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7463 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 7842 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
7464 (unsigned long)dev); 7843 (unsigned long)dev);
7844}
7845
7846void intel_modeset_gem_init(struct drm_device *dev)
7847{
7848 if (IS_IRONLAKE_M(dev))
7849 ironlake_enable_rc6(dev);
7465 7850
7466 intel_setup_overlay(dev); 7851 intel_setup_overlay(dev);
7467} 7852}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d20712d527f..831d7a4a0d18 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -140,7 +140,6 @@ struct intel_fbdev {
140struct intel_encoder { 140struct intel_encoder {
141 struct drm_encoder base; 141 struct drm_encoder base;
142 int type; 142 int type;
143 bool load_detect_temp;
144 bool needs_tv_clock; 143 bool needs_tv_clock;
145 void (*hot_plug)(struct intel_encoder *); 144 void (*hot_plug)(struct intel_encoder *);
146 int crtc_mask; 145 int crtc_mask;
@@ -291,13 +290,19 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
291 struct drm_file *file_priv); 290 struct drm_file *file_priv);
292extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 291extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
293extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 292extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
294extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 293
295 struct drm_connector *connector, 294struct intel_load_detect_pipe {
296 struct drm_display_mode *mode, 295 struct drm_framebuffer *release_fb;
297 int *dpms_mode); 296 bool load_detect_temp;
297 int dpms_mode;
298};
299extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
300 struct drm_connector *connector,
301 struct drm_display_mode *mode,
302 struct intel_load_detect_pipe *old);
298extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 303extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
299 struct drm_connector *connector, 304 struct drm_connector *connector,
300 int dpms_mode); 305 struct intel_load_detect_pipe *old);
301 306
302extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 307extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
303extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); 308extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
@@ -339,4 +344,6 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
339 344
340extern void intel_fb_output_poll_changed(struct drm_device *dev); 345extern void intel_fb_output_poll_changed(struct drm_device *dev);
341extern void intel_fb_restore_mode(struct drm_device *dev); 346extern void intel_fb_restore_mode(struct drm_device *dev);
347
348extern void intel_init_clock_gating(struct drm_device *dev);
342#endif /* __INTEL_DRV_H__ */ 349#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e9e6f71418a4..95c4b1429935 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
236 ret = -ENOMEM; 236 ret = -ENOMEM;
237 goto err; 237 goto err;
238 } 238 }
239 obj->agp_type = AGP_USER_CACHED_MEMORY; 239 obj->cache_level = I915_CACHE_LLC;
240 240
241 ret = i915_gem_object_pin(obj, 4096, true); 241 ret = i915_gem_object_pin(obj, 4096, true);
242 if (ret) 242 if (ret)
@@ -286,7 +286,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
286 286
287 if (INTEL_INFO(dev)->gen > 3) { 287 if (INTEL_INFO(dev)->gen > 3) {
288 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 288 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
289 if (IS_GEN6(dev)) 289 if (IS_GEN6(dev) || IS_GEN7(dev))
290 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 290 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
291 I915_WRITE(MI_MODE, mode); 291 I915_WRITE(MI_MODE, mode);
292 } 292 }
@@ -551,10 +551,31 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
551 551
552void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 552void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
553{ 553{
554 struct drm_device *dev = ring->dev;
554 drm_i915_private_t *dev_priv = ring->dev->dev_private; 555 drm_i915_private_t *dev_priv = ring->dev->dev_private;
555 u32 mmio = IS_GEN6(ring->dev) ? 556 u32 mmio = 0;
556 RING_HWS_PGA_GEN6(ring->mmio_base) : 557
557 RING_HWS_PGA(ring->mmio_base); 558 /* The ring status page addresses are no longer next to the rest of
559 * the ring registers as of gen7.
560 */
561 if (IS_GEN7(dev)) {
562 switch (ring->id) {
563 case RING_RENDER:
564 mmio = RENDER_HWS_PGA_GEN7;
565 break;
566 case RING_BLT:
567 mmio = BLT_HWS_PGA_GEN7;
568 break;
569 case RING_BSD:
570 mmio = BSD_HWS_PGA_GEN7;
571 break;
572 }
573 } else if (IS_GEN6(ring->dev)) {
574 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
575 } else {
576 mmio = RING_HWS_PGA(ring->mmio_base);
577 }
578
558 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 579 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
559 POSTING_READ(mmio); 580 POSTING_READ(mmio);
560} 581}
@@ -600,7 +621,7 @@ ring_add_request(struct intel_ring_buffer *ring,
600} 621}
601 622
602static bool 623static bool
603ring_get_irq(struct intel_ring_buffer *ring, u32 flag) 624gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
604{ 625{
605 struct drm_device *dev = ring->dev; 626 struct drm_device *dev = ring->dev;
606 drm_i915_private_t *dev_priv = dev->dev_private; 627 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -609,71 +630,67 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
609 return false; 630 return false;
610 631
611 spin_lock(&ring->irq_lock); 632 spin_lock(&ring->irq_lock);
612 if (ring->irq_refcount++ == 0) 633 if (ring->irq_refcount++ == 0) {
613 ironlake_enable_irq(dev_priv, flag); 634 ring->irq_mask &= ~rflag;
635 I915_WRITE_IMR(ring, ring->irq_mask);
636 ironlake_enable_irq(dev_priv, gflag);
637 }
614 spin_unlock(&ring->irq_lock); 638 spin_unlock(&ring->irq_lock);
615 639
616 return true; 640 return true;
617} 641}
618 642
619static void 643static void
620ring_put_irq(struct intel_ring_buffer *ring, u32 flag) 644gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
621{ 645{
622 struct drm_device *dev = ring->dev; 646 struct drm_device *dev = ring->dev;
623 drm_i915_private_t *dev_priv = dev->dev_private; 647 drm_i915_private_t *dev_priv = dev->dev_private;
624 648
625 spin_lock(&ring->irq_lock); 649 spin_lock(&ring->irq_lock);
626 if (--ring->irq_refcount == 0) 650 if (--ring->irq_refcount == 0) {
627 ironlake_disable_irq(dev_priv, flag); 651 ring->irq_mask |= rflag;
652 I915_WRITE_IMR(ring, ring->irq_mask);
653 ironlake_disable_irq(dev_priv, gflag);
654 }
628 spin_unlock(&ring->irq_lock); 655 spin_unlock(&ring->irq_lock);
629} 656}
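
Both the gen6 and BSD variants share one idea: a per-ring refcount taken under irq_lock, so only the first get unmasks the interrupt source and only the last put masks it again. A minimal sketch of that pattern with the register writes abstracted into callbacks; irq_ref, unmask and mask are illustrative stand-ins, not driver symbols:

#include <linux/spinlock.h>

/* Illustrative model of the refcounted IRQ enable/disable pattern. */
struct irq_ref {
	spinlock_t lock;
	int refcount;
};

static void irq_get(struct irq_ref *r, void (*unmask)(void))
{
	spin_lock(&r->lock);
	if (r->refcount++ == 0)   /* first user unmasks the interrupt */
		unmask();
	spin_unlock(&r->lock);
}

static void irq_put(struct irq_ref *r, void (*mask)(void))
{
	spin_lock(&r->lock);
	if (--r->refcount == 0)   /* last user masks it again */
		mask();
	spin_unlock(&r->lock);
}
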
630 657
631static bool 658static bool
632gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 659bsd_ring_get_irq(struct intel_ring_buffer *ring)
633{ 660{
634 struct drm_device *dev = ring->dev; 661 struct drm_device *dev = ring->dev;
635 drm_i915_private_t *dev_priv = dev->dev_private; 662 drm_i915_private_t *dev_priv = dev->dev_private;
636 663
637 if (!dev->irq_enabled) 664 if (!dev->irq_enabled)
638 return false; 665 return false;
639 666
640 spin_lock(&ring->irq_lock); 667 spin_lock(&ring->irq_lock);
641 if (ring->irq_refcount++ == 0) { 668 if (ring->irq_refcount++ == 0) {
642 ring->irq_mask &= ~rflag; 669 if (IS_G4X(dev))
643 I915_WRITE_IMR(ring, ring->irq_mask); 670 i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
644 ironlake_enable_irq(dev_priv, gflag); 671 else
672 ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
645 } 673 }
646 spin_unlock(&ring->irq_lock); 674 spin_unlock(&ring->irq_lock);
647 675
648 return true; 676 return true;
649} 677}
650
651static void 678static void
652gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 679bsd_ring_put_irq(struct intel_ring_buffer *ring)
653{ 680{
654 struct drm_device *dev = ring->dev; 681 struct drm_device *dev = ring->dev;
655 drm_i915_private_t *dev_priv = dev->dev_private; 682 drm_i915_private_t *dev_priv = dev->dev_private;
656 683
657 spin_lock(&ring->irq_lock); 684 spin_lock(&ring->irq_lock);
658 if (--ring->irq_refcount == 0) { 685 if (--ring->irq_refcount == 0) {
659 ring->irq_mask |= rflag; 686 if (IS_G4X(dev))
660 I915_WRITE_IMR(ring, ring->irq_mask); 687 i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
661 ironlake_disable_irq(dev_priv, gflag); 688 else
689 ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
662 } 690 }
663 spin_unlock(&ring->irq_lock); 691 spin_unlock(&ring->irq_lock);
664} 692}
665 693
666static bool
667bsd_ring_get_irq(struct intel_ring_buffer *ring)
668{
669 return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
670}
671static void
672bsd_ring_put_irq(struct intel_ring_buffer *ring)
673{
674 ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
675}
676
677static int 694static int
678ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) 695ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
679{ 696{
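
[Editor's note] Both the gen6 and the BSD variants above follow the same refcounted unmask/mask pattern: only the 0 -> 1 transition of irq_refcount touches the hardware enable, and only the 1 -> 0 transition undoes it, so nested get/put pairs from different waiters stay balanced. A minimal sketch of the pattern; enable_hw_irq()/disable_hw_irq() are hypothetical stand-ins for the IMR write plus ironlake_enable_irq()/ironlake_disable_irq():

	static bool ring_irq_get(struct intel_ring_buffer *ring, u32 flag)
	{
		if (!ring->dev->irq_enabled)
			return false;

		spin_lock(&ring->irq_lock);
		if (ring->irq_refcount++ == 0)		/* 0 -> 1: first waiter */
			enable_hw_irq(ring, flag);	/* hypothetical */
		spin_unlock(&ring->irq_lock);
		return true;
	}

	static void ring_irq_put(struct intel_ring_buffer *ring, u32 flag)
	{
		spin_lock(&ring->irq_lock);
		if (--ring->irq_refcount == 0)		/* 1 -> 0: last waiter */
			disable_hw_irq(ring, flag);	/* hypothetical */
		spin_unlock(&ring->irq_lock);
	}
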
@@ -759,7 +776,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
759 ret = -ENOMEM; 776 ret = -ENOMEM;
760 goto err; 777 goto err;
761 } 778 }
762 obj->agp_type = AGP_USER_CACHED_MEMORY; 779 obj->cache_level = I915_CACHE_LLC;
763 780
764 ret = i915_gem_object_pin(obj, 4096, true); 781 ret = i915_gem_object_pin(obj, 4096, true);
765 if (ret != 0) { 782 if (ret != 0) {
@@ -800,6 +817,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
800 INIT_LIST_HEAD(&ring->request_list); 817 INIT_LIST_HEAD(&ring->request_list);
801 INIT_LIST_HEAD(&ring->gpu_write_list); 818 INIT_LIST_HEAD(&ring->gpu_write_list);
802 819
820 init_waitqueue_head(&ring->irq_queue);
803 spin_lock_init(&ring->irq_lock); 821 spin_lock_init(&ring->irq_lock);
804 ring->irq_mask = ~0; 822 ring->irq_mask = ~0;
805 823
@@ -872,7 +890,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
872 890
873 /* Disable the ring buffer. The ring must be idle at this point */ 891 /* Disable the ring buffer. The ring must be idle at this point */
874 dev_priv = ring->dev->dev_private; 892 dev_priv = ring->dev->dev_private;
875 ret = intel_wait_ring_buffer(ring, ring->size - 8); 893 ret = intel_wait_ring_idle(ring);
876 if (ret) 894 if (ret)
877 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 895 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
878 ring->name, ret); 896 ring->name, ret);
@@ -1333,7 +1351,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1333 drm_i915_private_t *dev_priv = dev->dev_private; 1351 drm_i915_private_t *dev_priv = dev->dev_private;
1334 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 1352 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1335 1353
1336 if (IS_GEN6(dev)) 1354 if (IS_GEN6(dev) || IS_GEN7(dev))
1337 *ring = gen6_bsd_ring; 1355 *ring = gen6_bsd_ring;
1338 else 1356 else
1339 *ring = bsd_ring; 1357 *ring = bsd_ring;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f23cc5f037a6..c0e0ee63fbf4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,27 +14,24 @@ struct intel_hw_status_page {
14 struct drm_i915_gem_object *obj; 14 struct drm_i915_gem_object *obj;
15}; 15};
16 16
17#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) 17#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
18#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) 18#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
19 19
20#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 20#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
21#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) 21#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
22 22
23#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 23#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
24#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) 24#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
25 25
26#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 26#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
27#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) 27#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
28 28
29#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 29#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
30#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) 30#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
31 31
32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 32#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
33#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) 33#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
34 34#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
35#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
36#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
37#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
38 35
39struct intel_ring_buffer { 36struct intel_ring_buffer {
40 const char *name; 37 const char *name;
@@ -164,7 +161,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
164#define I915_BREADCRUMB_INDEX 0x21 161#define I915_BREADCRUMB_INDEX 0x21
165 162
166void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 163void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
164
167int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); 165int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
166static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
167{
168 return intel_wait_ring_buffer(ring, ring->space - 8);
169}
170
168int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 171int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
169 172
170static inline void intel_ring_emit(struct intel_ring_buffer *ring, 173static inline void intel_ring_emit(struct intel_ring_buffer *ring,
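
[Editor's note] One thing to watch in the new intel_wait_ring_idle() helper: intel_wait_ring_buffer(ring, n) waits until at least n bytes are free, so passing ring->space - 8 (the space that is already free) succeeds immediately whether or not the GPU has drained the ring. The call it replaces in intel_cleanup_ring_buffer() waited for ring->size - 8, and that is presumably still the intent:

	/* Hedged sketch: "idle" should be a property of the ring's size,
	 * not of how much space happens to be free at call time. */
	static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
	{
		return intel_wait_ring_buffer(ring, ring->size - 8);
	}
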
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4324f33212d6..754086f83941 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2544,21 +2544,19 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2544 if (!intel_sdvo) 2544 if (!intel_sdvo)
2545 return false; 2545 return false;
2546 2546
2547 intel_sdvo->sdvo_reg = sdvo_reg;
2548 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
2549 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
2547 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { 2550 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
2548 kfree(intel_sdvo); 2551 kfree(intel_sdvo);
2549 return false; 2552 return false;
2550 } 2553 }
2551 2554
2552 intel_sdvo->sdvo_reg = sdvo_reg; 2555 /* encoder type will be decided later */
2553
2554 intel_encoder = &intel_sdvo->base; 2556 intel_encoder = &intel_sdvo->base;
2555 intel_encoder->type = INTEL_OUTPUT_SDVO; 2557 intel_encoder->type = INTEL_OUTPUT_SDVO;
2556 /* encoder type will be decided later */
2557 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); 2558 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
2558 2559
2559 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
2560 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
2561
2562 /* Read the regs to test if we can talk to the device */ 2560 /* Read the regs to test if we can talk to the device */
2563 for (i = 0; i < 0x40; i++) { 2561 for (i = 0; i < 0x40; i++) {
2564 u8 byte; 2562 u8 byte;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6b22c1dcc015..113e4e7264cd 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1361,15 +1361,14 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1361 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { 1361 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
1362 type = intel_tv_detect_type(intel_tv, connector); 1362 type = intel_tv_detect_type(intel_tv, connector);
1363 } else if (force) { 1363 } else if (force) {
1364 struct drm_crtc *crtc; 1364 struct intel_load_detect_pipe tmp;
1365 int dpms_mode;
1366 1365
1367 crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, 1366 if (intel_get_load_detect_pipe(&intel_tv->base, connector,
1368 &mode, &dpms_mode); 1367 &mode, &tmp)) {
1369 if (crtc) {
1370 type = intel_tv_detect_type(intel_tv, connector); 1368 type = intel_tv_detect_type(intel_tv, connector);
1371 intel_release_load_detect_pipe(&intel_tv->base, connector, 1369 intel_release_load_detect_pipe(&intel_tv->base,
1372 dpms_mode); 1370 connector,
1371 &tmp);
1373 } else 1372 } else
1374 return connector_status_unknown; 1373 return connector_status_unknown;
1375 } else 1374 } else
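
[Editor's note] The load-detect rework changes the calling convention: instead of getting back a crtc pointer plus a dpms mode to restore, the caller now owns a struct intel_load_detect_pipe that the helper fills with whatever state it had to clobber, and the release call restores from it. The usage pattern, condensed (intel_encoder and the probe step are illustrative stand-ins):

	struct intel_load_detect_pipe tmp;

	if (intel_get_load_detect_pipe(intel_encoder, connector, &mode, &tmp)) {
		type = probe_with_live_pipe(connector);	/* hypothetical probe */
		intel_release_load_detect_pipe(intel_encoder, connector, &tmp);
	}
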
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index de70959b9ed5..ca1639918f57 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,6 +11,8 @@ config DRM_NOUVEAU
11 select FRAMEBUFFER_CONSOLE if !EXPERT 11 select FRAMEBUFFER_CONSOLE if !EXPERT
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT 12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT 13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
14 select ACPI_WMI if ACPI
15 select MXM_WMI if ACPI
14 help 16 help
15 Choose this option for open-source nVidia support. 17 Choose this option for open-source nVidia support.
16 18
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e12c97fd8db8..0583677e4581 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -20,6 +20,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
20 nv40_graph.o nv50_graph.o nvc0_graph.o \ 20 nv40_graph.o nv50_graph.o nvc0_graph.o \
21 nv40_grctx.o nv50_grctx.o nvc0_grctx.o \ 21 nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
22 nv84_crypt.o \ 22 nv84_crypt.o \
23 nva3_copy.o nvc0_copy.o \
24 nv40_mpeg.o nv50_mpeg.o \
23 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ 25 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
24 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ 26 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
25 nv50_cursor.o nv50_display.o \ 27 nv50_cursor.o nv50_display.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index a54238058dc5..f0d459bb46e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -4,6 +4,8 @@
4#include <acpi/acpi_drivers.h> 4#include <acpi/acpi_drivers.h>
5#include <acpi/acpi_bus.h> 5#include <acpi/acpi_bus.h>
6#include <acpi/video.h> 6#include <acpi/video.h>
7#include <acpi/acpi.h>
8#include <linux/mxm-wmi.h>
7 9
8#include "drmP.h" 10#include "drmP.h"
9#include "drm.h" 11#include "drm.h"
@@ -35,15 +37,71 @@
35 37
36static struct nouveau_dsm_priv { 38static struct nouveau_dsm_priv {
37 bool dsm_detected; 39 bool dsm_detected;
40 bool optimus_detected;
38 acpi_handle dhandle; 41 acpi_handle dhandle;
39 acpi_handle rom_handle; 42 acpi_handle rom_handle;
40} nouveau_dsm_priv; 43} nouveau_dsm_priv;
41 44
45#define NOUVEAU_DSM_HAS_MUX 0x1
46#define NOUVEAU_DSM_HAS_OPT 0x2
47
42static const char nouveau_dsm_muid[] = { 48static const char nouveau_dsm_muid[] = {
43 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 49 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
44 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, 50 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
45}; 51};
46 52
53static const char nouveau_op_dsm_muid[] = {
54 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
55 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
56};
57
58static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
59{
60 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
61 struct acpi_object_list input;
62 union acpi_object params[4];
63 union acpi_object *obj;
64 int err;
65
66 input.count = 4;
67 input.pointer = params;
68 params[0].type = ACPI_TYPE_BUFFER;
69 params[0].buffer.length = sizeof(nouveau_op_dsm_muid);
70 params[0].buffer.pointer = (char *)nouveau_op_dsm_muid;
71 params[1].type = ACPI_TYPE_INTEGER;
72 params[1].integer.value = 0x00000100;
73 params[2].type = ACPI_TYPE_INTEGER;
74 params[2].integer.value = func;
75 params[3].type = ACPI_TYPE_BUFFER;
76 params[3].buffer.length = 0;
77
78 err = acpi_evaluate_object(handle, "_DSM", &input, &output);
79 if (err) {
80 printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
81 return err;
82 }
83
84 obj = (union acpi_object *)output.pointer;
85
86 if (obj->type == ACPI_TYPE_INTEGER)
87 if (obj->integer.value == 0x80000002) {
88 return -ENODEV;
89 }
90
91 if (obj->type == ACPI_TYPE_BUFFER) {
92 if (obj->buffer.length == 4 && result) {
93 *result = 0;
94 *result |= obj->buffer.pointer[0];
95 *result |= (obj->buffer.pointer[1] << 8);
96 *result |= (obj->buffer.pointer[2] << 16);
97 *result |= (obj->buffer.pointer[3] << 24);
98 }
99 }
100
101 kfree(output.pointer);
102 return 0;
103}
104
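
[Editor's note] Two details in nouveau_optimus_dsm() are worth flagging. The 0x80000002 integer reply takes an early return that skips the kfree() of the _DSM output buffer, leaking it, and the four result bytes are assembled little-endian by hand. A hypothetical leak-free tail that routes every exit through a single kfree():

	obj = (union acpi_object *)output.pointer;
	err = 0;

	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0x80000002)
		err = -ENODEV;		/* function not supported */
	else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 4 && result)
		*result = obj->buffer.pointer[0] |
			  (obj->buffer.pointer[1] << 8) |
			  (obj->buffer.pointer[2] << 16) |
			  (obj->buffer.pointer[3] << 24);

	kfree(output.pointer);
	return err;
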
47static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) 105static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
48{ 106{
49 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 107 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -92,6 +150,8 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
92 150
93static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) 151static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
94{ 152{
153 mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
154 mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
95 return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); 155 return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
96} 156}
97 157
@@ -148,11 +208,11 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
148 .get_client_id = nouveau_dsm_get_client_id, 208 .get_client_id = nouveau_dsm_get_client_id,
149}; 209};
150 210
151static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) 211static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
152{ 212{
153 acpi_handle dhandle, nvidia_handle; 213 acpi_handle dhandle, nvidia_handle;
154 acpi_status status; 214 acpi_status status;
155 int ret; 215 int ret, retval = 0;
156 uint32_t result; 216 uint32_t result;
157 217
158 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 218 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -166,11 +226,17 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
166 226
167 ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED, 227 ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
168 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); 228 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
169 if (ret < 0) 229 if (ret == 0)
170 return false; 230 retval |= NOUVEAU_DSM_HAS_MUX;
171 231
172 nouveau_dsm_priv.dhandle = dhandle; 232 ret = nouveau_optimus_dsm(dhandle, 0, 0, &result);
173 return true; 233 if (ret == 0)
234 retval |= NOUVEAU_DSM_HAS_OPT;
235
236 if (retval)
237 nouveau_dsm_priv.dhandle = dhandle;
238
239 return retval;
174} 240}
175 241
176static bool nouveau_dsm_detect(void) 242static bool nouveau_dsm_detect(void)
@@ -179,22 +245,42 @@ static bool nouveau_dsm_detect(void)
179 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; 245 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
180 struct pci_dev *pdev = NULL; 246 struct pci_dev *pdev = NULL;
181 int has_dsm = 0; 247 int has_dsm = 0;
248 int has_optimus;
182 int vga_count = 0; 249 int vga_count = 0;
250 bool guid_valid;
251 int retval;
252 bool ret = false;
253
254 /* lookup the MXM GUID */
255 guid_valid = mxm_wmi_supported();
183 256
257 if (guid_valid)
258 printk("MXM: GUID detected in BIOS\n");
259
260 /* now do DSM detection */
184 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 261 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
185 vga_count++; 262 vga_count++;
186 263
187 has_dsm |= (nouveau_dsm_pci_probe(pdev) == true); 264 retval = nouveau_dsm_pci_probe(pdev);
265 printk("ret val is %d\n", retval);
266 if (retval & NOUVEAU_DSM_HAS_MUX)
267 has_dsm |= 1;
268 if (retval & NOUVEAU_DSM_HAS_OPT)
269 has_optimus = 1;
188 } 270 }
189 271
190 if (vga_count == 2 && has_dsm) { 272 if (vga_count == 2 && has_dsm && guid_valid) {
191 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); 273 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
192 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 274 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
193 acpi_method_name); 275 acpi_method_name);
194 nouveau_dsm_priv.dsm_detected = true; 276 nouveau_dsm_priv.dsm_detected = true;
195 return true; 277 ret = true;
196 } 278 }
197 return false; 279
280 if (has_optimus == 1)
281 nouveau_dsm_priv.optimus_detected = true;
282
283 return ret;
198} 284}
199 285
200void nouveau_register_dsm_handler(void) 286void nouveau_register_dsm_handler(void)
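
[Editor's note] nouveau_dsm_detect() as merged has two rough edges: has_optimus is declared without an initializer, so the final has_optimus == 1 test reads an uninitialized value whenever no device answers the Optimus _DSM, and the unprefixed "ret val is %d" printk looks like leftover debugging. A tightened sketch of the probe loop:

	int has_dsm = 0;
	int has_optimus = 0;	/* must start at 0, see the test below */

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		vga_count++;

		retval = nouveau_dsm_pci_probe(pdev);
		if (retval & NOUVEAU_DSM_HAS_MUX)
			has_dsm = 1;
		if (retval & NOUVEAU_DSM_HAS_OPT)
			has_optimus = 1;
	}
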
@@ -247,7 +333,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
247 acpi_status status; 333 acpi_status status;
248 acpi_handle dhandle, rom_handle; 334 acpi_handle dhandle, rom_handle;
249 335
250 if (!nouveau_dsm_priv.dsm_detected) 336 if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
251 return false; 337 return false;
252 338
253 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 339 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 90aef64b76f2..729d5fd7c88d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5049,11 +5049,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
5049 pll_lim->vco1.max_n = record[11]; 5049 pll_lim->vco1.max_n = record[11];
5050 pll_lim->min_p = record[12]; 5050 pll_lim->min_p = record[12];
5051 pll_lim->max_p = record[13]; 5051 pll_lim->max_p = record[13];
5052 /* where did this go to?? */ 5052 pll_lim->refclk = ROM16(entry[9]) * 1000;
5053 if ((entry[0] & 0xf0) == 0x80)
5054 pll_lim->refclk = 27000;
5055 else
5056 pll_lim->refclk = 100000;
5057 } 5053 }
5058 5054
5059 /* 5055 /*
@@ -6035,6 +6031,7 @@ parse_dcb_connector_table(struct nvbios *bios)
6035 case DCB_CONNECTOR_DVI_I: 6031 case DCB_CONNECTOR_DVI_I:
6036 case DCB_CONNECTOR_DVI_D: 6032 case DCB_CONNECTOR_DVI_D:
6037 case DCB_CONNECTOR_LVDS: 6033 case DCB_CONNECTOR_LVDS:
6034 case DCB_CONNECTOR_LVDS_SPWG:
6038 case DCB_CONNECTOR_DP: 6035 case DCB_CONNECTOR_DP:
6039 case DCB_CONNECTOR_eDP: 6036 case DCB_CONNECTOR_eDP:
6040 case DCB_CONNECTOR_HDMI_0: 6037 case DCB_CONNECTOR_HDMI_0:
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 8a54fa7edf5c..050c314119df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -82,6 +82,7 @@ enum dcb_connector_type {
82 DCB_CONNECTOR_DVI_I = 0x30, 82 DCB_CONNECTOR_DVI_I = 0x30,
83 DCB_CONNECTOR_DVI_D = 0x31, 83 DCB_CONNECTOR_DVI_D = 0x31,
84 DCB_CONNECTOR_LVDS = 0x40, 84 DCB_CONNECTOR_LVDS = 0x40,
85 DCB_CONNECTOR_LVDS_SPWG = 0x41,
85 DCB_CONNECTOR_DP = 0x46, 86 DCB_CONNECTOR_DP = 0x46,
86 DCB_CONNECTOR_eDP = 0x47, 87 DCB_CONNECTOR_eDP = 0x47,
87 DCB_CONNECTOR_HDMI_0 = 0x60, 88 DCB_CONNECTOR_HDMI_0 = 0x60,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 4cea35c57d15..a7583a8ddb01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -268,9 +268,8 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
268 struct drm_device *dev = chan->dev; 268 struct drm_device *dev = chan->dev;
269 struct drm_nouveau_private *dev_priv = dev->dev_private; 269 struct drm_nouveau_private *dev_priv = dev->dev_private;
270 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 270 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
271 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
272 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
273 unsigned long flags; 271 unsigned long flags;
272 int i;
274 273
275 /* decrement the refcount, and we're done if there's still refs */ 274 /* decrement the refcount, and we're done if there's still refs */
276 if (likely(!atomic_dec_and_test(&chan->users))) { 275 if (likely(!atomic_dec_and_test(&chan->users))) {
@@ -294,19 +293,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
294 /* boot it off the hardware */ 293 /* boot it off the hardware */
295 pfifo->reassign(dev, false); 294 pfifo->reassign(dev, false);
296 295
297 /* We want to give pgraph a chance to idle and get rid of all
298 * potential errors. We need to do this without the context
299 * switch lock held, otherwise the irq handler is unable to
300 * process them.
301 */
302 if (pgraph->channel(dev) == chan)
303 nouveau_wait_for_idle(dev);
304
305 /* destroy the engine specific contexts */ 296 /* destroy the engine specific contexts */
306 pfifo->destroy_context(chan); 297 pfifo->destroy_context(chan);
307 pgraph->destroy_context(chan); 298 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
308 if (pcrypt->destroy_context) 299 if (chan->engctx[i])
309 pcrypt->destroy_context(chan); 300 dev_priv->eng[i]->context_del(chan, i);
301 }
310 302
311 pfifo->reassign(dev, true); 303 pfifo->reassign(dev, true);
312 304
@@ -414,7 +406,7 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
414 struct nouveau_channel *chan; 406 struct nouveau_channel *chan;
415 int ret; 407 int ret;
416 408
417 if (dev_priv->engine.graph.accel_blocked) 409 if (!dev_priv->eng[NVOBJ_ENGINE_GR])
418 return -ENODEV; 410 return -ENODEV;
419 411
420 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) 412 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7ae151109a66..1595d0b6e815 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -442,7 +442,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
442 } 442 }
443 443
444 /* LVDS always needs gpu scaling */ 444 /* LVDS always needs gpu scaling */
445 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS && 445 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
446 value == DRM_MODE_SCALE_NONE) 446 value == DRM_MODE_SCALE_NONE)
447 return -EINVAL; 447 return -EINVAL;
448 448
@@ -650,6 +650,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
650 ret = get_slave_funcs(encoder)->get_modes(encoder, connector); 650 ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
651 651
652 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS || 652 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
653 nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
653 nv_connector->dcb->type == DCB_CONNECTOR_eDP) 654 nv_connector->dcb->type == DCB_CONNECTOR_eDP)
654 ret += nouveau_connector_scaler_modes_add(connector); 655 ret += nouveau_connector_scaler_modes_add(connector);
655 656
@@ -810,6 +811,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
810 type = DRM_MODE_CONNECTOR_HDMIA; 811 type = DRM_MODE_CONNECTOR_HDMIA;
811 break; 812 break;
812 case DCB_CONNECTOR_LVDS: 813 case DCB_CONNECTOR_LVDS:
814 case DCB_CONNECTOR_LVDS_SPWG:
813 type = DRM_MODE_CONNECTOR_LVDS; 815 type = DRM_MODE_CONNECTOR_LVDS;
814 funcs = &nouveau_connector_funcs_lvds; 816 funcs = &nouveau_connector_funcs_lvds;
815 break; 817 break;
@@ -838,7 +840,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
838 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); 840 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
839 841
840 /* Check if we need dithering enabled */ 842 /* Check if we need dithering enabled */
841 if (dcb->type == DCB_CONNECTOR_LVDS) { 843 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
842 bool dummy, is_24bit = false; 844 bool dummy, is_24bit = false;
843 845
844 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit); 846 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
@@ -883,7 +885,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
883 nv_connector->use_dithering ? 885 nv_connector->use_dithering ?
884 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); 886 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
885 887
886 if (dcb->type != DCB_CONNECTOR_LVDS) { 888 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) {
887 if (dev_priv->card_type >= NV_50) 889 if (dev_priv->card_type >= NV_50)
888 connector->polled = DRM_CONNECTOR_POLL_HPD; 890 connector->polled = DRM_CONNECTOR_POLL_HPD;
889 else 891 else
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 764c15d537ba..eb514ea29377 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -276,7 +276,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
276 struct nouveau_fence *fence; 276 struct nouveau_fence *fence;
277 int ret; 277 int ret;
278 278
279 if (dev_priv->engine.graph.accel_blocked) 279 if (!dev_priv->channel)
280 return -ENODEV; 280 return -ENODEV;
281 281
282 s = kzalloc(sizeof(*s), GFP_KERNEL); 282 s = kzalloc(sizeof(*s), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 155ebdcbf06f..02c6f37d8bd7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -162,11 +162,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
162 struct drm_device *dev = pci_get_drvdata(pdev); 162 struct drm_device *dev = pci_get_drvdata(pdev);
163 struct drm_nouveau_private *dev_priv = dev->dev_private; 163 struct drm_nouveau_private *dev_priv = dev->dev_private;
164 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 164 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
165 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
166 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 165 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
167 struct nouveau_channel *chan; 166 struct nouveau_channel *chan;
168 struct drm_crtc *crtc; 167 struct drm_crtc *crtc;
169 int ret, i; 168 int ret, i, e;
170 169
171 if (pm_state.event == PM_EVENT_PRETHAW) 170 if (pm_state.event == PM_EVENT_PRETHAW)
172 return 0; 171 return 0;
@@ -206,12 +205,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
206 nouveau_channel_idle(chan); 205 nouveau_channel_idle(chan);
207 } 206 }
208 207
209 pgraph->fifo_access(dev, false);
210 nouveau_wait_for_idle(dev);
211 pfifo->reassign(dev, false); 208 pfifo->reassign(dev, false);
212 pfifo->disable(dev); 209 pfifo->disable(dev);
213 pfifo->unload_context(dev); 210 pfifo->unload_context(dev);
214 pgraph->unload_context(dev); 211
212 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
213 if (dev_priv->eng[e]) {
214 ret = dev_priv->eng[e]->fini(dev, e);
215 if (ret)
216 goto out_abort;
217 }
218 }
215 219
216 ret = pinstmem->suspend(dev); 220 ret = pinstmem->suspend(dev);
217 if (ret) { 221 if (ret) {
@@ -242,9 +246,12 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
242 246
243out_abort: 247out_abort:
244 NV_INFO(dev, "Re-enabling acceleration..\n"); 248 NV_INFO(dev, "Re-enabling acceleration..\n");
249 for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) {
250 if (dev_priv->eng[e])
251 dev_priv->eng[e]->init(dev, e);
252 }
245 pfifo->enable(dev); 253 pfifo->enable(dev);
246 pfifo->reassign(dev, true); 254 pfifo->reassign(dev, true);
247 pgraph->fifo_access(dev, true);
248 return ret; 255 return ret;
249} 256}
250 257
@@ -299,8 +306,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
299 engine->mc.init(dev); 306 engine->mc.init(dev);
300 engine->timer.init(dev); 307 engine->timer.init(dev);
301 engine->fb.init(dev); 308 engine->fb.init(dev);
302 engine->graph.init(dev); 309 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
303 engine->crypt.init(dev); 310 if (dev_priv->eng[i])
311 dev_priv->eng[i]->init(dev, i);
312 }
304 engine->fifo.init(dev); 313 engine->fifo.init(dev);
305 314
306 nouveau_irq_postinstall(dev); 315 nouveau_irq_postinstall(dev);
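
[Editor's note] The suspend and resume paths now bracket the engine array symmetrically: fini() walks the array from the highest index down so engines stop before anything they depend on, init() walks it from the lowest index up, and the out_abort path restarts only the engines that had not yet been shut down (e + 1 onwards). The convention in isolation:

	/* Sketch of the ordering convention used above. */
	for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--)	/* suspend: high -> low */
		if (dev_priv->eng[e])
			dev_priv->eng[e]->fini(dev, e);

	for (e = 0; e < NVOBJ_ENGINE_NR; e++)		/* resume: low -> high */
		if (dev_priv->eng[e])
			dev_priv->eng[e]->init(dev, e);
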
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index a76514a209b3..9c56331941e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -150,13 +150,12 @@ enum nouveau_flags {
150 150
151#define NVOBJ_ENGINE_SW 0 151#define NVOBJ_ENGINE_SW 0
152#define NVOBJ_ENGINE_GR 1 152#define NVOBJ_ENGINE_GR 1
153#define NVOBJ_ENGINE_PPP 2 153#define NVOBJ_ENGINE_CRYPT 2
154#define NVOBJ_ENGINE_COPY 3 154#define NVOBJ_ENGINE_COPY0 3
155#define NVOBJ_ENGINE_VP 4 155#define NVOBJ_ENGINE_COPY1 4
156#define NVOBJ_ENGINE_CRYPT 5 156#define NVOBJ_ENGINE_MPEG 5
157#define NVOBJ_ENGINE_BSP 6 157#define NVOBJ_ENGINE_DISPLAY 15
158#define NVOBJ_ENGINE_DISPLAY 0xcafe0001 158#define NVOBJ_ENGINE_NR 16
159#define NVOBJ_ENGINE_INT 0xdeadbeef
160 159
161#define NVOBJ_FLAG_DONT_MAP (1 << 0) 160#define NVOBJ_FLAG_DONT_MAP (1 << 0)
162#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 161#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
@@ -245,11 +244,8 @@ struct nouveau_channel {
245 struct nouveau_gpuobj *cache; 244 struct nouveau_gpuobj *cache;
246 void *fifo_priv; 245 void *fifo_priv;
247 246
248 /* PGRAPH context */ 247 /* Execution engine contexts */
249 /* XXX may be merge 2 pointers as private data ??? */ 248 void *engctx[NVOBJ_ENGINE_NR];
250 struct nouveau_gpuobj *ramin_grctx;
251 struct nouveau_gpuobj *crypt_ctx;
252 void *pgraph_ctx;
253 249
254 /* NV50 VM */ 250 /* NV50 VM */
255 struct nouveau_vm *vm; 251 struct nouveau_vm *vm;
@@ -298,6 +294,18 @@ struct nouveau_channel {
298 } debugfs; 294 } debugfs;
299}; 295};
300 296
297struct nouveau_exec_engine {
298 void (*destroy)(struct drm_device *, int engine);
299 int (*init)(struct drm_device *, int engine);
300 int (*fini)(struct drm_device *, int engine);
301 int (*context_new)(struct nouveau_channel *, int engine);
302 void (*context_del)(struct nouveau_channel *, int engine);
303 int (*object_new)(struct nouveau_channel *, int engine,
304 u32 handle, u16 class);
305 void (*set_tile_region)(struct drm_device *dev, int i);
306 void (*tlb_flush)(struct drm_device *, int engine);
307};
308
301struct nouveau_instmem_engine { 309struct nouveau_instmem_engine {
302 void *priv; 310 void *priv;
303 311
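
[Editor's note] struct nouveau_exec_engine is the vtable that replaces the dedicated pgraph and crypt engine structs removed below: GR, crypt, copy and MPEG all become slots in dev_priv->eng[] behind the same hooks, with per-channel state hung off chan->engctx[engine]. A sketch of how an engine might plug in; every name here is illustrative, not the real nv50 implementation, and NVOBJ_ENGINE_ADD is the registration macro introduced further down:

	struct my_engine {
		struct nouveau_exec_engine base;
		/* engine-private state ... */
	};

	static int
	my_context_new(struct nouveau_channel *chan, int engine)
	{
		chan->engctx[engine] = my_ctx_alloc(chan);	/* hypothetical */
		return chan->engctx[engine] ? 0 : -ENOMEM;
	}

	static void
	my_context_del(struct nouveau_channel *chan, int engine)
	{
		my_ctx_free(chan->engctx[engine]);		/* hypothetical */
		chan->engctx[engine] = NULL;
	}

	int
	my_engine_create(struct drm_device *dev)
	{
		struct my_engine *eng = kzalloc(sizeof(*eng), GFP_KERNEL);

		if (!eng)
			return -ENOMEM;
		eng->base.context_new = my_context_new;
		eng->base.context_del = my_context_del;
		/* ... remaining hooks ... */
		NVOBJ_ENGINE_ADD(dev, GR, &eng->base);
		return 0;
	}
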
@@ -364,30 +372,6 @@ struct nouveau_fifo_engine {
364 void (*tlb_flush)(struct drm_device *dev); 372 void (*tlb_flush)(struct drm_device *dev);
365}; 373};
366 374
367struct nouveau_pgraph_engine {
368 bool accel_blocked;
369 bool registered;
370 int grctx_size;
371 void *priv;
372
373 /* NV2x/NV3x context table (0x400780) */
374 struct nouveau_gpuobj *ctx_table;
375
376 int (*init)(struct drm_device *);
377 void (*takedown)(struct drm_device *);
378
379 void (*fifo_access)(struct drm_device *, bool);
380
381 struct nouveau_channel *(*channel)(struct drm_device *);
382 int (*create_context)(struct nouveau_channel *);
383 void (*destroy_context)(struct nouveau_channel *);
384 int (*load_context)(struct nouveau_channel *);
385 int (*unload_context)(struct drm_device *);
386 void (*tlb_flush)(struct drm_device *dev);
387
388 void (*set_tile_region)(struct drm_device *dev, int i);
389};
390
391struct nouveau_display_engine { 375struct nouveau_display_engine {
392 void *priv; 376 void *priv;
393 int (*early_init)(struct drm_device *); 377 int (*early_init)(struct drm_device *);
@@ -426,6 +410,19 @@ struct nouveau_pm_voltage {
426 int nr_level; 410 int nr_level;
427}; 411};
428 412
413struct nouveau_pm_memtiming {
414 int id;
415 u32 reg_100220;
416 u32 reg_100224;
417 u32 reg_100228;
418 u32 reg_10022c;
419 u32 reg_100230;
420 u32 reg_100234;
421 u32 reg_100238;
422 u32 reg_10023c;
423 u32 reg_100240;
424};
425
429#define NOUVEAU_PM_MAX_LEVEL 8 426#define NOUVEAU_PM_MAX_LEVEL 8
430struct nouveau_pm_level { 427struct nouveau_pm_level {
431 struct device_attribute dev_attr; 428 struct device_attribute dev_attr;
@@ -436,11 +433,13 @@ struct nouveau_pm_level {
436 u32 memory; 433 u32 memory;
437 u32 shader; 434 u32 shader;
438 u32 unk05; 435 u32 unk05;
436 u32 unk0a;
439 437
440 u8 voltage; 438 u8 voltage;
441 u8 fanspeed; 439 u8 fanspeed;
442 440
443 u16 memscript; 441 u16 memscript;
442 struct nouveau_pm_memtiming *timing;
444}; 443};
445 444
446struct nouveau_pm_temp_sensor_constants { 445struct nouveau_pm_temp_sensor_constants {
@@ -457,17 +456,6 @@ struct nouveau_pm_threshold_temp {
457 s16 fan_boost; 456 s16 fan_boost;
458}; 457};
459 458
460struct nouveau_pm_memtiming {
461 u32 reg_100220;
462 u32 reg_100224;
463 u32 reg_100228;
464 u32 reg_10022c;
465 u32 reg_100230;
466 u32 reg_100234;
467 u32 reg_100238;
468 u32 reg_10023c;
469};
470
471struct nouveau_pm_memtimings { 459struct nouveau_pm_memtimings {
472 bool supported; 460 bool supported;
473 struct nouveau_pm_memtiming *timing; 461 struct nouveau_pm_memtiming *timing;
@@ -499,16 +487,6 @@ struct nouveau_pm_engine {
499 int (*temp_get)(struct drm_device *); 487 int (*temp_get)(struct drm_device *);
500}; 488};
501 489
502struct nouveau_crypt_engine {
503 bool registered;
504
505 int (*init)(struct drm_device *);
506 void (*takedown)(struct drm_device *);
507 int (*create_context)(struct nouveau_channel *);
508 void (*destroy_context)(struct nouveau_channel *);
509 void (*tlb_flush)(struct drm_device *dev);
510};
511
512struct nouveau_vram_engine { 490struct nouveau_vram_engine {
513 int (*init)(struct drm_device *); 491 int (*init)(struct drm_device *);
514 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, 492 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
@@ -523,12 +501,10 @@ struct nouveau_engine {
523 struct nouveau_mc_engine mc; 501 struct nouveau_mc_engine mc;
524 struct nouveau_timer_engine timer; 502 struct nouveau_timer_engine timer;
525 struct nouveau_fb_engine fb; 503 struct nouveau_fb_engine fb;
526 struct nouveau_pgraph_engine graph;
527 struct nouveau_fifo_engine fifo; 504 struct nouveau_fifo_engine fifo;
528 struct nouveau_display_engine display; 505 struct nouveau_display_engine display;
529 struct nouveau_gpio_engine gpio; 506 struct nouveau_gpio_engine gpio;
530 struct nouveau_pm_engine pm; 507 struct nouveau_pm_engine pm;
531 struct nouveau_crypt_engine crypt;
532 struct nouveau_vram_engine vram; 508 struct nouveau_vram_engine vram;
533}; 509};
534 510
@@ -637,6 +613,7 @@ struct drm_nouveau_private {
637 enum nouveau_card_type card_type; 613 enum nouveau_card_type card_type;
638 /* exact chipset, derived from NV_PMC_BOOT_0 */ 614 /* exact chipset, derived from NV_PMC_BOOT_0 */
639 int chipset; 615 int chipset;
616 int stepping;
640 int flags; 617 int flags;
641 618
642 void __iomem *mmio; 619 void __iomem *mmio;
@@ -647,6 +624,7 @@ struct drm_nouveau_private {
647 u32 ramin_base; 624 u32 ramin_base;
648 bool ramin_available; 625 bool ramin_available;
649 struct drm_mm ramin_heap; 626 struct drm_mm ramin_heap;
627 struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
650 struct list_head gpuobj_list; 628 struct list_head gpuobj_list;
651 struct list_head classes; 629 struct list_head classes;
652 630
@@ -745,10 +723,6 @@ struct drm_nouveau_private {
745 uint32_t crtc_owner; 723 uint32_t crtc_owner;
746 uint32_t dac_users[4]; 724 uint32_t dac_users[4];
747 725
748 struct nouveau_suspend_resume {
749 uint32_t *ramin_copy;
750 } susres;
751
752 struct backlight_device *backlight; 726 struct backlight_device *backlight;
753 727
754 struct { 728 struct {
@@ -757,8 +731,6 @@ struct drm_nouveau_private {
757 731
758 struct nouveau_fbdev *nfbdev; 732 struct nouveau_fbdev *nfbdev;
759 struct apertures_struct *apertures; 733 struct apertures_struct *apertures;
760
761 bool powered_down;
762}; 734};
763 735
764static inline struct drm_nouveau_private * 736static inline struct drm_nouveau_private *
@@ -883,17 +855,27 @@ extern void nouveau_channel_ref(struct nouveau_channel *chan,
883extern void nouveau_channel_idle(struct nouveau_channel *chan); 855extern void nouveau_channel_idle(struct nouveau_channel *chan);
884 856
885/* nouveau_object.c */ 857/* nouveau_object.c */
886#define NVOBJ_CLASS(d,c,e) do { \ 858#define NVOBJ_ENGINE_ADD(d, e, p) do { \
859 struct drm_nouveau_private *dev_priv = (d)->dev_private; \
860 dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \
861} while (0)
862
863#define NVOBJ_ENGINE_DEL(d, e) do { \
864 struct drm_nouveau_private *dev_priv = (d)->dev_private; \
865 dev_priv->eng[NVOBJ_ENGINE_##e] = NULL; \
866} while (0)
867
868#define NVOBJ_CLASS(d, c, e) do { \
887 int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \ 869 int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
888 if (ret) \ 870 if (ret) \
889 return ret; \ 871 return ret; \
890} while(0) 872} while (0)
891 873
892#define NVOBJ_MTHD(d,c,m,e) do { \ 874#define NVOBJ_MTHD(d, c, m, e) do { \
893 int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \ 875 int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
894 if (ret) \ 876 if (ret) \
895 return ret; \ 877 return ret; \
896} while(0) 878} while (0)
897 879
898extern int nouveau_gpuobj_early_init(struct drm_device *); 880extern int nouveau_gpuobj_early_init(struct drm_device *);
899extern int nouveau_gpuobj_init(struct drm_device *); 881extern int nouveau_gpuobj_init(struct drm_device *);
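
[Editor's note] NVOBJ_ENGINE_ADD/DEL only swap a pointer in dev_priv->eng[]; ownership of the engine struct stays with the engine code, so a destroy hook pairs the DEL with its own free. Matching teardown for the creation sketch above (illustrative; nv_engine() is the lookup helper added further down):

	static void
	my_engine_destroy(struct drm_device *dev, int engine)
	{
		struct my_engine *eng = nv_engine(dev, engine);

		NVOBJ_ENGINE_DEL(dev, GR);
		kfree(eng);
	}
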
@@ -903,7 +885,7 @@ extern void nouveau_gpuobj_resume(struct drm_device *dev);
903extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); 885extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
904extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, 886extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
905 int (*exec)(struct nouveau_channel *, 887 int (*exec)(struct nouveau_channel *,
906 u32 class, u32 mthd, u32 data)); 888 u32 class, u32 mthd, u32 data));
907extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); 889extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
908extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32); 890extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
909extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, 891extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
@@ -1137,81 +1119,50 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
1137extern int nvc0_fifo_unload_context(struct drm_device *); 1119extern int nvc0_fifo_unload_context(struct drm_device *);
1138 1120
1139/* nv04_graph.c */ 1121/* nv04_graph.c */
1140extern int nv04_graph_init(struct drm_device *); 1122extern int nv04_graph_create(struct drm_device *);
1141extern void nv04_graph_takedown(struct drm_device *);
1142extern void nv04_graph_fifo_access(struct drm_device *, bool); 1123extern void nv04_graph_fifo_access(struct drm_device *, bool);
1143extern struct nouveau_channel *nv04_graph_channel(struct drm_device *); 1124extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
1144extern int nv04_graph_create_context(struct nouveau_channel *);
1145extern void nv04_graph_destroy_context(struct nouveau_channel *);
1146extern int nv04_graph_load_context(struct nouveau_channel *);
1147extern int nv04_graph_unload_context(struct drm_device *);
1148extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, 1125extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
1149 u32 class, u32 mthd, u32 data); 1126 u32 class, u32 mthd, u32 data);
1150extern struct nouveau_bitfield nv04_graph_nsource[]; 1127extern struct nouveau_bitfield nv04_graph_nsource[];
1151 1128
1152/* nv10_graph.c */ 1129/* nv10_graph.c */
1153extern int nv10_graph_init(struct drm_device *); 1130extern int nv10_graph_create(struct drm_device *);
1154extern void nv10_graph_takedown(struct drm_device *);
1155extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); 1131extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
1156extern int nv10_graph_create_context(struct nouveau_channel *);
1157extern void nv10_graph_destroy_context(struct nouveau_channel *);
1158extern int nv10_graph_load_context(struct nouveau_channel *);
1159extern int nv10_graph_unload_context(struct drm_device *);
1160extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
1161extern struct nouveau_bitfield nv10_graph_intr[]; 1132extern struct nouveau_bitfield nv10_graph_intr[];
1162extern struct nouveau_bitfield nv10_graph_nstatus[]; 1133extern struct nouveau_bitfield nv10_graph_nstatus[];
1163 1134
1164/* nv20_graph.c */ 1135/* nv20_graph.c */
1165extern int nv20_graph_create_context(struct nouveau_channel *); 1136extern int nv20_graph_create(struct drm_device *);
1166extern void nv20_graph_destroy_context(struct nouveau_channel *);
1167extern int nv20_graph_load_context(struct nouveau_channel *);
1168extern int nv20_graph_unload_context(struct drm_device *);
1169extern int nv20_graph_init(struct drm_device *);
1170extern void nv20_graph_takedown(struct drm_device *);
1171extern int nv30_graph_init(struct drm_device *);
1172extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
1173 1137
1174/* nv40_graph.c */ 1138/* nv40_graph.c */
1175extern int nv40_graph_init(struct drm_device *); 1139extern int nv40_graph_create(struct drm_device *);
1176extern void nv40_graph_takedown(struct drm_device *);
1177extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
1178extern int nv40_graph_create_context(struct nouveau_channel *);
1179extern void nv40_graph_destroy_context(struct nouveau_channel *);
1180extern int nv40_graph_load_context(struct nouveau_channel *);
1181extern int nv40_graph_unload_context(struct drm_device *);
1182extern void nv40_grctx_init(struct nouveau_grctx *); 1140extern void nv40_grctx_init(struct nouveau_grctx *);
1183extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
1184 1141
1185/* nv50_graph.c */ 1142/* nv50_graph.c */
1186extern int nv50_graph_init(struct drm_device *); 1143extern int nv50_graph_create(struct drm_device *);
1187extern void nv50_graph_takedown(struct drm_device *);
1188extern void nv50_graph_fifo_access(struct drm_device *, bool);
1189extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
1190extern int nv50_graph_create_context(struct nouveau_channel *);
1191extern void nv50_graph_destroy_context(struct nouveau_channel *);
1192extern int nv50_graph_load_context(struct nouveau_channel *);
1193extern int nv50_graph_unload_context(struct drm_device *);
1194extern int nv50_grctx_init(struct nouveau_grctx *); 1144extern int nv50_grctx_init(struct nouveau_grctx *);
1195extern void nv50_graph_tlb_flush(struct drm_device *dev);
1196extern void nv84_graph_tlb_flush(struct drm_device *dev);
1197extern struct nouveau_enum nv50_data_error_names[]; 1145extern struct nouveau_enum nv50_data_error_names[];
1146extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
1198 1147
1199/* nvc0_graph.c */ 1148/* nvc0_graph.c */
1200extern int nvc0_graph_init(struct drm_device *); 1149extern int nvc0_graph_create(struct drm_device *);
1201extern void nvc0_graph_takedown(struct drm_device *); 1150extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
1202extern void nvc0_graph_fifo_access(struct drm_device *, bool);
1203extern struct nouveau_channel *nvc0_graph_channel(struct drm_device *);
1204extern int nvc0_graph_create_context(struct nouveau_channel *);
1205extern void nvc0_graph_destroy_context(struct nouveau_channel *);
1206extern int nvc0_graph_load_context(struct nouveau_channel *);
1207extern int nvc0_graph_unload_context(struct drm_device *);
1208 1151
1209/* nv84_crypt.c */ 1152/* nv84_crypt.c */
1210extern int nv84_crypt_init(struct drm_device *dev); 1153extern int nv84_crypt_create(struct drm_device *);
1211extern void nv84_crypt_fini(struct drm_device *dev); 1154
1212extern int nv84_crypt_create_context(struct nouveau_channel *); 1155/* nva3_copy.c */
1213extern void nv84_crypt_destroy_context(struct nouveau_channel *); 1156extern int nva3_copy_create(struct drm_device *dev);
1214extern void nv84_crypt_tlb_flush(struct drm_device *dev); 1157
1158/* nvc0_copy.c */
1159extern int nvc0_copy_create(struct drm_device *dev, int engine);
1160
1161/* nv40_mpeg.c */
1162extern int nv40_mpeg_create(struct drm_device *dev);
1163
1164/* nv50_mpeg.c */
1165extern int nv50_mpeg_create(struct drm_device *dev);
1215 1166
1216/* nv04_instmem.c */ 1167/* nv04_instmem.c */
1217extern int nv04_instmem_init(struct drm_device *); 1168extern int nv04_instmem_init(struct drm_device *);
@@ -1402,8 +1353,8 @@ bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
1402/* nv50_calc. */ 1353/* nv50_calc. */
1403int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, 1354int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
1404 int *N1, int *M1, int *N2, int *M2, int *P); 1355 int *N1, int *M1, int *N2, int *M2, int *P);
1405int nv50_calc_pll2(struct drm_device *, struct pll_lims *, 1356int nva3_calc_pll(struct drm_device *, struct pll_lims *,
1406 int clk, int *N, int *fN, int *M, int *P); 1357 int clk, int *N, int *fN, int *M, int *P);
1407 1358
1408#ifndef ioread32_native 1359#ifndef ioread32_native
1409#ifdef __BIG_ENDIAN 1360#ifdef __BIG_ENDIAN
@@ -1579,6 +1530,13 @@ nv_match_device(struct drm_device *dev, unsigned device,
1579 dev->pdev->subsystem_device == sub_device; 1530 dev->pdev->subsystem_device == sub_device;
1580} 1531}
1581 1532
1533static inline void *
1534nv_engine(struct drm_device *dev, int engine)
1535{
1536 struct drm_nouveau_private *dev_priv = dev->dev_private;
1537 return (void *)dev_priv->eng[engine];
1538}
1539
1582/* returns 1 if device is one of the nv4x using the 0x4497 object class, 1540/* returns 1 if device is one of the nv4x using the 0x4497 object class,
1583 * helpful to determine a number of other hardware features 1541 * helpful to determine a number of other hardware features
1584 */ 1542 */
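
[Editor's note] nv_engine() returns void * deliberately: because the exec-engine vtable is embedded as the first member of each engine's private struct, callers recover their containing type without a cast at every call site. Typical use (sketch, with the illustrative type from the notes above):

	/* Implicit void * conversion: no cast needed at the call site. */
	struct my_engine *eng = nv_engine(dev, NVOBJ_ENGINE_GR);
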
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 4a8ad1307fa4..86c2e374e938 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -87,10 +87,10 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
87 cp_out(ctx, CP_BRA | (mod << 18) | ip | flag | 87 cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
88 (state ? 0 : CP_BRA_IF_CLEAR)); 88 (state ? 0 : CP_BRA_IF_CLEAR));
89} 89}
90#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 90#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
91#ifdef CP_BRA_MOD 91#ifdef CP_BRA_MOD
92#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 92#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
93#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0) 93#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
94#endif 94#endif
95 95
96static inline void 96static inline void
@@ -98,14 +98,14 @@ _cp_wait(struct nouveau_grctx *ctx, int flag, int state)
98{ 98{
99 cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0)); 99 cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
100} 100}
101#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s) 101#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
102 102
103static inline void 103static inline void
104_cp_set(struct nouveau_grctx *ctx, int flag, int state) 104_cp_set(struct nouveau_grctx *ctx, int flag, int state)
105{ 105{
106 cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0)); 106 cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
107} 107}
108#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s) 108#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
109 109
110static inline void 110static inline void
111cp_pos(struct nouveau_grctx *ctx, int offset) 111cp_pos(struct nouveau_grctx *ctx, int offset)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c3e953b08992..2960f583dc38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -51,8 +51,7 @@ nv10_mem_update_tile_region(struct drm_device *dev,
51 struct drm_nouveau_private *dev_priv = dev->dev_private; 51 struct drm_nouveau_private *dev_priv = dev->dev_private;
52 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 52 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
53 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 53 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
54 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 54 int i = tile - dev_priv->tile.reg, j;
55 int i = tile - dev_priv->tile.reg;
56 unsigned long save; 55 unsigned long save;
57 56
58 nouveau_fence_unref(&tile->fence); 57 nouveau_fence_unref(&tile->fence);
@@ -70,7 +69,10 @@ nv10_mem_update_tile_region(struct drm_device *dev,
70 nouveau_wait_for_idle(dev); 69 nouveau_wait_for_idle(dev);
71 70
72 pfb->set_tile_region(dev, i); 71 pfb->set_tile_region(dev, i);
73 pgraph->set_tile_region(dev, i); 72 for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
73 if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
74 dev_priv->eng[j]->set_tile_region(dev, i);
75 }
74 76
75 pfifo->cache_pull(dev, true); 77 pfifo->cache_pull(dev, true);
76 pfifo->reassign(dev, true); 78 pfifo->reassign(dev, true);
@@ -595,10 +597,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
595 if (!memtimings->timing) 597 if (!memtimings->timing)
596 return; 598 return;
597 599
598 /* Get "some number" from the timing reg for NV_40 600 /* Get "some number" from the timing reg for NV_40 and NV_50
599 * Used in calculations later */ 601 * Used in calculations later */
600 if(dev_priv->card_type == NV_40) { 602 if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) {
601 magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24; 603 magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
602 } 604 }
603 605
604 entry = mem + mem[1]; 606 entry = mem + mem[1];
@@ -641,51 +643,68 @@ nouveau_mem_timing_init(struct drm_device *dev)
641 /* XXX: I don't trust the -1's and +1's... they must come 643 /* XXX: I don't trust the -1's and +1's... they must come
642 * from somewhere! */ 644 * from somewhere! */
643 timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | 645 timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
644 tUNK_18 << 16 | 646 max(tUNK_18, (u8) 1) << 16 |
645 (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; 647 (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
646 if(dev_priv->chipset == 0xa8) { 648 if (dev_priv->chipset == 0xa8) {
647 timing->reg_100224 |= (tUNK_2 - 1); 649 timing->reg_100224 |= (tUNK_2 - 1);
648 } else { 650 } else {
649 timing->reg_100224 |= (tUNK_2 + 2 - magic_number); 651 timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
650 } 652 }
651 653
652 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); 654 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
653 if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) { 655 if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa)
654 timing->reg_100228 |= (tUNK_19 - 1) << 24; 656 timing->reg_100228 |= (tUNK_19 - 1) << 24;
655 } 657 else
658 timing->reg_100228 |= magic_number << 24;
656 659
657 if(dev_priv->card_type == NV_40) { 660 if (dev_priv->card_type == NV_40) {
658 /* NV40: don't know what the rest of the regs are.. 661 /* NV40: don't know what the rest of the regs are..
659 * And don't need to know either */ 662 * And don't need to know either */
660 timing->reg_100228 |= 0x20200000 | magic_number << 24; 663 timing->reg_100228 |= 0x20200000;
661 } else if(dev_priv->card_type >= NV_50) { 664 } else if (dev_priv->card_type >= NV_50) {
662 /* XXX: reg_10022c */ 665 if (dev_priv->chipset < 0x98 ||
663 timing->reg_10022c = tUNK_2 - 1; 666 (dev_priv->chipset == 0x98 &&
667 dev_priv->stepping <= 0xa1)) {
668 timing->reg_10022c = (0x14 + tUNK_2) << 24 |
669 0x16 << 16 |
670 (tUNK_2 - 1) << 8 |
671 (tUNK_2 - 1);
672 } else {
673 /* XXX: reg_10022c for recentish cards */
674 timing->reg_10022c = tUNK_2 - 1;
675 }
664 676
665 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | 677 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
666 tUNK_13 << 8 | tUNK_13); 678 tUNK_13 << 8 | tUNK_13);
667 679
668 timing->reg_100234 = (tRAS << 24 | tRC); 680 timing->reg_100234 = (tRAS << 24 | tRC);
669 timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; 681 timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
670 682
671 if(dev_priv->chipset < 0xa3) { 683 if (dev_priv->chipset < 0x98 ||
684 (dev_priv->chipset == 0x98 &&
685 dev_priv->stepping <= 0xa1)) {
672 timing->reg_100234 |= (tUNK_2 + 2) << 8; 686 timing->reg_100234 |= (tUNK_2 + 2) << 8;
673 } else { 687 } else {
674 /* XXX: +6? */ 688 /* XXX: +6? */
675 timing->reg_100234 |= (tUNK_19 + 6) << 8; 689 timing->reg_100234 |= (tUNK_19 + 6) << 8;
676 } 690 }
677 691
678 /* XXX; reg_100238, reg_10023c 692 /* XXX; reg_100238
679 * reg_100238: 0x00?????? 693 * reg_100238: 0x00?????? */
680 * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
681 timing->reg_10023c = 0x202; 694 timing->reg_10023c = 0x202;
682 if(dev_priv->chipset < 0xa3) { 695 if (dev_priv->chipset < 0x98 ||
696 (dev_priv->chipset == 0x98 &&
697 dev_priv->stepping <= 0xa1)) {
683 timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; 698 timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
684 } else { 699 } else {
685 /* currently unknown 700 /* XXX: reg_10023c
701 * currently unknown
686 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ 702 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
687 } 703 }
704
705 /* XXX: reg_100240? */
688 } 706 }
707 timing->id = i;
689 708
690 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, 709 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
691 timing->reg_100220, timing->reg_100224, 710 timing->reg_100220, timing->reg_100224,
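
[Editor's note] The same chipset cutoff, chipset < 0x98 or chipset == 0x98 with stepping <= 0xa1, is now tested three times in this function; if it survives further rework, a helper would name the distinction once. A sketch, assuming the cutoff really does mean "older memory-controller layout":

	static bool
	nv_mem_old_timing_layout(struct drm_nouveau_private *dev_priv)
	{
		return dev_priv->chipset < 0x98 ||
		       (dev_priv->chipset == 0x98 && dev_priv->stepping <= 0xa1);
	}
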
@@ -693,10 +712,11 @@ nouveau_mem_timing_init(struct drm_device *dev)
693 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", 712 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
694 timing->reg_100230, timing->reg_100234, 713 timing->reg_100230, timing->reg_100234,
695 timing->reg_100238, timing->reg_10023c); 714 timing->reg_100238, timing->reg_10023c);
715 NV_DEBUG(dev, " 240: %08x\n", timing->reg_100240);
696 } 716 }
697 717
698 memtimings->nr_timing = entries; 718 memtimings->nr_timing = entries;
699 memtimings->supported = true; 719 memtimings->supported = (dev_priv->chipset <= 0x98);
700} 720}
701 721
702void 722void
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 67a16e01ffa6..8f97016f5b26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -361,20 +361,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
361 return 0; 361 return 0;
362} 362}
363 363
364
365static uint32_t
366nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
367{
368 struct drm_nouveau_private *dev_priv = dev->dev_private;
369
370 /*XXX: dodgy hack for now */
371 if (dev_priv->card_type >= NV_50)
372 return 24;
373 if (dev_priv->card_type >= NV_40)
374 return 32;
375 return 16;
376}
377
378/* 364/*
379 DMA objects are used to reference a piece of memory in the 365 DMA objects are used to reference a piece of memory in the
380 framebuffer, PCI or AGP address space. Each object is 16 bytes big 366 framebuffer, PCI or AGP address space. Each object is 16 bytes big
@@ -606,11 +592,11 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
606 set to 0? 592 set to 0?
607*/ 593*/
608static int 594static int
609nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 595nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
610 struct nouveau_gpuobj **gpuobj_ret)
611{ 596{
612 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 597 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
613 struct nouveau_gpuobj *gpuobj; 598 struct nouveau_gpuobj *gpuobj;
599 int ret;
614 600
615 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 601 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
616 if (!gpuobj) 602 if (!gpuobj)
@@ -624,8 +610,10 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
624 spin_lock(&dev_priv->ramin_lock); 610 spin_lock(&dev_priv->ramin_lock);
625 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 611 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
626 spin_unlock(&dev_priv->ramin_lock); 612 spin_unlock(&dev_priv->ramin_lock);
627 *gpuobj_ret = gpuobj; 613
628 return 0; 614 ret = nouveau_ramht_insert(chan, handle, gpuobj);
615 nouveau_gpuobj_ref(NULL, &gpuobj);
616 return ret;
629} 617}
630 618
631int 619int
@@ -634,101 +622,30 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
634 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 622 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
635 struct drm_device *dev = chan->dev; 623 struct drm_device *dev = chan->dev;
636 struct nouveau_gpuobj_class *oc; 624 struct nouveau_gpuobj_class *oc;
637 struct nouveau_gpuobj *gpuobj;
638 int ret; 625 int ret;
639 626
640 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); 627 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
641 628
642 list_for_each_entry(oc, &dev_priv->classes, head) { 629 list_for_each_entry(oc, &dev_priv->classes, head) {
643 if (oc->id == class) 630 struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];
644 goto found;
645 }
646
647 NV_ERROR(dev, "illegal object class: 0x%x\n", class);
648 return -EINVAL;
649 631
650found: 632 if (oc->id != class)
651 switch (oc->engine) { 633 continue;
652 case NVOBJ_ENGINE_SW:
653 if (dev_priv->card_type < NV_C0) {
654 ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
655 if (ret)
656 return ret;
657 goto insert;
658 }
659 break;
660 case NVOBJ_ENGINE_GR:
661 if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
662 (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
663 struct nouveau_pgraph_engine *pgraph =
664 &dev_priv->engine.graph;
665 634
666 ret = pgraph->create_context(chan); 635 if (oc->engine == NVOBJ_ENGINE_SW)
667 if (ret) 636 return nouveau_gpuobj_sw_new(chan, handle, class);
668 return ret;
669 }
670 break;
671 case NVOBJ_ENGINE_CRYPT:
672 if (!chan->crypt_ctx) {
673 struct nouveau_crypt_engine *pcrypt =
674 &dev_priv->engine.crypt;
675 637
676 ret = pcrypt->create_context(chan); 638 if (!chan->engctx[oc->engine]) {
639 ret = eng->context_new(chan, oc->engine);
677 if (ret) 640 if (ret)
678 return ret; 641 return ret;
679 } 642 }
680 break;
681 }
682
683 /* we're done if this is fermi */
684 if (dev_priv->card_type >= NV_C0)
685 return 0;
686
687 ret = nouveau_gpuobj_new(dev, chan,
688 nouveau_gpuobj_class_instmem_size(dev, class),
689 16,
690 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
691 &gpuobj);
692 if (ret) {
693 NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
694 return ret;
695 }
696 643
697 if (dev_priv->card_type >= NV_50) { 644 return eng->object_new(chan, oc->engine, handle, class);
698 nv_wo32(gpuobj, 0, class);
699 nv_wo32(gpuobj, 20, 0x00010000);
700 } else {
701 switch (class) {
702 case NV_CLASS_NULL:
703 nv_wo32(gpuobj, 0, 0x00001030);
704 nv_wo32(gpuobj, 4, 0xFFFFFFFF);
705 break;
706 default:
707 if (dev_priv->card_type >= NV_40) {
708 nv_wo32(gpuobj, 0, class);
709#ifdef __BIG_ENDIAN
710 nv_wo32(gpuobj, 8, 0x01000000);
711#endif
712 } else {
713#ifdef __BIG_ENDIAN
714 nv_wo32(gpuobj, 0, class | 0x00080000);
715#else
716 nv_wo32(gpuobj, 0, class);
717#endif
718 }
719 }
720 } 645 }
721 dev_priv->engine.instmem.flush(dev);
722
723 gpuobj->engine = oc->engine;
724 gpuobj->class = oc->id;
725 646
726insert: 647 NV_ERROR(dev, "illegal object class: 0x%x\n", class);
727 ret = nouveau_ramht_insert(chan, handle, gpuobj); 648 return -EINVAL;
728 if (ret)
729 NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
730 nouveau_gpuobj_ref(NULL, &gpuobj);
731 return ret;
732} 649}
733 650
734static int 651static int
@@ -746,9 +663,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
746 size = 0x2000; 663 size = 0x2000;
747 base = 0; 664 base = 0;
748 665
749 /* PGRAPH context */
750 size += dev_priv->engine.graph.grctx_size;
751
752 if (dev_priv->card_type == NV_50) { 666 if (dev_priv->card_type == NV_50) {
753 /* Various fixed table thingos */ 667 /* Various fixed table thingos */
754 size += 0x1400; /* mostly unknown stuff */ 668 size += 0x1400; /* mostly unknown stuff */
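
The rewritten nouveau_gpuobj_gr_new() above drops the per-engine switch in favour of a registry walk: find the class, lazily create the channel's context on the owning engine, then let that engine build the object itself. A compilable toy version of the dispatch pattern, using stand-in types and a stub engine rather than the driver's real ones:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define ENGINE_NR 4
#define ENGINE_SW 0

struct channel {
        void *engctx[ENGINE_NR];        /* per-engine context, NULL until used */
};

struct exec_engine {
        int (*context_new)(struct channel *chan, int engine);
        int (*object_new)(struct channel *chan, int engine,
                          unsigned handle, unsigned short oclass);
};

struct obj_class {
        unsigned short id;      /* object class, e.g. 0x506e */
        int engine;             /* index into the engine table */
};

/* stub engine standing in for a real PGRAPH/PCRYPT implementation */
static int stub_context_new(struct channel *chan, int engine)
{
        chan->engctx[engine] = chan;    /* any non-NULL marker will do */
        return 0;
}

static int stub_object_new(struct channel *chan, int engine,
                           unsigned handle, unsigned short oclass)
{
        printf("engine %d: object %04x -> handle %08x\n",
               engine, oclass, handle);
        return 0;
}

static struct exec_engine stub = { stub_context_new, stub_object_new };
static struct exec_engine *eng[ENGINE_NR] = { &stub };

static const struct obj_class classes[] = {
        { 0x506e, ENGINE_SW },
};

static int object_new(struct channel *chan, unsigned handle,
                      unsigned short oclass)
{
        size_t i;

        for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
                const struct obj_class *oc = &classes[i];

                if (oc->id != oclass)
                        continue;

                /* lazily create this channel's context on the engine */
                if (!chan->engctx[oc->engine]) {
                        int ret = eng[oc->engine]->context_new(chan, oc->engine);
                        if (ret)
                                return ret;
                }
                return eng[oc->engine]->object_new(chan, oc->engine,
                                                   handle, oclass);
        }
        return -EINVAL;         /* unknown class */
}

int main(void)
{
        struct channel chan = { { NULL } };
        return object_new(&chan, 0xbeef0001, 0x506e);
}
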
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 670e3cb697ec..922fb6b664ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -72,6 +72,68 @@ legacy_perf_init(struct drm_device *dev)
72 pm->nr_perflvl = 1; 72 pm->nr_perflvl = 1;
73} 73}
74 74
75static struct nouveau_pm_memtiming *
76nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
77 u16 memclk, u8 *entry, u8 recordlen, u8 entries)
78{
79 struct drm_nouveau_private *dev_priv = dev->dev_private;
80 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
81 struct nvbios *bios = &dev_priv->vbios;
82 u8 ramcfg;
83 int i;
84
85 /* perf v2 has a separate "timing map" table, we have to match
86 * the target memory clock to a specific entry, *then* use
87 * ramcfg to select the correct subentry
88 */
89 if (P->version == 2) {
90 u8 *tmap = ROMPTR(bios, P->data[4]);
91 if (!tmap) {
92 NV_DEBUG(dev, "no timing map pointer\n");
93 return NULL;
94 }
95
96 if (tmap[0] != 0x10) {
97 NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]);
98 return NULL;
99 }
100
101 entry = tmap + tmap[1];
102 recordlen = tmap[2] + (tmap[4] * tmap[3]);
103 for (i = 0; i < tmap[5]; i++, entry += recordlen) {
104 if (memclk >= ROM16(entry[0]) &&
105 memclk <= ROM16(entry[2]))
106 break;
107 }
108
109 if (i == tmap[5]) {
110 NV_WARN(dev, "no match in timing map table\n");
111 return NULL;
112 }
113
114 entry += tmap[2];
115 recordlen = tmap[3];
116 entries = tmap[4];
117 }
118
119 ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
120 if (bios->ram_restrict_tbl_ptr)
121 ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg];
122
123 if (ramcfg >= entries) {
124 NV_WARN(dev, "ramcfg strap out of bounds!\n");
125 return NULL;
126 }
127
128 entry += ramcfg * recordlen;
129 if (entry[1] >= pm->memtimings.nr_timing) {
130 NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
131 return NULL;
132 }
133
134 return &pm->memtimings.timing[entry[1]];
135}
136
75void 137void
76nouveau_perf_init(struct drm_device *dev) 138nouveau_perf_init(struct drm_device *dev)
77{ 139{
@@ -124,6 +186,8 @@ nouveau_perf_init(struct drm_device *dev)
124 for (i = 0; i < entries; i++) { 186 for (i = 0; i < entries; i++) {
125 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; 187 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
126 188
189 perflvl->timing = NULL;
190
127 if (entry[0] == 0xff) { 191 if (entry[0] == 0xff) {
128 entry += recordlen; 192 entry += recordlen;
129 continue; 193 continue;
@@ -174,9 +238,21 @@ nouveau_perf_init(struct drm_device *dev)
174#define subent(n) entry[perf[2] + ((n) * perf[3])] 238#define subent(n) entry[perf[2] + ((n) * perf[3])]
175 perflvl->fanspeed = 0; /*XXX*/ 239 perflvl->fanspeed = 0; /*XXX*/
176 perflvl->voltage = entry[2]; 240 perflvl->voltage = entry[2];
177 perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000; 241 if (dev_priv->card_type == NV_50) {
178 perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000; 242 perflvl->core = ROM16(subent(0)) & 0xfff;
179 perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000; 243 perflvl->shader = ROM16(subent(1)) & 0xfff;
244 perflvl->memory = ROM16(subent(2)) & 0xfff;
245 } else {
246 perflvl->shader = ROM16(subent(3)) & 0xfff;
247 perflvl->core = perflvl->shader / 2;
248 perflvl->unk0a = ROM16(subent(4)) & 0xfff;
249 perflvl->memory = ROM16(subent(5)) & 0xfff;
250 }
251
252 perflvl->core *= 1000;
253 perflvl->shader *= 1000;
254 perflvl->memory *= 1000;
255 perflvl->unk0a *= 1000;
180 break; 256 break;
181 } 257 }
182 258
@@ -190,6 +266,16 @@ nouveau_perf_init(struct drm_device *dev)
190 } 266 }
191 } 267 }
192 268
269 /* get the corresponding memory timings */
270 if (version > 0x15) {
271 /* last 3 args are for < 0x40, ignored for >= 0x40 */
272 perflvl->timing =
273 nouveau_perf_timing(dev, &P,
274 perflvl->memory / 1000,
275 entry + perf[3],
276 perf[5], perf[4]);
277 }
278
193 snprintf(perflvl->name, sizeof(perflvl->name), 279 snprintf(perflvl->name, sizeof(perflvl->name),
194 "performance_level_%d", i); 280 "performance_level_%d", i);
195 perflvl->id = i; 281 perflvl->id = i;
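
The new nouveau_perf_timing() above does a two-stage lookup for perf v2 tables: first match the target memory clock against a range entry in the timing map, then pick the subentry selected by the ramcfg strap. A userspace sketch of the same walk, with a made-up table layout standing in for the vbios bytes:

#include <stdint.h>
#include <stdio.h>

struct tmap_entry {
        uint16_t clk_min, clk_max;      /* clock range this entry covers */
        uint8_t timing[4];              /* one timing-set id per ramcfg strap */
};

static int lookup_timing(const struct tmap_entry *map, int entries,
                         uint16_t memclk, unsigned ramcfg)
{
        int i;

        /* stage 1: find the entry whose clock range contains the target */
        for (i = 0; i < entries; i++) {
                if (memclk >= map[i].clk_min && memclk <= map[i].clk_max)
                        break;
        }
        if (i == entries || ramcfg >= 4)
                return -1;              /* no match / strap out of bounds */

        /* stage 2: index the subentry by the ramcfg strap */
        return map[i].timing[ramcfg];
}

int main(void)
{
        static const struct tmap_entry map[] = {
                { 100, 399, { 0, 0, 1, 1 } },
                { 400, 999, { 2, 2, 3, 3 } },
        };

        printf("%d\n", lookup_timing(map, 2, 500, 1)); /* prints 2 */
        return 0;
}
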
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 4399e2f34db4..da8d994d5e8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -156,7 +156,7 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
156static void 156static void
157nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) 157nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
158{ 158{
159 char c[16], s[16], v[16], f[16]; 159 char c[16], s[16], v[16], f[16], t[16];
160 160
161 c[0] = '\0'; 161 c[0] = '\0';
162 if (perflvl->core) 162 if (perflvl->core)
@@ -174,8 +174,12 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
174 if (perflvl->fanspeed) 174 if (perflvl->fanspeed)
175 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); 175 snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
176 176
177 snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000, 177 t[0] = '\0';
178 c, s, v, f); 178 if (perflvl->timing)
179 snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
180
181 snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000,
182 c, s, v, f, t);
179} 183}
180 184
181static ssize_t 185static ssize_t
@@ -449,7 +453,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
449#endif 453#endif
450} 454}
451 455
452#ifdef CONFIG_ACPI 456#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
453static int 457static int
454nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) 458nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
455{ 459{
@@ -476,10 +480,10 @@ nouveau_pm_init(struct drm_device *dev)
476 char info[256]; 480 char info[256];
477 int ret, i; 481 int ret, i;
478 482
483 nouveau_mem_timing_init(dev);
479 nouveau_volt_init(dev); 484 nouveau_volt_init(dev);
480 nouveau_perf_init(dev); 485 nouveau_perf_init(dev);
481 nouveau_temp_init(dev); 486 nouveau_temp_init(dev);
482 nouveau_mem_timing_init(dev);
483 487
484 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); 488 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
485 for (i = 0; i < pm->nr_perflvl; i++) { 489 for (i = 0; i < pm->nr_perflvl; i++) {
@@ -490,6 +494,7 @@ nouveau_pm_init(struct drm_device *dev)
490 /* determine current ("boot") performance level */ 494 /* determine current ("boot") performance level */
491 ret = nouveau_pm_perflvl_get(dev, &pm->boot); 495 ret = nouveau_pm_perflvl_get(dev, &pm->boot);
492 if (ret == 0) { 496 if (ret == 0) {
497 strncpy(pm->boot.name, "boot", 4);
493 pm->cur = &pm->boot; 498 pm->cur = &pm->boot;
494 499
495 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); 500 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
@@ -507,7 +512,7 @@ nouveau_pm_init(struct drm_device *dev)
507 512
508 nouveau_sysfs_init(dev); 513 nouveau_sysfs_init(dev);
509 nouveau_hwmon_init(dev); 514 nouveau_hwmon_init(dev);
510#ifdef CONFIG_ACPI 515#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
511 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; 516 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
512 register_acpi_notifier(&pm->acpi_nb); 517 register_acpi_notifier(&pm->acpi_nb);
513#endif 518#endif
@@ -524,12 +529,12 @@ nouveau_pm_fini(struct drm_device *dev)
524 if (pm->cur != &pm->boot) 529 if (pm->cur != &pm->boot)
525 nouveau_pm_perflvl_set(dev, &pm->boot); 530 nouveau_pm_perflvl_set(dev, &pm->boot);
526 531
527 nouveau_mem_timing_fini(dev);
528 nouveau_temp_fini(dev); 532 nouveau_temp_fini(dev);
529 nouveau_perf_fini(dev); 533 nouveau_perf_fini(dev);
530 nouveau_volt_fini(dev); 534 nouveau_volt_fini(dev);
535 nouveau_mem_timing_fini(dev);
531 536
532#ifdef CONFIG_ACPI 537#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
533 unregister_acpi_notifier(&pm->acpi_nb); 538 unregister_acpi_notifier(&pm->acpi_nb);
534#endif 539#endif
535 nouveau_hwmon_fini(dev); 540 nouveau_hwmon_fini(dev);
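
Note the reordering in nouveau_pm_init()/nouveau_pm_fini() above: memory timings are now parsed before the perf table, since nouveau_perf_init() indexes into pm->memtimings, and teardown runs in the opposite order. A trivial sketch of that dependency-ordered init/fini shape; the stage names are illustrative only:

#include <stdio.h>

static void timing_init(void) { puts("timing init"); }
static void perf_init(void)   { puts("perf init (reads timings)"); }
static void perf_fini(void)   { puts("perf fini"); }
static void timing_fini(void) { puts("timing fini"); }

int main(void)
{
        timing_init();  /* must precede perf_init(), which indexes timings */
        perf_init();

        perf_fini();    /* reverse order on teardown: users go first */
        timing_fini();
        return 0;
}
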
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 04e8fb795269..f18cdfc3400f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -639,9 +639,9 @@
639# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240 639# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240
640# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258 640# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258
641 641
642#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0) 642#define NV50_AUXCH_DATA_OUT(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
643#define NV50_AUXCH_DATA_OUT__SIZE 4 643#define NV50_AUXCH_DATA_OUT__SIZE 4
644#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0) 644#define NV50_AUXCH_DATA_IN(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
645#define NV50_AUXCH_DATA_IN__SIZE 4 645#define NV50_AUXCH_DATA_IN__SIZE 4
646#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0) 646#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0)
647#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4) 647#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4)
@@ -829,7 +829,7 @@
829#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084 829#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
830#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 830#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
831#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff 831#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
832#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) 832#define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
833#define NV50_SOR_DP_CTRL_ENABLED 0x00000001 833#define NV50_SOR_DP_CTRL_ENABLED 0x00000001
834#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 834#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
835#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 835#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
@@ -841,10 +841,10 @@
841#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000 841#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000
842#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000 842#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000
843#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 843#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
844#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) 844#define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
845#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) 845#define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
846#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) 846#define NV50_SOR_DP_UNK128(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
847#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) 847#define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
848 848
849#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) 849#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
850#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000) 850#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
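
The hunks above are whitespace-only fixes to the parameterized register macros. For reference, this is how such a macro expands per aux channel and dword; the macro body is copied verbatim from the header, and the loop is just a demonstration harness:

#include <stdio.h>

#define NV50_AUXCH_DATA_OUT(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
#define NV50_AUXCH_DATA_OUT__SIZE 4

int main(void)
{
        int i, n;

        /* each aux channel occupies an 0x50-byte stride of registers */
        for (i = 0; i < 2; i++)
                for (n = 0; n < NV50_AUXCH_DATA_OUT__SIZE; n++)
                        printf("ch%d dword%d -> 0x%08x\n", i, n,
                               NV50_AUXCH_DATA_OUT(i, n));
        return 0;
}
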
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 915fbce89595..38ea662568c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -65,14 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
65 engine->timer.takedown = nv04_timer_takedown; 65 engine->timer.takedown = nv04_timer_takedown;
66 engine->fb.init = nv04_fb_init; 66 engine->fb.init = nv04_fb_init;
67 engine->fb.takedown = nv04_fb_takedown; 67 engine->fb.takedown = nv04_fb_takedown;
68 engine->graph.init = nv04_graph_init;
69 engine->graph.takedown = nv04_graph_takedown;
70 engine->graph.fifo_access = nv04_graph_fifo_access;
71 engine->graph.channel = nv04_graph_channel;
72 engine->graph.create_context = nv04_graph_create_context;
73 engine->graph.destroy_context = nv04_graph_destroy_context;
74 engine->graph.load_context = nv04_graph_load_context;
75 engine->graph.unload_context = nv04_graph_unload_context;
76 engine->fifo.channels = 16; 68 engine->fifo.channels = 16;
77 engine->fifo.init = nv04_fifo_init; 69 engine->fifo.init = nv04_fifo_init;
78 engine->fifo.takedown = nv04_fifo_fini; 70 engine->fifo.takedown = nv04_fifo_fini;
@@ -98,8 +90,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
98 engine->pm.clock_get = nv04_pm_clock_get; 90 engine->pm.clock_get = nv04_pm_clock_get;
99 engine->pm.clock_pre = nv04_pm_clock_pre; 91 engine->pm.clock_pre = nv04_pm_clock_pre;
100 engine->pm.clock_set = nv04_pm_clock_set; 92 engine->pm.clock_set = nv04_pm_clock_set;
101 engine->crypt.init = nouveau_stub_init;
102 engine->crypt.takedown = nouveau_stub_takedown;
103 engine->vram.init = nouveau_mem_detect; 93 engine->vram.init = nouveau_mem_detect;
104 engine->vram.flags_valid = nouveau_mem_flags_valid; 94 engine->vram.flags_valid = nouveau_mem_flags_valid;
105 break; 95 break;
@@ -123,15 +113,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
123 engine->fb.init_tile_region = nv10_fb_init_tile_region; 113 engine->fb.init_tile_region = nv10_fb_init_tile_region;
124 engine->fb.set_tile_region = nv10_fb_set_tile_region; 114 engine->fb.set_tile_region = nv10_fb_set_tile_region;
125 engine->fb.free_tile_region = nv10_fb_free_tile_region; 115 engine->fb.free_tile_region = nv10_fb_free_tile_region;
126 engine->graph.init = nv10_graph_init;
127 engine->graph.takedown = nv10_graph_takedown;
128 engine->graph.channel = nv10_graph_channel;
129 engine->graph.create_context = nv10_graph_create_context;
130 engine->graph.destroy_context = nv10_graph_destroy_context;
131 engine->graph.fifo_access = nv04_graph_fifo_access;
132 engine->graph.load_context = nv10_graph_load_context;
133 engine->graph.unload_context = nv10_graph_unload_context;
134 engine->graph.set_tile_region = nv10_graph_set_tile_region;
135 engine->fifo.channels = 32; 116 engine->fifo.channels = 32;
136 engine->fifo.init = nv10_fifo_init; 117 engine->fifo.init = nv10_fifo_init;
137 engine->fifo.takedown = nv04_fifo_fini; 118 engine->fifo.takedown = nv04_fifo_fini;
@@ -157,8 +138,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
157 engine->pm.clock_get = nv04_pm_clock_get; 138 engine->pm.clock_get = nv04_pm_clock_get;
158 engine->pm.clock_pre = nv04_pm_clock_pre; 139 engine->pm.clock_pre = nv04_pm_clock_pre;
159 engine->pm.clock_set = nv04_pm_clock_set; 140 engine->pm.clock_set = nv04_pm_clock_set;
160 engine->crypt.init = nouveau_stub_init;
161 engine->crypt.takedown = nouveau_stub_takedown;
162 engine->vram.init = nouveau_mem_detect; 141 engine->vram.init = nouveau_mem_detect;
163 engine->vram.flags_valid = nouveau_mem_flags_valid; 142 engine->vram.flags_valid = nouveau_mem_flags_valid;
164 break; 143 break;
@@ -182,15 +161,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
182 engine->fb.init_tile_region = nv10_fb_init_tile_region; 161 engine->fb.init_tile_region = nv10_fb_init_tile_region;
183 engine->fb.set_tile_region = nv10_fb_set_tile_region; 162 engine->fb.set_tile_region = nv10_fb_set_tile_region;
184 engine->fb.free_tile_region = nv10_fb_free_tile_region; 163 engine->fb.free_tile_region = nv10_fb_free_tile_region;
185 engine->graph.init = nv20_graph_init;
186 engine->graph.takedown = nv20_graph_takedown;
187 engine->graph.channel = nv10_graph_channel;
188 engine->graph.create_context = nv20_graph_create_context;
189 engine->graph.destroy_context = nv20_graph_destroy_context;
190 engine->graph.fifo_access = nv04_graph_fifo_access;
191 engine->graph.load_context = nv20_graph_load_context;
192 engine->graph.unload_context = nv20_graph_unload_context;
193 engine->graph.set_tile_region = nv20_graph_set_tile_region;
194 engine->fifo.channels = 32; 164 engine->fifo.channels = 32;
195 engine->fifo.init = nv10_fifo_init; 165 engine->fifo.init = nv10_fifo_init;
196 engine->fifo.takedown = nv04_fifo_fini; 166 engine->fifo.takedown = nv04_fifo_fini;
@@ -216,8 +186,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
216 engine->pm.clock_get = nv04_pm_clock_get; 186 engine->pm.clock_get = nv04_pm_clock_get;
217 engine->pm.clock_pre = nv04_pm_clock_pre; 187 engine->pm.clock_pre = nv04_pm_clock_pre;
218 engine->pm.clock_set = nv04_pm_clock_set; 188 engine->pm.clock_set = nv04_pm_clock_set;
219 engine->crypt.init = nouveau_stub_init;
220 engine->crypt.takedown = nouveau_stub_takedown;
221 engine->vram.init = nouveau_mem_detect; 189 engine->vram.init = nouveau_mem_detect;
222 engine->vram.flags_valid = nouveau_mem_flags_valid; 190 engine->vram.flags_valid = nouveau_mem_flags_valid;
223 break; 191 break;
@@ -241,15 +209,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
241 engine->fb.init_tile_region = nv30_fb_init_tile_region; 209 engine->fb.init_tile_region = nv30_fb_init_tile_region;
242 engine->fb.set_tile_region = nv10_fb_set_tile_region; 210 engine->fb.set_tile_region = nv10_fb_set_tile_region;
243 engine->fb.free_tile_region = nv30_fb_free_tile_region; 211 engine->fb.free_tile_region = nv30_fb_free_tile_region;
244 engine->graph.init = nv30_graph_init;
245 engine->graph.takedown = nv20_graph_takedown;
246 engine->graph.fifo_access = nv04_graph_fifo_access;
247 engine->graph.channel = nv10_graph_channel;
248 engine->graph.create_context = nv20_graph_create_context;
249 engine->graph.destroy_context = nv20_graph_destroy_context;
250 engine->graph.load_context = nv20_graph_load_context;
251 engine->graph.unload_context = nv20_graph_unload_context;
252 engine->graph.set_tile_region = nv20_graph_set_tile_region;
253 engine->fifo.channels = 32; 212 engine->fifo.channels = 32;
254 engine->fifo.init = nv10_fifo_init; 213 engine->fifo.init = nv10_fifo_init;
255 engine->fifo.takedown = nv04_fifo_fini; 214 engine->fifo.takedown = nv04_fifo_fini;
@@ -277,8 +236,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
277 engine->pm.clock_set = nv04_pm_clock_set; 236 engine->pm.clock_set = nv04_pm_clock_set;
278 engine->pm.voltage_get = nouveau_voltage_gpio_get; 237 engine->pm.voltage_get = nouveau_voltage_gpio_get;
279 engine->pm.voltage_set = nouveau_voltage_gpio_set; 238 engine->pm.voltage_set = nouveau_voltage_gpio_set;
280 engine->crypt.init = nouveau_stub_init;
281 engine->crypt.takedown = nouveau_stub_takedown;
282 engine->vram.init = nouveau_mem_detect; 239 engine->vram.init = nouveau_mem_detect;
283 engine->vram.flags_valid = nouveau_mem_flags_valid; 240 engine->vram.flags_valid = nouveau_mem_flags_valid;
284 break; 241 break;
@@ -303,15 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
303 engine->fb.init_tile_region = nv30_fb_init_tile_region; 260 engine->fb.init_tile_region = nv30_fb_init_tile_region;
304 engine->fb.set_tile_region = nv40_fb_set_tile_region; 261 engine->fb.set_tile_region = nv40_fb_set_tile_region;
305 engine->fb.free_tile_region = nv30_fb_free_tile_region; 262 engine->fb.free_tile_region = nv30_fb_free_tile_region;
306 engine->graph.init = nv40_graph_init;
307 engine->graph.takedown = nv40_graph_takedown;
308 engine->graph.fifo_access = nv04_graph_fifo_access;
309 engine->graph.channel = nv40_graph_channel;
310 engine->graph.create_context = nv40_graph_create_context;
311 engine->graph.destroy_context = nv40_graph_destroy_context;
312 engine->graph.load_context = nv40_graph_load_context;
313 engine->graph.unload_context = nv40_graph_unload_context;
314 engine->graph.set_tile_region = nv40_graph_set_tile_region;
315 engine->fifo.channels = 32; 263 engine->fifo.channels = 32;
316 engine->fifo.init = nv40_fifo_init; 264 engine->fifo.init = nv40_fifo_init;
317 engine->fifo.takedown = nv04_fifo_fini; 265 engine->fifo.takedown = nv04_fifo_fini;
@@ -340,8 +288,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
340 engine->pm.voltage_get = nouveau_voltage_gpio_get; 288 engine->pm.voltage_get = nouveau_voltage_gpio_get;
341 engine->pm.voltage_set = nouveau_voltage_gpio_set; 289 engine->pm.voltage_set = nouveau_voltage_gpio_set;
342 engine->pm.temp_get = nv40_temp_get; 290 engine->pm.temp_get = nv40_temp_get;
343 engine->crypt.init = nouveau_stub_init;
344 engine->crypt.takedown = nouveau_stub_takedown;
345 engine->vram.init = nouveau_mem_detect; 291 engine->vram.init = nouveau_mem_detect;
346 engine->vram.flags_valid = nouveau_mem_flags_valid; 292 engine->vram.flags_valid = nouveau_mem_flags_valid;
347 break; 293 break;
@@ -368,19 +314,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
368 engine->timer.takedown = nv04_timer_takedown; 314 engine->timer.takedown = nv04_timer_takedown;
369 engine->fb.init = nv50_fb_init; 315 engine->fb.init = nv50_fb_init;
370 engine->fb.takedown = nv50_fb_takedown; 316 engine->fb.takedown = nv50_fb_takedown;
371 engine->graph.init = nv50_graph_init;
372 engine->graph.takedown = nv50_graph_takedown;
373 engine->graph.fifo_access = nv50_graph_fifo_access;
374 engine->graph.channel = nv50_graph_channel;
375 engine->graph.create_context = nv50_graph_create_context;
376 engine->graph.destroy_context = nv50_graph_destroy_context;
377 engine->graph.load_context = nv50_graph_load_context;
378 engine->graph.unload_context = nv50_graph_unload_context;
379 if (dev_priv->chipset == 0x50 ||
380 dev_priv->chipset == 0xac)
381 engine->graph.tlb_flush = nv50_graph_tlb_flush;
382 else
383 engine->graph.tlb_flush = nv84_graph_tlb_flush;
384 engine->fifo.channels = 128; 317 engine->fifo.channels = 128;
385 engine->fifo.init = nv50_fifo_init; 318 engine->fifo.init = nv50_fifo_init;
386 engine->fifo.takedown = nv50_fifo_takedown; 319 engine->fifo.takedown = nv50_fifo_takedown;
@@ -432,24 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
432 engine->pm.temp_get = nv84_temp_get; 365 engine->pm.temp_get = nv84_temp_get;
433 else 366 else
434 engine->pm.temp_get = nv40_temp_get; 367 engine->pm.temp_get = nv40_temp_get;
435 switch (dev_priv->chipset) {
436 case 0x84:
437 case 0x86:
438 case 0x92:
439 case 0x94:
440 case 0x96:
441 case 0xa0:
442 engine->crypt.init = nv84_crypt_init;
443 engine->crypt.takedown = nv84_crypt_fini;
444 engine->crypt.create_context = nv84_crypt_create_context;
445 engine->crypt.destroy_context = nv84_crypt_destroy_context;
446 engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
447 break;
448 default:
449 engine->crypt.init = nouveau_stub_init;
450 engine->crypt.takedown = nouveau_stub_takedown;
451 break;
452 }
453 engine->vram.init = nv50_vram_init; 368 engine->vram.init = nv50_vram_init;
454 engine->vram.get = nv50_vram_new; 369 engine->vram.get = nv50_vram_new;
455 engine->vram.put = nv50_vram_del; 370 engine->vram.put = nv50_vram_del;
@@ -472,14 +387,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
472 engine->timer.takedown = nv04_timer_takedown; 387 engine->timer.takedown = nv04_timer_takedown;
473 engine->fb.init = nvc0_fb_init; 388 engine->fb.init = nvc0_fb_init;
474 engine->fb.takedown = nvc0_fb_takedown; 389 engine->fb.takedown = nvc0_fb_takedown;
475 engine->graph.init = nvc0_graph_init;
476 engine->graph.takedown = nvc0_graph_takedown;
477 engine->graph.fifo_access = nvc0_graph_fifo_access;
478 engine->graph.channel = nvc0_graph_channel;
479 engine->graph.create_context = nvc0_graph_create_context;
480 engine->graph.destroy_context = nvc0_graph_destroy_context;
481 engine->graph.load_context = nvc0_graph_load_context;
482 engine->graph.unload_context = nvc0_graph_unload_context;
483 engine->fifo.channels = 128; 390 engine->fifo.channels = 128;
484 engine->fifo.init = nvc0_fifo_init; 391 engine->fifo.init = nvc0_fifo_init;
485 engine->fifo.takedown = nvc0_fifo_takedown; 392 engine->fifo.takedown = nvc0_fifo_takedown;
@@ -503,8 +410,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
503 engine->gpio.irq_register = nv50_gpio_irq_register; 410 engine->gpio.irq_register = nv50_gpio_irq_register;
504 engine->gpio.irq_unregister = nv50_gpio_irq_unregister; 411 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
505 engine->gpio.irq_enable = nv50_gpio_irq_enable; 412 engine->gpio.irq_enable = nv50_gpio_irq_enable;
506 engine->crypt.init = nouveau_stub_init;
507 engine->crypt.takedown = nouveau_stub_takedown;
508 engine->vram.init = nvc0_vram_init; 413 engine->vram.init = nvc0_vram_init;
509 engine->vram.get = nvc0_vram_new; 414 engine->vram.get = nvc0_vram_new;
510 engine->vram.put = nv50_vram_del; 415 engine->vram.put = nv50_vram_del;
@@ -593,7 +498,7 @@ nouveau_card_init(struct drm_device *dev)
593{ 498{
594 struct drm_nouveau_private *dev_priv = dev->dev_private; 499 struct drm_nouveau_private *dev_priv = dev->dev_private;
595 struct nouveau_engine *engine; 500 struct nouveau_engine *engine;
596 int ret; 501 int ret, e = 0;
597 502
598 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 503 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
599 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, 504 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
@@ -658,23 +563,80 @@ nouveau_card_init(struct drm_device *dev)
658 if (ret) 563 if (ret)
659 goto out_timer; 564 goto out_timer;
660 565
661 if (nouveau_noaccel) 566 switch (dev_priv->card_type) {
662 engine->graph.accel_blocked = true; 567 case NV_04:
663 else { 568 nv04_graph_create(dev);
664 /* PGRAPH */ 569 break;
665 ret = engine->graph.init(dev); 570 case NV_10:
666 if (ret) 571 nv10_graph_create(dev);
667 goto out_fb; 572 break;
573 case NV_20:
574 case NV_30:
575 nv20_graph_create(dev);
576 break;
577 case NV_40:
578 nv40_graph_create(dev);
579 break;
580 case NV_50:
581 nv50_graph_create(dev);
582 break;
583 case NV_C0:
584 nvc0_graph_create(dev);
585 break;
586 default:
587 break;
588 }
668 589
669 /* PCRYPT */ 590 switch (dev_priv->chipset) {
670 ret = engine->crypt.init(dev); 591 case 0x84:
671 if (ret) 592 case 0x86:
672 goto out_graph; 593 case 0x92:
594 case 0x94:
595 case 0x96:
596 case 0xa0:
597 nv84_crypt_create(dev);
598 break;
599 }
600
601 switch (dev_priv->card_type) {
602 case NV_50:
603 switch (dev_priv->chipset) {
604 case 0xa3:
605 case 0xa5:
606 case 0xa8:
607 case 0xaf:
608 nva3_copy_create(dev);
609 break;
610 }
611 break;
612 case NV_C0:
613 nvc0_copy_create(dev, 0);
614 nvc0_copy_create(dev, 1);
615 break;
616 default:
617 break;
618 }
619
620 if (dev_priv->card_type == NV_40)
621 nv40_mpeg_create(dev);
622 else
623 if (dev_priv->card_type == NV_50 &&
624 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
625 nv50_mpeg_create(dev);
626
627 if (!nouveau_noaccel) {
628 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
629 if (dev_priv->eng[e]) {
630 ret = dev_priv->eng[e]->init(dev, e);
631 if (ret)
632 goto out_engine;
633 }
634 }
673 635
674 /* PFIFO */ 636 /* PFIFO */
675 ret = engine->fifo.init(dev); 637 ret = engine->fifo.init(dev);
676 if (ret) 638 if (ret)
677 goto out_crypt; 639 goto out_engine;
678 } 640 }
679 641
680 ret = engine->display.create(dev); 642 ret = engine->display.create(dev);
@@ -691,7 +653,7 @@ nouveau_card_init(struct drm_device *dev)
691 653
692 /* what about PVIDEO/PCRTC/PRAMDAC etc? */ 654 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
693 655
694 if (!engine->graph.accel_blocked) { 656 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
695 ret = nouveau_fence_init(dev); 657 ret = nouveau_fence_init(dev);
696 if (ret) 658 if (ret)
697 goto out_irq; 659 goto out_irq;
@@ -715,13 +677,16 @@ out_vblank:
715out_fifo: 677out_fifo:
716 if (!nouveau_noaccel) 678 if (!nouveau_noaccel)
717 engine->fifo.takedown(dev); 679 engine->fifo.takedown(dev);
718out_crypt: 680out_engine:
719 if (!nouveau_noaccel) 681 if (!nouveau_noaccel) {
720 engine->crypt.takedown(dev); 682 for (e = e - 1; e >= 0; e--) {
721out_graph: 683 if (!dev_priv->eng[e])
722 if (!nouveau_noaccel) 684 continue;
723 engine->graph.takedown(dev); 685 dev_priv->eng[e]->fini(dev, e);
724out_fb: 686 dev_priv->eng[e]->destroy(dev, e);
687 }
688 }
689
725 engine->fb.takedown(dev); 690 engine->fb.takedown(dev);
726out_timer: 691out_timer:
727 engine->timer.takedown(dev); 692 engine->timer.takedown(dev);
@@ -751,16 +716,21 @@ static void nouveau_card_takedown(struct drm_device *dev)
751{ 716{
752 struct drm_nouveau_private *dev_priv = dev->dev_private; 717 struct drm_nouveau_private *dev_priv = dev->dev_private;
753 struct nouveau_engine *engine = &dev_priv->engine; 718 struct nouveau_engine *engine = &dev_priv->engine;
719 int e;
754 720
755 if (!engine->graph.accel_blocked) { 721 if (dev_priv->channel) {
756 nouveau_fence_fini(dev); 722 nouveau_fence_fini(dev);
757 nouveau_channel_put_unlocked(&dev_priv->channel); 723 nouveau_channel_put_unlocked(&dev_priv->channel);
758 } 724 }
759 725
760 if (!nouveau_noaccel) { 726 if (!nouveau_noaccel) {
761 engine->fifo.takedown(dev); 727 engine->fifo.takedown(dev);
762 engine->crypt.takedown(dev); 728 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
763 engine->graph.takedown(dev); 729 if (dev_priv->eng[e]) {
730 dev_priv->eng[e]->fini(dev, e);
731 dev_priv->eng[e]->destroy(dev,e );
732 }
733 }
764 } 734 }
765 engine->fb.takedown(dev); 735 engine->fb.takedown(dev);
766 engine->timer.takedown(dev); 736 engine->timer.takedown(dev);
@@ -866,7 +836,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
866#ifdef CONFIG_X86 836#ifdef CONFIG_X86
867 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 837 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
868#endif 838#endif
869 839
870 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary); 840 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
871 return 0; 841 return 0;
872} 842}
@@ -918,11 +888,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
918 888
919 /* Time to determine the card architecture */ 889 /* Time to determine the card architecture */
920 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); 890 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
891 dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */
921 892
922 /* We're dealing with >=NV10 */ 893 /* We're dealing with >=NV10 */
923 if ((reg0 & 0x0f000000) > 0) { 894 if ((reg0 & 0x0f000000) > 0) {
924 /* Bit 27-20 contain the architecture in hex */ 895 /* Bit 27-20 contain the architecture in hex */
925 dev_priv->chipset = (reg0 & 0xff00000) >> 20; 896 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
897 dev_priv->stepping = (reg0 & 0xff);
926 /* NV04 or NV05 */ 898 /* NV04 or NV05 */
927 } else if ((reg0 & 0xff00fff0) == 0x20004000) { 899 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
928 if (reg0 & 0x00f00000) 900 if (reg0 & 0x00f00000)
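
The nouveau_load() hunk above starts recording the chip stepping alongside the chipset: on NV10+ parts the architecture sits in bits 27..20 of NV03_PMC_BOOT_0 and the stepping is simply the low byte. A standalone sketch of that decode against a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg0 = 0x098000a1;     /* hypothetical PMC_BOOT_0 readout */
        unsigned chipset = 0, stepping = 0;

        if (reg0 & 0x0f000000) {        /* >= NV10 */
                chipset  = (reg0 & 0xff00000) >> 20;
                stepping = reg0 & 0xff;
        }
        printf("chipset 0x%02x stepping 0x%02x\n", chipset, stepping);
        return 0;
}
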
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 2e06b55cfdc1..c48a9fc2b47b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -53,8 +53,7 @@ struct nouveau_vm {
53 int refcount; 53 int refcount;
54 54
55 struct list_head pgd_list; 55 struct list_head pgd_list;
56 atomic_t pgraph_refs; 56 atomic_t engref[16];
57 atomic_t pcrypt_refs;
58 57
59 struct nouveau_vm_pgt *pgt; 58 struct nouveau_vm_pgt *pgt;
60 u32 fpde; 59 u32 fpde;
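
The nouveau_vm change above generalizes two named reference counters into one array indexed by engine id, so adding an engine no longer means adding a field. A toy illustration, with plain ints standing in for atomic_t:

#include <stdio.h>

#define ENGINE_GR    0
#define ENGINE_CRYPT 1
#define ENGINE_NR    16

struct vm {
        int engref[ENGINE_NR];  /* one reference count per engine */
};

int main(void)
{
        struct vm vm = { { 0 } };

        vm.engref[ENGINE_GR]++;     /* was: atomic_inc(&vm->pgraph_refs) */
        vm.engref[ENGINE_CRYPT]++;  /* was: atomic_inc(&vm->pcrypt_refs) */
        printf("gr refs: %d\n", vm.engref[ENGINE_GR]);
        return 0;
}
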
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 04fdc00a67d5..75e872741d92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -159,8 +159,16 @@ nouveau_volt_init(struct drm_device *dev)
159 headerlen = volt[1]; 159 headerlen = volt[1];
160 recordlen = volt[2]; 160 recordlen = volt[2];
161 entries = volt[3]; 161 entries = volt[3];
162 vidshift = hweight8(volt[5]);
163 vidmask = volt[4]; 162 vidmask = volt[4];
163 /* no longer certain what volt[5] is; if it's related to
164 * the vid shift then it's definitely not a function of
165 * how many bits are set.
166 *
167 * after looking at a number of nva3+ vbios images, they
168 * all seem likely to have a static shift of 2.. lets
169 * go with that for now until proven otherwise.
170 */
171 vidshift = 2;
164 break; 172 break;
165 default: 173 default:
166 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); 174 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 748b9d9c2949..3c78bc81357e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -790,8 +790,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
790 if (atomic) { 790 if (atomic) {
791 drm_fb = passed_fb; 791 drm_fb = passed_fb;
792 fb = nouveau_framebuffer(passed_fb); 792 fb = nouveau_framebuffer(passed_fb);
793 } 793 } else {
794 else {
795 /* If not atomic, we can go ahead and pin, and unpin the 794 /* If not atomic, we can go ahead and pin, and unpin the
796 * old fb we were passed. 795 * old fb we were passed.
797 */ 796 */
@@ -944,14 +943,14 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
944 struct drm_gem_object *gem; 943 struct drm_gem_object *gem;
945 int ret = 0; 944 int ret = 0;
946 945
947 if (width != 64 || height != 64)
948 return -EINVAL;
949
950 if (!buffer_handle) { 946 if (!buffer_handle) {
951 nv_crtc->cursor.hide(nv_crtc, true); 947 nv_crtc->cursor.hide(nv_crtc, true);
952 return 0; 948 return 0;
953 } 949 }
954 950
951 if (width != 64 || height != 64)
952 return -EINVAL;
953
955 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); 954 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
956 if (!gem) 955 if (!gem)
957 return -ENOENT; 956 return -ENOENT;
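
The nv04_crtc_cursor_set() hunk above moves the 64x64 size check below the NULL-handle case, so hiding the cursor succeeds regardless of whatever dimensions userspace passed along. A small sketch of that argument-check ordering, with stand-in names:

#include <errno.h>
#include <stdio.h>

static void cursor_hide(void)
{
        puts("cursor hidden");
}

static int cursor_set(unsigned handle, int width, int height)
{
        if (!handle) {          /* hide request: size args are irrelevant */
                cursor_hide();
                return 0;
        }

        if (width != 64 || height != 64)
                return -EINVAL; /* hardware cursor is fixed at 64x64 */

        /* ... look up the buffer and point the cursor at it ... */
        return 0;
}

int main(void)
{
        /* succeeds even with bogus dimensions, as the reorder intends */
        return cursor_set(0, -1, -1);
}
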
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index af75015068d6..3626ee7db3ba 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -28,9 +28,11 @@
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_hw.h" 29#include "nouveau_hw.h"
30#include "nouveau_util.h" 30#include "nouveau_util.h"
31#include "nouveau_ramht.h"
31 32
32static int nv04_graph_register(struct drm_device *dev); 33struct nv04_graph_engine {
33static void nv04_graph_isr(struct drm_device *dev); 34 struct nouveau_exec_engine base;
35};
34 36
35static uint32_t nv04_graph_ctx_regs[] = { 37static uint32_t nv04_graph_ctx_regs[] = {
36 0x0040053c, 38 0x0040053c,
@@ -350,7 +352,7 @@ struct graph_state {
350 uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; 352 uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
351}; 353};
352 354
353struct nouveau_channel * 355static struct nouveau_channel *
354nv04_graph_channel(struct drm_device *dev) 356nv04_graph_channel(struct drm_device *dev)
355{ 357{
356 struct drm_nouveau_private *dev_priv = dev->dev_private; 358 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -365,26 +367,6 @@ nv04_graph_channel(struct drm_device *dev)
365 return dev_priv->channels.ptr[chid]; 367 return dev_priv->channels.ptr[chid];
366} 368}
367 369
368static void
369nv04_graph_context_switch(struct drm_device *dev)
370{
371 struct drm_nouveau_private *dev_priv = dev->dev_private;
372 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
373 struct nouveau_channel *chan = NULL;
374 int chid;
375
376 nouveau_wait_for_idle(dev);
377
378 /* If previous context is valid, we need to save it */
379 pgraph->unload_context(dev);
380
381 /* Load context for next channel */
382 chid = dev_priv->engine.fifo.channel_id(dev);
383 chan = dev_priv->channels.ptr[chid];
384 if (chan)
385 nv04_graph_load_context(chan);
386}
387
388static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) 370static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
389{ 371{
390 int i; 372 int i;
@@ -397,48 +379,11 @@ static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
397 return NULL; 379 return NULL;
398} 380}
399 381
400int nv04_graph_create_context(struct nouveau_channel *chan) 382static int
401{ 383nv04_graph_load_context(struct nouveau_channel *chan)
402 struct graph_state *pgraph_ctx;
403 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
404
405 chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
406 GFP_KERNEL);
407 if (pgraph_ctx == NULL)
408 return -ENOMEM;
409
410 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
411
412 return 0;
413}
414
415void nv04_graph_destroy_context(struct nouveau_channel *chan)
416{
417 struct drm_device *dev = chan->dev;
418 struct drm_nouveau_private *dev_priv = dev->dev_private;
419 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
420 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
421 unsigned long flags;
422
423 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
424 pgraph->fifo_access(dev, false);
425
426 /* Unload the context if it's the currently active one */
427 if (pgraph->channel(dev) == chan)
428 pgraph->unload_context(dev);
429
430 /* Free the context resources */
431 kfree(pgraph_ctx);
432 chan->pgraph_ctx = NULL;
433
434 pgraph->fifo_access(dev, true);
435 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
436}
437
438int nv04_graph_load_context(struct nouveau_channel *chan)
439{ 384{
385 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
440 struct drm_device *dev = chan->dev; 386 struct drm_device *dev = chan->dev;
441 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
442 uint32_t tmp; 387 uint32_t tmp;
443 int i; 388 int i;
444 389
@@ -456,20 +401,19 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
456 return 0; 401 return 0;
457} 402}
458 403
459int 404static int
460nv04_graph_unload_context(struct drm_device *dev) 405nv04_graph_unload_context(struct drm_device *dev)
461{ 406{
462 struct drm_nouveau_private *dev_priv = dev->dev_private; 407 struct drm_nouveau_private *dev_priv = dev->dev_private;
463 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
464 struct nouveau_channel *chan = NULL; 408 struct nouveau_channel *chan = NULL;
465 struct graph_state *ctx; 409 struct graph_state *ctx;
466 uint32_t tmp; 410 uint32_t tmp;
467 int i; 411 int i;
468 412
469 chan = pgraph->channel(dev); 413 chan = nv04_graph_channel(dev);
470 if (!chan) 414 if (!chan)
471 return 0; 415 return 0;
472 ctx = chan->pgraph_ctx; 416 ctx = chan->engctx[NVOBJ_ENGINE_GR];
473 417
474 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) 418 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
475 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]); 419 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
@@ -481,23 +425,85 @@ nv04_graph_unload_context(struct drm_device *dev)
481 return 0; 425 return 0;
482} 426}
483 427
484int nv04_graph_init(struct drm_device *dev) 428static int
429nv04_graph_context_new(struct nouveau_channel *chan, int engine)
485{ 430{
431 struct graph_state *pgraph_ctx;
432 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
433
434 pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
435 if (pgraph_ctx == NULL)
436 return -ENOMEM;
437
438 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
439
440 chan->engctx[engine] = pgraph_ctx;
441 return 0;
442}
443
444static void
445nv04_graph_context_del(struct nouveau_channel *chan, int engine)
446{
447 struct drm_device *dev = chan->dev;
486 struct drm_nouveau_private *dev_priv = dev->dev_private; 448 struct drm_nouveau_private *dev_priv = dev->dev_private;
487 uint32_t tmp; 449 struct graph_state *pgraph_ctx = chan->engctx[engine];
450 unsigned long flags;
451
452 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
453 nv04_graph_fifo_access(dev, false);
454
455 /* Unload the context if it's the currently active one */
456 if (nv04_graph_channel(dev) == chan)
457 nv04_graph_unload_context(dev);
458
459 nv04_graph_fifo_access(dev, true);
460 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
461
462 /* Free the context resources */
463 kfree(pgraph_ctx);
464 chan->engctx[engine] = NULL;
465}
466
467int
468nv04_graph_object_new(struct nouveau_channel *chan, int engine,
469 u32 handle, u16 class)
470{
471 struct drm_device *dev = chan->dev;
472 struct nouveau_gpuobj *obj = NULL;
488 int ret; 473 int ret;
489 474
475 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
476 if (ret)
477 return ret;
478 obj->engine = 1;
479 obj->class = class;
480
481#ifdef __BIG_ENDIAN
482 nv_wo32(obj, 0x00, 0x00080000 | class);
483#else
484 nv_wo32(obj, 0x00, class);
485#endif
486 nv_wo32(obj, 0x04, 0x00000000);
487 nv_wo32(obj, 0x08, 0x00000000);
488 nv_wo32(obj, 0x0c, 0x00000000);
489
490 ret = nouveau_ramht_insert(chan, handle, obj);
491 nouveau_gpuobj_ref(NULL, &obj);
492 return ret;
493}
494
495static int
496nv04_graph_init(struct drm_device *dev, int engine)
497{
498 struct drm_nouveau_private *dev_priv = dev->dev_private;
499 uint32_t tmp;
500
490 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 501 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
491 ~NV_PMC_ENABLE_PGRAPH); 502 ~NV_PMC_ENABLE_PGRAPH);
492 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 503 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
493 NV_PMC_ENABLE_PGRAPH); 504 NV_PMC_ENABLE_PGRAPH);
494 505
495 ret = nv04_graph_register(dev);
496 if (ret)
497 return ret;
498
499 /* Enable PGRAPH interrupts */ 506 /* Enable PGRAPH interrupts */
500 nouveau_irq_register(dev, 12, nv04_graph_isr);
501 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); 507 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
502 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 508 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
503 509
@@ -507,7 +513,7 @@ int nv04_graph_init(struct drm_device *dev)
507 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ 513 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
508 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000); 514 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
509 /*1231C000 blob, 001 haiku*/ 515 /*1231C000 blob, 001 haiku*/
510 //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ 516 /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
511 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100); 517 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
512 /*0x72111100 blob, 01 haiku*/ 518 /*0x72111100 blob, 01 haiku*/
513 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ 519 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
@@ -531,10 +537,12 @@ int nv04_graph_init(struct drm_device *dev)
531 return 0; 537 return 0;
532} 538}
533 539
534void nv04_graph_takedown(struct drm_device *dev) 540static int
541nv04_graph_fini(struct drm_device *dev, int engine)
535{ 542{
543 nv04_graph_unload_context(dev);
536 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 544 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
537 nouveau_irq_unregister(dev, 12); 545 return 0;
538} 546}
539 547
540void 548void
@@ -969,13 +977,138 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
969 return 1; 977 return 1;
970} 978}
971 979
972static int 980static struct nouveau_bitfield nv04_graph_intr[] = {
973nv04_graph_register(struct drm_device *dev) 981 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
982 {}
983};
984
985static struct nouveau_bitfield nv04_graph_nstatus[] = {
986 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
987 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
988 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
989 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
990 {}
991};
992
993struct nouveau_bitfield nv04_graph_nsource[] = {
994 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
995 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
996 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
997 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
998 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
999 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
1000 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
1001 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
1002 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
1003 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
1004 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
1005 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
1006 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
1007 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
1008 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
1009 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
1010 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
1011 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
1012 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
1013 {}
1014};
1015
1016static void
1017nv04_graph_context_switch(struct drm_device *dev)
974{ 1018{
975 struct drm_nouveau_private *dev_priv = dev->dev_private; 1019 struct drm_nouveau_private *dev_priv = dev->dev_private;
1020 struct nouveau_channel *chan = NULL;
1021 int chid;
976 1022
977 if (dev_priv->engine.graph.registered) 1023 nouveau_wait_for_idle(dev);
978 return 0; 1024
1025 /* If previous context is valid, we need to save it */
1026 nv04_graph_unload_context(dev);
1027
1028 /* Load context for next channel */
1029 chid = dev_priv->engine.fifo.channel_id(dev);
1030 chan = dev_priv->channels.ptr[chid];
1031 if (chan)
1032 nv04_graph_load_context(chan);
1033}
1034
1035static void
1036nv04_graph_isr(struct drm_device *dev)
1037{
1038 u32 stat;
1039
1040 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1041 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1042 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1043 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1044 u32 chid = (addr & 0x0f000000) >> 24;
1045 u32 subc = (addr & 0x0000e000) >> 13;
1046 u32 mthd = (addr & 0x00001ffc);
1047 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1048 u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
1049 u32 show = stat;
1050
1051 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1052 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1053 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1054 show &= ~NV_PGRAPH_INTR_NOTIFY;
1055 }
1056 }
1057
1058 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1059 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1060 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1061 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1062 nv04_graph_context_switch(dev);
1063 }
1064
1065 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1066 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1067
1068 if (show && nouveau_ratelimit()) {
1069 NV_INFO(dev, "PGRAPH -");
1070 nouveau_bitfield_print(nv04_graph_intr, show);
1071 printk(" nsource:");
1072 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1073 printk(" nstatus:");
1074 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1075 printk("\n");
1076 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1077 "mthd 0x%04x data 0x%08x\n",
1078 chid, subc, class, mthd, data);
1079 }
1080 }
1081}
1082
1083static void
1084nv04_graph_destroy(struct drm_device *dev, int engine)
1085{
1086 struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
1087
1088 nouveau_irq_unregister(dev, 12);
1089
1090 NVOBJ_ENGINE_DEL(dev, GR);
1091 kfree(pgraph);
1092}
1093
1094int
1095nv04_graph_create(struct drm_device *dev)
1096{
1097 struct nv04_graph_engine *pgraph;
1098
1099 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
1100 if (!pgraph)
1101 return -ENOMEM;
1102
1103 pgraph->base.destroy = nv04_graph_destroy;
1104 pgraph->base.init = nv04_graph_init;
1105 pgraph->base.fini = nv04_graph_fini;
1106 pgraph->base.context_new = nv04_graph_context_new;
1107 pgraph->base.context_del = nv04_graph_context_del;
1108 pgraph->base.object_new = nv04_graph_object_new;
1109
1110 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	nouveau_irq_register(dev, 12, nv04_graph_isr);
 
 	/* dvd subpicture */
 	NVOBJ_CLASS(dev, 0x0038, GR);
@@ -1222,93 +1355,5 @@ nv04_graph_register(struct drm_device *dev)
 	NVOBJ_CLASS(dev, 0x506e, SW);
 	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
 	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
 	return 0;
-};
-
-static struct nouveau_bitfield nv04_graph_intr[] = {
-	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
-	{}
-};
-
-static struct nouveau_bitfield nv04_graph_nstatus[] =
-{
-	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
-	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
-	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
-	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
-	{}
-};
-
-struct nouveau_bitfield nv04_graph_nsource[] =
-{
-	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
-	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
-	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
-	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
-	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
-	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
-	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
-	{}
-};
-
-static void
-nv04_graph_isr(struct drm_device *dev)
-{
-	u32 stat;
-
-	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-		u32 chid = (addr & 0x0f000000) >> 24;
-		u32 subc = (addr & 0x0000e000) >> 13;
-		u32 mthd = (addr & 0x00001ffc);
-		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
-		u32 show = stat;
-
-		if (stat & NV_PGRAPH_INTR_NOTIFY) {
-			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
-					show &= ~NV_PGRAPH_INTR_NOTIFY;
-			}
-		}
-
-		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			nv04_graph_context_switch(dev);
-		}
-
-		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
-		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
-		if (show && nouveau_ratelimit()) {
-			NV_INFO(dev, "PGRAPH -");
-			nouveau_bitfield_print(nv04_graph_intr, show);
-			printk(" nsource:");
-			nouveau_bitfield_print(nv04_graph_nsource, nsource);
-			printk(" nstatus:");
-			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
-				     "mthd 0x%04x data 0x%08x\n",
-				chid, subc, class, mthd, data);
-		}
-	}
 }
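The pattern above repeats in each per-generation file below: the global dev_priv->engine.graph hooks and its registered flag give way to a per-engine object handed to NVOBJ_ENGINE_ADD. As a hedged sketch only, the callback set such an engine implements looks roughly like this; the field layout is inferred from the assignments made in this series, and nouveau_drv.h in the same tree carries the authoritative definition:

/* Sketch, not the verbatim header: abridged from the callbacks this
 * series assigns (destroy/init/fini, context and object management).
 */
struct nouveau_exec_engine {
	void (*destroy)(struct drm_device *, int engine);
	int  (*init)(struct drm_device *, int engine);
	int  (*fini)(struct drm_device *, int engine);
	int  (*context_new)(struct nouveau_channel *, int engine);
	void (*context_del)(struct nouveau_channel *, int engine);
	int  (*object_new)(struct nouveau_channel *, int engine,
			   u32 handle, u16 class);
	void (*set_tile_region)(struct drm_device *, int i);
};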
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index b8e3edb5c063..b8611b955313 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -95,6 +95,9 @@ nv04_instmem_takedown(struct drm_device *dev)
 	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
 	nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
 	nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
+
+	if (drm_mm_initialized(&dev_priv->ramin_heap))
+		drm_mm_takedown(&dev_priv->ramin_heap);
 }
 
 int
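The guard keeps takedown safe on devices where the PRAMIN heap never (or only partially) came up. A minimal sketch of the pairing it relies on, assuming the drm_mm API of this era (drm_mm_init taking a start offset and size):

/* Hedged sketch of the init/teardown pairing behind the guard above. */
drm_mm_init(&dev_priv->ramin_heap, offset, size);	/* in instmem init */

if (drm_mm_initialized(&dev_priv->ramin_heap))		/* in takedown */
	drm_mm_takedown(&dev_priv->ramin_heap);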
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8c92edb7bbcd..0930c6cb88e0 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -28,10 +28,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
 
-static int nv10_graph_register(struct drm_device *);
-static void nv10_graph_isr(struct drm_device *);
-
-#define NV10_FIFO_NUMBER 32
+struct nv10_graph_engine {
+	struct nouveau_exec_engine base;
+};
 
 struct pipe_state {
 	uint32_t pipe_0x0000[0x040/4];
@@ -414,9 +413,9 @@ struct graph_state {
 
 static void nv10_graph_save_pipe(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
 	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+	struct drm_device *dev = chan->dev;
 
 	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
 	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
@@ -432,9 +431,9 @@ static void nv10_graph_save_pipe(struct nouveau_channel *chan)
 
 static void nv10_graph_load_pipe(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
 	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+	struct drm_device *dev = chan->dev;
 	uint32_t xfmode0, xfmode1;
 	int i;
 
@@ -482,9 +481,9 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan)
 
 static void nv10_graph_create_pipe(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
 	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+	struct drm_device *dev = chan->dev;
 	uint32_t *fifo_pipe_state_addr;
 	int i;
 #define PIPE_INIT(addr) \
@@ -661,8 +660,6 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
 				       uint32_t inst)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
 	uint32_t ctx_user, ctx_switch[5];
 	int i, subchan = -1;
@@ -711,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
 		 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
 	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
 	nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
-	pgraph->fifo_access(dev, true);
-	pgraph->fifo_access(dev, false);
+	nv04_graph_fifo_access(dev, true);
+	nv04_graph_fifo_access(dev, false);
 
 	/* Restore the FIFO state */
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
@@ -729,11 +726,12 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
 	nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
 }
 
-int nv10_graph_load_context(struct nouveau_channel *chan)
+static int
+nv10_graph_load_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
 	uint32_t tmp;
 	int i;
 
@@ -757,21 +755,20 @@ int nv10_graph_load_context(struct nouveau_channel *chan)
 	return 0;
 }
 
-int
+static int
 nv10_graph_unload_context(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
 	struct graph_state *ctx;
 	uint32_t tmp;
 	int i;
 
-	chan = pgraph->channel(dev);
+	chan = nv10_graph_channel(dev);
 	if (!chan)
 		return 0;
-	ctx = chan->pgraph_ctx;
+	ctx = chan->engctx[NVOBJ_ENGINE_GR];
 
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
 		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
@@ -805,7 +802,7 @@ nv10_graph_context_switch(struct drm_device *dev)
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
 	chan = dev_priv->channels.ptr[chid];
-	if (chan && chan->pgraph_ctx)
+	if (chan && chan->engctx[NVOBJ_ENGINE_GR])
 		nv10_graph_load_context(chan);
 }
 
@@ -836,7 +833,8 @@ nv10_graph_channel(struct drm_device *dev)
 	return dev_priv->channels.ptr[chid];
 }
 
-int nv10_graph_create_context(struct nouveau_channel *chan)
+static int
+nv10_graph_context_new(struct nouveau_channel *chan, int engine)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -844,11 +842,10 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
 
 	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
 
-	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
-						GFP_KERNEL);
+	pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
 	if (pgraph_ctx == NULL)
 		return -ENOMEM;
-
+	chan->engctx[engine] = pgraph_ctx;
 
 	NV_WRITE_CTX(0x00400e88, 0x08000000);
 	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -873,30 +870,30 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
 	return 0;
 }
 
-void nv10_graph_destroy_context(struct nouveau_channel *chan)
+static void
+nv10_graph_context_del(struct nouveau_channel *chan, int engine)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	struct graph_state *pgraph_ctx = chan->engctx[engine];
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	pgraph->fifo_access(dev, false);
+	nv04_graph_fifo_access(dev, false);
 
 	/* Unload the context if it's the currently active one */
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
+	if (nv10_graph_channel(dev) == chan)
+		nv10_graph_unload_context(dev);
+
+	nv04_graph_fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	/* Free the context resources */
+	chan->engctx[engine] = NULL;
 	kfree(pgraph_ctx);
-	chan->pgraph_ctx = NULL;
-
-	pgraph->fifo_access(dev, true);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
-void
+static void
 nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -907,22 +904,18 @@ nv10_graph_set_tile_region(struct drm_device *dev, int i)
 	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
-int nv10_graph_init(struct drm_device *dev)
+static int
+nv10_graph_init(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t tmp;
-	int ret, i;
+	u32 tmp;
+	int i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
-	ret = nv10_graph_register(dev);
-	if (ret)
-		return ret;
-
-	nouveau_irq_register(dev, 12, nv10_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -963,18 +956,20 @@ int nv10_graph_init(struct drm_device *dev)
 	return 0;
 }
 
-void nv10_graph_takedown(struct drm_device *dev)
+static int
+nv10_graph_fini(struct drm_device *dev, int engine)
 {
+	nv10_graph_unload_context(dev);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	nouveau_irq_unregister(dev, 12);
+	return 0;
 }
 
 static int
 nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
 {
+	struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
 	struct drm_device *dev = chan->dev;
-	struct graph_state *ctx = chan->pgraph_ctx;
 	struct pipe_state *pipe = &ctx->pipe_state;
 	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	uint32_t xfmode0, xfmode1;
@@ -1061,64 +1056,13 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
 	return 0;
 }
 
-static int
-nv10_graph_register(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
-	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
-	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
-	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
-
-	/* celcius */
-	if (dev_priv->chipset <= 0x10) {
-		NVOBJ_CLASS(dev, 0x0056, GR);
-	} else
-	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
-		NVOBJ_CLASS(dev, 0x0096, GR);
-	} else {
-		NVOBJ_CLASS(dev, 0x0099, GR);
-		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
-	}
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
-	return 0;
-}
-
 struct nouveau_bitfield nv10_graph_intr[] = {
 	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
 	{ NV_PGRAPH_INTR_ERROR, "ERROR" },
 	{}
 };
 
-struct nouveau_bitfield nv10_graph_nstatus[] =
-{
+struct nouveau_bitfield nv10_graph_nstatus[] = {
 	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
 	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
 	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
@@ -1173,3 +1117,73 @@ nv10_graph_isr(struct drm_device *dev)
 		}
 	}
 }
+
+static void
+nv10_graph_destroy(struct drm_device *dev, int engine)
+{
+	struct nv10_graph_engine *pgraph = nv_engine(dev, engine);
+
+	nouveau_irq_unregister(dev, 12);
+	kfree(pgraph);
+}
+
+int
+nv10_graph_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_graph_engine *pgraph;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+	if (!pgraph)
+		return -ENOMEM;
+
+	pgraph->base.destroy = nv10_graph_destroy;
+	pgraph->base.init = nv10_graph_init;
+	pgraph->base.fini = nv10_graph_fini;
+	pgraph->base.context_new = nv10_graph_context_new;
+	pgraph->base.context_del = nv10_graph_context_del;
+	pgraph->base.object_new = nv04_graph_object_new;
+	pgraph->base.set_tile_region = nv10_graph_set_tile_region;
+
+	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	nouveau_irq_register(dev, 12, nv10_graph_isr);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+	/* celcius */
+	if (dev_priv->chipset <= 0x10) {
+		NVOBJ_CLASS(dev, 0x0056, GR);
+	} else
+	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+		NVOBJ_CLASS(dev, 0x0096, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x0099, GR);
+		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+	}
+
+	return 0;
+}
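The nv10 conversion moves per-channel graphics state from the ad-hoc chan->pgraph_ctx field into the generic chan->engctx[] slot, and context_del now re-enables FIFO access before freeing. A hedged sketch of the lifecycle the file settles on (function names here are illustrative, not from the tree; locking and register save/restore elided, see nv10_graph_context_del above):

/* Sketch of the engctx[] lifecycle: one slot per engine id. */
static int example_context_new(struct nouveau_channel *chan, int engine)
{
	struct graph_state *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	chan->engctx[engine] = ctx;	/* publish in the per-engine slot */
	return 0;
}

static void example_context_del(struct nouveau_channel *chan, int engine)
{
	struct graph_state *ctx = chan->engctx[engine];

	chan->engctx[engine] = NULL;	/* clear the slot before freeing */
	kfree(ctx);
}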
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 8464b76798d5..affc7d7dd029 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -24,6 +24,14 @@
  *
  */
 
+struct nv20_graph_engine {
+	struct nouveau_exec_engine base;
+	struct nouveau_gpuobj *ctxtab;
+	void (*grctx_init)(struct nouveau_gpuobj *);
+	u32 grctx_size;
+	u32 grctx_user;
+};
+
 #define NV20_GRCTX_SIZE (3580*4)
 #define NV25_GRCTX_SIZE (3529*4)
 #define NV2A_GRCTX_SIZE (3500*4)
@@ -32,12 +40,54 @@
 #define NV34_GRCTX_SIZE (18140)
 #define NV35_36_GRCTX_SIZE (22396)
 
-static int nv20_graph_register(struct drm_device *);
-static int nv30_graph_register(struct drm_device *);
-static void nv20_graph_isr(struct drm_device *);
+int
+nv20_graph_unload_context(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
+	struct nouveau_gpuobj *grctx;
+	u32 tmp;
+
+	chan = nv10_graph_channel(dev);
+	if (!chan)
+		return 0;
+	grctx = chan->engctx[NVOBJ_ENGINE_GR];
+
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
+
+	nouveau_wait_for_idle(dev);
+
+	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+	tmp |= (pfifo->channels - 1) << 24;
+	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+	return 0;
+}
+
+static void
+nv20_graph_rdi(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, writecount = 32;
+	uint32_t rdi_index = 0x2c80000;
+
+	if (dev_priv->chipset == 0x20) {
+		rdi_index = 0x3d0000;
+		writecount = 15;
+	}
+
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
+	for (i = 0; i < writecount; i++)
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
+
+	nouveau_wait_for_idle(dev);
+}
 
 static void
-nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv20_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;
 
@@ -87,7 +137,7 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }
 
 static void
-nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv25_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;
 
@@ -146,7 +196,7 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }
 
 static void
-nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;
 
@@ -196,7 +246,7 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }
 
 static void
-nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;
 
@@ -254,7 +304,7 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }
 
 static void
-nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv34_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;
 
@@ -312,7 +362,7 @@ nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }
 
 static void
-nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
 {
 	int i;
 
@@ -370,148 +420,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 }
 
 int
-nv20_graph_create_context(struct nouveau_channel *chan)
+nv20_graph_context_new(struct nouveau_channel *chan, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
+	struct nouveau_gpuobj *grctx = NULL;
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
-	unsigned int idoffs = 0x28;
 	int ret;
 
-	switch (dev_priv->chipset) {
-	case 0x20:
-		ctx_init = nv20_graph_context_init;
-		idoffs = 0;
-		break;
-	case 0x25:
-	case 0x28:
-		ctx_init = nv25_graph_context_init;
-		break;
-	case 0x2a:
-		ctx_init = nv2a_graph_context_init;
-		idoffs = 0;
-		break;
-	case 0x30:
-	case 0x31:
-		ctx_init = nv30_31_graph_context_init;
-		break;
-	case 0x34:
-		ctx_init = nv34_graph_context_init;
-		break;
-	case 0x35:
-	case 0x36:
-		ctx_init = nv35_36_graph_context_init;
-		break;
-	default:
-		BUG_ON(1);
-	}
-
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
+	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
 	if (ret)
 		return ret;
 
 	/* Initialise default context values */
-	ctx_init(dev, chan->ramin_grctx);
+	pgraph->grctx_init(grctx);
 
 	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
-	nv_wo32(chan->ramin_grctx, idoffs,
-		(chan->id << 24) | 0x1); /* CTX_USER */
+	/* CTX_USER */
+	nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
 
-	nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
+	nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
+	chan->engctx[engine] = grctx;
 	return 0;
 }
 
 void
-nv20_graph_destroy_context(struct nouveau_channel *chan)
+nv20_graph_context_del(struct nouveau_channel *chan, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
+	struct nouveau_gpuobj *grctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	pgraph->fifo_access(dev, false);
+	nv04_graph_fifo_access(dev, false);
 
 	/* Unload the context if it's the currently active one */
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
+	if (nv10_graph_channel(dev) == chan)
+		nv20_graph_unload_context(dev);
 
-	pgraph->fifo_access(dev, true);
+	nv04_graph_fifo_access(dev, true);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	/* Free the context resources */
-	nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
-}
-
-int
-nv20_graph_load_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	uint32_t inst;
+	nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
 
-	if (!chan->ramin_grctx)
-		return -EINVAL;
-	inst = chan->ramin_grctx->pinst >> 4;
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
-		     NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-
-	nouveau_wait_for_idle(dev);
-	return 0;
-}
-
-int
-nv20_graph_unload_context(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nouveau_channel *chan;
-	uint32_t inst, tmp;
-
-	chan = pgraph->channel(dev);
-	if (!chan)
-		return 0;
-	inst = chan->ramin_grctx->pinst >> 4;
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
-		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
-
-	nouveau_wait_for_idle(dev);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= (pfifo->channels - 1) << 24;
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
-	return 0;
+	nouveau_gpuobj_ref(NULL, &grctx);
+	chan->engctx[engine] = NULL;
 }
 
 static void
-nv20_graph_rdi(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, writecount = 32;
-	uint32_t rdi_index = 0x2c80000;
-
-	if (dev_priv->chipset == 0x20) {
-		rdi_index = 0x3d0000;
-		writecount = 15;
-	}
-
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
-	for (i = 0; i < writecount; i++)
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
-
-	nouveau_wait_for_idle(dev);
-}
-
-void
 nv20_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -536,56 +495,22 @@ nv20_graph_set_tile_region(struct drm_device *dev, int i)
 }
 
 int
-nv20_graph_init(struct drm_device *dev)
+nv20_graph_init(struct drm_device *dev, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t tmp, vramsz;
-	int ret, i;
-
-	switch (dev_priv->chipset) {
-	case 0x20:
-		pgraph->grctx_size = NV20_GRCTX_SIZE;
-		break;
-	case 0x25:
-	case 0x28:
-		pgraph->grctx_size = NV25_GRCTX_SIZE;
-		break;
-	case 0x2a:
-		pgraph->grctx_size = NV2A_GRCTX_SIZE;
-		break;
-	default:
-		NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
-		pgraph->accel_blocked = true;
-		return 0;
-	}
+	int i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
 
-	if (!pgraph->ctx_table) {
-		/* Create Context Pointer Table */
-		ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
-					 NVOBJ_FLAG_ZERO_ALLOC,
-					 &pgraph->ctx_table);
-		if (ret)
-			return ret;
-	}
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
-		pgraph->ctx_table->pinst >> 4);
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
 
 	nv20_graph_rdi(dev);
 
-	ret = nv20_graph_register(dev);
-	if (ret) {
-		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
-		return ret;
-	}
-
-	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -657,67 +582,20 @@ nv20_graph_init(struct drm_device *dev)
 	return 0;
 }
 
-void
-nv20_graph_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	nouveau_irq_unregister(dev, 12);
-
-	nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
-}
-
 int
-nv30_graph_init(struct drm_device *dev)
+nv30_graph_init(struct drm_device *dev, int engine)
 {
+	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	int ret, i;
-
-	switch (dev_priv->chipset) {
-	case 0x30:
-	case 0x31:
-		pgraph->grctx_size = NV30_31_GRCTX_SIZE;
-		break;
-	case 0x34:
-		pgraph->grctx_size = NV34_GRCTX_SIZE;
-		break;
-	case 0x35:
-	case 0x36:
-		pgraph->grctx_size = NV35_36_GRCTX_SIZE;
-		break;
-	default:
-		NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
-		pgraph->accel_blocked = true;
-		return 0;
-	}
+	int i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE,
 		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
 
-	if (!pgraph->ctx_table) {
-		/* Create Context Pointer Table */
-		ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
-					 NVOBJ_FLAG_ZERO_ALLOC,
-					 &pgraph->ctx_table);
-		if (ret)
-			return ret;
-	}
-
-	ret = nv30_graph_register(dev);
-	if (ret) {
-		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
-		return ret;
-	}
+	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
 
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
-		pgraph->ctx_table->pinst >> 4);
-
-	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -775,85 +653,11 @@ nv30_graph_init(struct drm_device *dev)
 	return 0;
 }
 
-static int
-nv20_graph_register(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
-	NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
-
-	/* kelvin */
-	if (dev_priv->chipset < 0x25)
-		NVOBJ_CLASS(dev, 0x0097, GR);
-	else
-		NVOBJ_CLASS(dev, 0x0597, GR);
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
-	return 0;
-}
-
-static int
-nv30_graph_register(struct drm_device *dev)
+int
+nv20_graph_fini(struct drm_device *dev, int engine)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->engine.graph.registered)
-		return 0;
-
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
-
-	/* rankine */
-	if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
-		NVOBJ_CLASS(dev, 0x0397, GR);
-	else
-	if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
-		NVOBJ_CLASS(dev, 0x0697, GR);
-	else
-	if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
-		NVOBJ_CLASS(dev, 0x0497, GR);
-
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-	dev_priv->engine.graph.registered = true;
+	nv20_graph_unload_context(dev);
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
 	return 0;
 }
 
@@ -897,3 +701,135 @@ nv20_graph_isr(struct drm_device *dev)
 		}
 	}
 }
+
+static void
+nv20_graph_destroy(struct drm_device *dev, int engine)
+{
+	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
+
+	nouveau_irq_unregister(dev, 12);
+	nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
+
+	NVOBJ_ENGINE_DEL(dev, GR);
+	kfree(pgraph);
+}
+
+int
+nv20_graph_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv20_graph_engine *pgraph;
+	int ret;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+	if (!pgraph)
+		return -ENOMEM;
+
+	pgraph->base.destroy = nv20_graph_destroy;
+	pgraph->base.fini = nv20_graph_fini;
+	pgraph->base.context_new = nv20_graph_context_new;
+	pgraph->base.context_del = nv20_graph_context_del;
+	pgraph->base.object_new = nv04_graph_object_new;
+	pgraph->base.set_tile_region = nv20_graph_set_tile_region;
+
+	pgraph->grctx_user = 0x0028;
+	if (dev_priv->card_type == NV_20) {
+		pgraph->base.init = nv20_graph_init;
+		switch (dev_priv->chipset) {
+		case 0x20:
+			pgraph->grctx_init = nv20_graph_context_init;
+			pgraph->grctx_size = NV20_GRCTX_SIZE;
+			pgraph->grctx_user = 0x0000;
+			break;
+		case 0x25:
+		case 0x28:
+			pgraph->grctx_init = nv25_graph_context_init;
+			pgraph->grctx_size = NV25_GRCTX_SIZE;
+			break;
+		case 0x2a:
+			pgraph->grctx_init = nv2a_graph_context_init;
+			pgraph->grctx_size = NV2A_GRCTX_SIZE;
+			pgraph->grctx_user = 0x0000;
+			break;
+		default:
+			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+			return 0;
+		}
+	} else {
+		pgraph->base.init = nv30_graph_init;
+		switch (dev_priv->chipset) {
+		case 0x30:
+		case 0x31:
+			pgraph->grctx_init = nv30_31_graph_context_init;
+			pgraph->grctx_size = NV30_31_GRCTX_SIZE;
+			break;
+		case 0x34:
+			pgraph->grctx_init = nv34_graph_context_init;
+			pgraph->grctx_size = NV34_GRCTX_SIZE;
+			break;
+		case 0x35:
+		case 0x36:
+			pgraph->grctx_init = nv35_36_graph_context_init;
+			pgraph->grctx_size = NV35_36_GRCTX_SIZE;
+			break;
+		default:
+			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+			return 0;
+		}
+	}
+
+	/* Create Context Pointer Table */
+	ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
+				 &pgraph->ctxtab);
+	if (ret) {
+		kfree(pgraph);
+		return ret;
+	}
+
+	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	if (dev_priv->card_type == NV_20) {
+		NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+		NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
+
+		/* kelvin */
+		if (dev_priv->chipset < 0x25)
+			NVOBJ_CLASS(dev, 0x0097, GR);
+		else
+			NVOBJ_CLASS(dev, 0x0597, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+		NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+		NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+		NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+		/* rankine */
+		if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+			NVOBJ_CLASS(dev, 0x0397, GR);
+		else
+		if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+			NVOBJ_CLASS(dev, 0x0697, GR);
+		else
+		if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+			NVOBJ_CLASS(dev, 0x0497, GR);
+	}
+
+	return 0;
+}
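The nv20/nv30 conversion hoists the per-chipset switch out of init/create_context into nv20_graph_create(), which caches grctx_init, grctx_size and grctx_user once; context_new() then just consumes the cached values. A hedged sketch of that consumption, with error handling trimmed (see nv20_graph_context_new above for the real flow):

/* Sketch: per-channel use of the values cached at engine-create time. */
struct nouveau_gpuobj *grctx = NULL;

nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
		   NVOBJ_FLAG_ZERO_ALLOC, &grctx);
pgraph->grctx_init(grctx);			/* chipset-specific fill   */
nv_wo32(grctx, pgraph->grctx_user,		/* CTX_USER slot           */
	(chan->id << 24) | 0x1);
nv_wo32(pgraph->ctxtab, chan->id * 4,		/* context pointer table   */
	grctx->pinst >> 4);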
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 49b9a35a9cd6..68cb2d991c88 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -115,6 +115,7 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
 	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
 	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
+	nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));
 
 	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
@@ -186,6 +187,7 @@ nv40_fifo_unload_context(struct drm_device *dev)
 	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
 	nv_wi32(dev, fc + 72, tmp);
 #endif
+	nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));
 
 	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
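The new RAMFC word at +84 (0x54) shadows PFIFO register 0x330c across channel switches; 0x54 is the same RAMFC offset the new nv40_mpeg.c below writes its PMPEG context handle to, so that engine's context pointer now round-trips with the rest of the FIFO state. As a sketch, with fc being the channel's RAMFC base:

/* Sketch of the round trip added by this hunk. */
nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));	/* unload: save to RAMFC  */
nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));	/* load: restore to PFIFO */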
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index fceb44c0ec74..5beb01b8ace1 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -28,14 +28,18 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_grctx.h" 30#include "nouveau_grctx.h"
31#include "nouveau_ramht.h"
31 32
32static int nv40_graph_register(struct drm_device *); 33struct nv40_graph_engine {
33static void nv40_graph_isr(struct drm_device *); 34 struct nouveau_exec_engine base;
35 u32 grctx_size;
36};
34 37
35struct nouveau_channel * 38static struct nouveau_channel *
36nv40_graph_channel(struct drm_device *dev) 39nv40_graph_channel(struct drm_device *dev)
37{ 40{
38 struct drm_nouveau_private *dev_priv = dev->dev_private; 41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *grctx;
39 uint32_t inst; 43 uint32_t inst;
40 int i; 44 int i;
41 45
@@ -45,74 +49,17 @@ nv40_graph_channel(struct drm_device *dev)
45 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; 49 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
46 50
47 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 51 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
48 struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 52 if (!dev_priv->channels.ptr[i])
53 continue;
49 54
50 if (chan && chan->ramin_grctx && 55 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
51 chan->ramin_grctx->pinst == inst) 56 if (grctx && grctx->pinst == inst)
52 return chan; 57 return dev_priv->channels.ptr[i];
53 } 58 }
54 59
55 return NULL; 60 return NULL;
56} 61}
57 62
58int
59nv40_graph_create_context(struct nouveau_channel *chan)
60{
61 struct drm_device *dev = chan->dev;
62 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
64 struct nouveau_grctx ctx = {};
65 unsigned long flags;
66 int ret;
67
68 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
69 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
70 if (ret)
71 return ret;
72
73 /* Initialise default context values */
74 ctx.dev = chan->dev;
75 ctx.mode = NOUVEAU_GRCTX_VALS;
76 ctx.data = chan->ramin_grctx;
77 nv40_grctx_init(&ctx);
78
79 nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
80
81 /* init grctx pointer in ramfc, and on PFIFO if channel is
82 * already active there
83 */
84 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
85 nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
86 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
87 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
88 nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
89 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
90 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
91 return 0;
92}
93
94void
95nv40_graph_destroy_context(struct nouveau_channel *chan)
96{
97 struct drm_device *dev = chan->dev;
98 struct drm_nouveau_private *dev_priv = dev->dev_private;
99 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
100 unsigned long flags;
101
102 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
103 pgraph->fifo_access(dev, false);
104
105 /* Unload the context if it's the currently active one */
106 if (pgraph->channel(dev) == chan)
107 pgraph->unload_context(dev);
108
109 pgraph->fifo_access(dev, true);
110 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
111
112 /* Free the context resources */
113 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
114}
115
116static int 63static int
117nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) 64nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
118{ 65{
@@ -154,57 +101,115 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
154 return 0; 101 return 0;
155} 102}
156 103
157/* Restore the context for a specific channel into PGRAPH */ 104static int
158int 105nv40_graph_unload_context(struct drm_device *dev)
159nv40_graph_load_context(struct nouveau_channel *chan)
160{ 106{
161 struct drm_device *dev = chan->dev;
162 uint32_t inst; 107 uint32_t inst;
163 int ret; 108 int ret;
164 109
165 if (!chan->ramin_grctx) 110 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
166 return -EINVAL; 111 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
167 inst = chan->ramin_grctx->pinst >> 4; 112 return 0;
113 inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
114
115 ret = nv40_graph_transfer_context(dev, inst, 1);
116
117 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
118 return ret;
119}
120
121static int
122nv40_graph_context_new(struct nouveau_channel *chan, int engine)
123{
124 struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
125 struct drm_device *dev = chan->dev;
126 struct drm_nouveau_private *dev_priv = dev->dev_private;
127 struct nouveau_gpuobj *grctx = NULL;
128 struct nouveau_grctx ctx = {};
129 unsigned long flags;
130 int ret;
168 131
169 ret = nv40_graph_transfer_context(dev, inst, 0); 132 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
133 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
170 if (ret) 134 if (ret)
171 return ret; 135 return ret;
172 136
173 /* 0x40032C, no idea of it's exact function. Could simply be a 137 /* Initialise default context values */
174 * record of the currently active PGRAPH context. It's currently 138 ctx.dev = chan->dev;
175 * unknown as to what bit 24 does. The nv ddx has it set, so we will 139 ctx.mode = NOUVEAU_GRCTX_VALS;
176 * set it here too. 140 ctx.data = grctx;
177 */ 141 nv40_grctx_init(&ctx);
178 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); 142
179 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 143 nv_wo32(grctx, 0, grctx->vinst);
180 (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) | 144
181 NV40_PGRAPH_CTXCTL_CUR_LOADED); 145 /* init grctx pointer in ramfc, and on PFIFO if channel is
182 /* 0x32E0 records the instance address of the active FIFO's PGRAPH 146 * already active there
183 * context. If at any time this doesn't match 0x40032C, you will
184 * receive PGRAPH_INTR_CONTEXT_SWITCH
185 */ 147 */
186 nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst); 148 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
149 nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
150 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
151 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
152 nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
153 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
154 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
155
156 chan->engctx[engine] = grctx;
187 return 0; 157 return 0;
188} 158}
189 159
190int 160static void
191nv40_graph_unload_context(struct drm_device *dev) 161nv40_graph_context_del(struct nouveau_channel *chan, int engine)
192{ 162{
193 uint32_t inst; 163 struct nouveau_gpuobj *grctx = chan->engctx[engine];
194 int ret; 164 struct drm_device *dev = chan->dev;
165 struct drm_nouveau_private *dev_priv = dev->dev_private;
166 unsigned long flags;
195 167
196 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR); 168 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
197 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED)) 169 nv04_graph_fifo_access(dev, false);
198 return 0;
199 inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
200 170
201 ret = nv40_graph_transfer_context(dev, inst, 1); 171 /* Unload the context if it's the currently active one */
172 if (nv40_graph_channel(dev) == chan)
173 nv40_graph_unload_context(dev);
202 174
203 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst); 175 nv04_graph_fifo_access(dev, true);
176 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
177
178 /* Free the context resources */
179 nouveau_gpuobj_ref(NULL, &grctx);
180 chan->engctx[engine] = NULL;
181}
182
183int
184nv40_graph_object_new(struct nouveau_channel *chan, int engine,
185 u32 handle, u16 class)
186{
187 struct drm_device *dev = chan->dev;
188 struct nouveau_gpuobj *obj = NULL;
189 int ret;
190
191 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
192 if (ret)
193 return ret;
194 obj->engine = 1;
195 obj->class = class;
196
197 nv_wo32(obj, 0x00, class);
198 nv_wo32(obj, 0x04, 0x00000000);
199#ifndef __BIG_ENDIAN
200 nv_wo32(obj, 0x08, 0x00000000);
201#else
202 nv_wo32(obj, 0x08, 0x01000000);
203#endif
204 nv_wo32(obj, 0x0c, 0x00000000);
205 nv_wo32(obj, 0x10, 0x00000000);
206
207 ret = nouveau_ramht_insert(chan, handle, obj);
208 nouveau_gpuobj_ref(NULL, &obj);
204 return ret; 209 return ret;
205} 210}
206 211
207void 212static void
208nv40_graph_set_tile_region(struct drm_device *dev, int i) 213nv40_graph_set_tile_region(struct drm_device *dev, int i)
209{ 214{
210 struct drm_nouveau_private *dev_priv = dev->dev_private; 215 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -257,14 +262,14 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
257 * C51 0x4e 262 * C51 0x4e
258 */ 263 */
259int 264int
260nv40_graph_init(struct drm_device *dev) 265nv40_graph_init(struct drm_device *dev, int engine)
261{ 266{
262 struct drm_nouveau_private *dev_priv = 267 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
263 (struct drm_nouveau_private *)dev->dev_private; 268 struct drm_nouveau_private *dev_priv = dev->dev_private;
264 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 269 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
265 struct nouveau_grctx ctx = {}; 270 struct nouveau_grctx ctx = {};
266 uint32_t vramsz, *cp; 271 uint32_t vramsz, *cp;
267 int ret, i, j; 272 int i, j;
268 273
269 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 274 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
270 ~NV_PMC_ENABLE_PGRAPH); 275 ~NV_PMC_ENABLE_PGRAPH);
@@ -280,7 +285,7 @@ nv40_graph_init(struct drm_device *dev)
280 ctx.data = cp; 285 ctx.data = cp;
281 ctx.ctxprog_max = 256; 286 ctx.ctxprog_max = 256;
282 nv40_grctx_init(&ctx); 287 nv40_grctx_init(&ctx);
283 dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; 288 pgraph->grctx_size = ctx.ctxvals_pos * 4;
284 289
285 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 290 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
286 for (i = 0; i < ctx.ctxprog_len; i++) 291 for (i = 0; i < ctx.ctxprog_len; i++)
@@ -288,14 +293,9 @@ nv40_graph_init(struct drm_device *dev)
288 293
289 kfree(cp); 294 kfree(cp);
290 295
291 ret = nv40_graph_register(dev);
292 if (ret)
293 return ret;
294
295 /* No context present currently */ 296 /* No context present currently */
296 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 297 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
297 298
298 nouveau_irq_register(dev, 12, nv40_graph_isr);
299 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 299 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
300 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); 300 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
301 301
@@ -428,47 +428,10 @@ nv40_graph_init(struct drm_device *dev)
428 return 0; 428 return 0;
429} 429}
430 430
431void nv40_graph_takedown(struct drm_device *dev)
432{
433 nouveau_irq_unregister(dev, 12);
434}
435
436static int 431static int
437nv40_graph_register(struct drm_device *dev) 432nv40_graph_fini(struct drm_device *dev, int engine)
438{ 433{
439 struct drm_nouveau_private *dev_priv = dev->dev_private; 434 nv40_graph_unload_context(dev);
440
441 if (dev_priv->engine.graph.registered)
442 return 0;
443
444 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
445 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
446 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
447 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
448 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
449 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
450 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
451 NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
452 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
453 NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
454 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
455 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
456 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
457 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
458 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
459 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
460
461 /* curie */
462 if (nv44_graph_class(dev))
463 NVOBJ_CLASS(dev, 0x4497, GR);
464 else
465 NVOBJ_CLASS(dev, 0x4097, GR);
466
467 /* nvsw */
468 NVOBJ_CLASS(dev, 0x506e, SW);
469 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
470
471 dev_priv->engine.graph.registered = true;
472 return 0; 435 return 0;
473} 436}
474 437
@@ -476,17 +439,17 @@ static int
476nv40_graph_isr_chid(struct drm_device *dev, u32 inst) 439nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
477{ 440{
478 struct drm_nouveau_private *dev_priv = dev->dev_private; 441 struct drm_nouveau_private *dev_priv = dev->dev_private;
479 struct nouveau_channel *chan; 442 struct nouveau_gpuobj *grctx;
480 unsigned long flags; 443 unsigned long flags;
481 int i; 444 int i;
482 445
483 spin_lock_irqsave(&dev_priv->channels.lock, flags); 446 spin_lock_irqsave(&dev_priv->channels.lock, flags);
484 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 447 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
485 chan = dev_priv->channels.ptr[i]; 448 if (!dev_priv->channels.ptr[i])
486 if (!chan || !chan->ramin_grctx)
487 continue; 449 continue;
450 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
488 451
489 if (inst == chan->ramin_grctx->pinst) 452 if (grctx && grctx->pinst == inst)
490 break; 453 break;
491 } 454 }
492 spin_unlock_irqrestore(&dev_priv->channels.lock, flags); 455 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -537,3 +500,63 @@ nv40_graph_isr(struct drm_device *dev)
537 } 500 }
538 } 501 }
539} 502}
503
504static void
505nv40_graph_destroy(struct drm_device *dev, int engine)
506{
507 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
508
509 nouveau_irq_unregister(dev, 12);
510
511 NVOBJ_ENGINE_DEL(dev, GR);
512 kfree(pgraph);
513}
514
515int
516nv40_graph_create(struct drm_device *dev)
517{
518 struct nv40_graph_engine *pgraph;
519
520 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
521 if (!pgraph)
522 return -ENOMEM;
523
524 pgraph->base.destroy = nv40_graph_destroy;
525 pgraph->base.init = nv40_graph_init;
526 pgraph->base.fini = nv40_graph_fini;
527 pgraph->base.context_new = nv40_graph_context_new;
528 pgraph->base.context_del = nv40_graph_context_del;
529 pgraph->base.object_new = nv40_graph_object_new;
530 pgraph->base.set_tile_region = nv40_graph_set_tile_region;
531
532 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
533 nouveau_irq_register(dev, 12, nv40_graph_isr);
534
535 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
536 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
537 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
538 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
539 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
540 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
541 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
542 NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
543 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
544 NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
545 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
546 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
547 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
548 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
549 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
550 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
551
552 /* curie */
553 if (nv44_graph_class(dev))
554 NVOBJ_CLASS(dev, 0x4497, GR);
555 else
556 NVOBJ_CLASS(dev, 0x4097, GR);
557
558 /* nvsw */
559 NVOBJ_CLASS(dev, 0x506e, SW);
560 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
561 return 0;
562}
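nv40_graph_create() above is one instance of the bring-up skeleton every engine in this series converges on: allocate a wrapper embedding nouveau_exec_engine, fill in the hook table, register it with NVOBJ_ENGINE_ADD(), then bind classes and methods. A hedged sketch of that skeleton (all nvxx_* names are hypothetical placeholders for the per-engine hooks):

struct nvxx_engine {
	struct nouveau_exec_engine base;
	/* engine-private state (ucode, context sizes, ...) */
};

int
nvxx_create(struct drm_device *dev)
{
	struct nvxx_engine *eng = kzalloc(sizeof(*eng), GFP_KERNEL);

	if (!eng)
		return -ENOMEM;

	/* hypothetical stubs standing in for the per-engine hooks */
	eng->base.destroy     = nvxx_destroy;
	eng->base.init        = nvxx_init;
	eng->base.fini        = nvxx_fini;
	eng->base.context_new = nvxx_context_new;
	eng->base.context_del = nvxx_context_del;
	eng->base.object_new  = nvxx_object_new;

	NVOBJ_ENGINE_ADD(dev, GR, &eng->base);	/* engine slot varies */
	NVOBJ_CLASS(dev, 0x0030, GR);		/* class bindings follow */
	return 0;
}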
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c
new file mode 100644
index 000000000000..6d2af292a2e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c
@@ -0,0 +1,311 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_ramht.h"
28
29struct nv40_mpeg_engine {
30 struct nouveau_exec_engine base;
31};
32
33static int
34nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
35{
36 struct drm_device *dev = chan->dev;
37 struct drm_nouveau_private *dev_priv = dev->dev_private;
38 struct nouveau_gpuobj *ctx = NULL;
39 unsigned long flags;
40 int ret;
41
42 NV_DEBUG(dev, "ch%d\n", chan->id);
43
44 ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
45 NVOBJ_FLAG_ZERO_FREE, &ctx);
46 if (ret)
47 return ret;
48
49 nv_wo32(ctx, 0x78, 0x02001ec1);
50
51 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
52 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
53 if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
54 nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
55 nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
56 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
57 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
58
59 chan->engctx[engine] = ctx;
60 return 0;
61}
62
63static void
64nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
65{
66 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
67 struct nouveau_gpuobj *ctx = chan->engctx[engine];
68 struct drm_device *dev = chan->dev;
69 unsigned long flags;
70 u32 inst = 0x80000000 | (ctx->pinst >> 4);
71
72 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
73 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
74 if (nv_rd32(dev, 0x00b318) == inst)
75 nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
76 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
77 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
78
79 nouveau_gpuobj_ref(NULL, &ctx);
80 chan->engctx[engine] = NULL;
81}
82
83static int
84nv40_mpeg_object_new(struct nouveau_channel *chan, int engine,
85 u32 handle, u16 class)
86{
87 struct drm_device *dev = chan->dev;
88 struct nouveau_gpuobj *obj = NULL;
89 int ret;
90
91 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
92 NVOBJ_FLAG_ZERO_FREE, &obj);
93 if (ret)
94 return ret;
95 obj->engine = 2;
96 obj->class = class;
97
98 nv_wo32(obj, 0x00, class);
99
100 ret = nouveau_ramht_insert(chan, handle, obj);
101 nouveau_gpuobj_ref(NULL, &obj);
102 return ret;
103}
104
105static int
106nv40_mpeg_init(struct drm_device *dev, int engine)
107{
108 struct drm_nouveau_private *dev_priv = dev->dev_private;
109 struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
110 int i;
111
112 /* VPE init */
113 nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
114 nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
115 nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
116 nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
117
118 for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
119 pmpeg->base.set_tile_region(dev, i);
120
121 /* PMPEG init */
122 nv_wr32(dev, 0x00b32c, 0x00000000);
123 nv_wr32(dev, 0x00b314, 0x00000100);
124 nv_wr32(dev, 0x00b220, 0x00000044);
125 nv_wr32(dev, 0x00b300, 0x02001ec1);
126 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
127
128 nv_wr32(dev, 0x00b100, 0xffffffff);
129 nv_wr32(dev, 0x00b140, 0xffffffff);
130
131 if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
132 NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
133 return -EBUSY;
134 }
135
136 return 0;
137}
138
139static int
140nv40_mpeg_fini(struct drm_device *dev, int engine)
141{
142 /* XXX: context save? */
143 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
144 nv_wr32(dev, 0x00b140, 0x00000000);
145 return 0;
146}
147
148static int
149nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
150{
151 struct drm_device *dev = chan->dev;
152 u32 inst = data << 4;
153 u32 dma0 = nv_ri32(dev, inst + 0);
154 u32 dma1 = nv_ri32(dev, inst + 4);
155 u32 dma2 = nv_ri32(dev, inst + 8);
156 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
157 u32 size = dma1 + 1;
158
159 /* only allow linear DMA objects */
160 if (!(dma0 & 0x00002000))
161 return -EINVAL;
162
163 if (mthd == 0x0190) {
164 /* DMA_CMD */
165 nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
166 nv_wr32(dev, 0x00b334, base);
167 nv_wr32(dev, 0x00b324, size);
168 } else
169 if (mthd == 0x01a0) {
170 /* DMA_DATA */
171 nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
172 nv_wr32(dev, 0x00b360, base);
173 nv_wr32(dev, 0x00b364, size);
174 } else {
175 /* DMA_IMAGE, VRAM only */
176 if (dma0 & 0x000c0000)
177 return -EINVAL;
178
179 nv_wr32(dev, 0x00b370, base);
180 nv_wr32(dev, 0x00b374, size);
181 }
182
183 return 0;
184}
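nv40_mpeg_mthd_dma() above validates the DMA object named by the method argument before programming the MPEG base/size registers. A worked decode with assumed instance words (example values, not taken from the diff):

/* Assumed example values for the three words read back via nv_ri32():
 *   dma0 = 0x00012000  -> bit 13 set: linear, accepted
 *   dma1 = 0x000fffff  -> size = dma1 + 1 = 0x100000 bytes (1 MiB)
 *   dma2 = 0x00400000  -> base = (dma2 & 0xfffff000) | (dma0 >> 20)
 *                              = 0x00400000 | 0x000 = 0x00400000
 * A dma0 with bit 13 clear (non-linear object) is rejected with
 * -EINVAL; for DMA_IMAGE the target bits (dma0 & 0x000c0000) must
 * also be zero, i.e. VRAM only. */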
185
186static int
187nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst)
188{
189 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nouveau_gpuobj *ctx;
191 unsigned long flags;
192 int i;
193
194 spin_lock_irqsave(&dev_priv->channels.lock, flags);
195 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
196 if (!dev_priv->channels.ptr[i])
197 continue;
198
199 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
200 if (ctx && ctx->pinst == inst)
201 break;
202 }
203 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
204 return i;
205}
206
207static void
208nv40_vpe_set_tile_region(struct drm_device *dev, int i)
209{
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
212
213 nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
214 nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
215 nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
216}
217
218static void
219nv40_mpeg_isr(struct drm_device *dev)
220{
221 u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
222 u32 chid = nv40_mpeg_isr_chid(dev, inst);
223 u32 stat = nv_rd32(dev, 0x00b100);
224 u32 type = nv_rd32(dev, 0x00b230);
225 u32 mthd = nv_rd32(dev, 0x00b234);
226 u32 data = nv_rd32(dev, 0x00b238);
227 u32 show = stat;
228
229 if (stat & 0x01000000) {
230 /* happens on initial binding of the object */
231 if (type == 0x00000020 && mthd == 0x0000) {
232 nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
233 show &= ~0x01000000;
234 }
235
236 if (type == 0x00000010) {
237 if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
238 show &= ~0x01000000;
239 }
240 }
241
242 nv_wr32(dev, 0x00b100, stat);
243 nv_wr32(dev, 0x00b230, 0x00000001);
244
245 if (show && nouveau_ratelimit()) {
246 NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
247 chid, inst, stat, type, mthd, data);
248 }
249}
250
251static void
252nv40_vpe_isr(struct drm_device *dev)
253{
254 if (nv_rd32(dev, 0x00b100))
255 nv40_mpeg_isr(dev);
256
257 if (nv_rd32(dev, 0x00b800)) {
258 u32 stat = nv_rd32(dev, 0x00b800);
259 NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
260 nv_wr32(dev, 0x00b800, stat);
261 }
262}
263
264static void
265nv40_mpeg_destroy(struct drm_device *dev, int engine)
266{
267 struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
268
269 nouveau_irq_unregister(dev, 0);
270
271 NVOBJ_ENGINE_DEL(dev, MPEG);
272 kfree(pmpeg);
273}
274
275int
276nv40_mpeg_create(struct drm_device *dev)
277{
278 struct nv40_mpeg_engine *pmpeg;
279
280 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
281 if (!pmpeg)
282 return -ENOMEM;
283
284 pmpeg->base.destroy = nv40_mpeg_destroy;
285 pmpeg->base.init = nv40_mpeg_init;
286 pmpeg->base.fini = nv40_mpeg_fini;
287 pmpeg->base.context_new = nv40_mpeg_context_new;
288 pmpeg->base.context_del = nv40_mpeg_context_del;
289 pmpeg->base.object_new = nv40_mpeg_object_new;
290
291 /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
292 * all VPE engines; for this driver's purposes the PMPEG engine
293 * will be treated as the "master" and handle the global VPE
294 * bits too.
295 */
296 pmpeg->base.set_tile_region = nv40_vpe_set_tile_region;
297 nouveau_irq_register(dev, 0, nv40_vpe_isr);
298
299 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
300 NVOBJ_CLASS(dev, 0x3174, MPEG);
301 NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma);
302 NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma);
303 NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma);
304
305#if 0
306 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
307 NVOBJ_CLASS(dev, 0x4075, ME);
308#endif
309 return 0;
310
311}
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
index de81151648f8..8cf63a8b30cd 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "drm_fixed.h"
27#include "nouveau_drv.h" 26#include "nouveau_drv.h"
28#include "nouveau_hw.h" 27#include "nouveau_hw.h"
29 28
@@ -47,45 +46,52 @@ nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
47} 46}
48 47
49int 48int
50nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, 49nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
51 int *N, int *fN, int *M, int *P) 50 int *pN, int *pfN, int *pM, int *P)
52{ 51{
53 fixed20_12 fb_div, a, b; 52 u32 best_err = ~0, err;
54 u32 refclk = pll->refclk / 10; 53 int M, lM, hM, N, fN;
55 u32 max_vco_freq = pll->vco1.maxfreq / 10;
56 u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10;
57 clk /= 10;
58 54
59 *P = max_vco_freq / clk; 55 *P = pll->vco1.maxfreq / clk;
60 if (*P > pll->max_p) 56 if (*P > pll->max_p)
61 *P = pll->max_p; 57 *P = pll->max_p;
62 if (*P < pll->min_p) 58 if (*P < pll->min_p)
63 *P = pll->min_p; 59 *P = pll->min_p;
64 60
65 /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ 61 lM = (pll->refclk + pll->vco1.max_inputfreq) / pll->vco1.max_inputfreq;
66 a.full = dfixed_const(refclk + max_vco_inputfreq); 62 lM = max(lM, (int)pll->vco1.min_m);
67 b.full = dfixed_const(max_vco_inputfreq); 63 hM = (pll->refclk + pll->vco1.min_inputfreq) / pll->vco1.min_inputfreq;
68 a.full = dfixed_div(a, b); 64 hM = min(hM, (int)pll->vco1.max_m);
69 a.full = dfixed_floor(a);
70 *M = dfixed_trunc(a);
71 65
72 /* fb_div = (vco * *M) / refclk; */ 66 for (M = lM; M <= hM; M++) {
73 fb_div.full = dfixed_const(clk * *P); 67 u32 tmp = clk * *P * M;
74 fb_div.full = dfixed_mul(fb_div, a); 68 N = tmp / pll->refclk;
75 a.full = dfixed_const(refclk); 69 fN = tmp % pll->refclk;
76 fb_div.full = dfixed_div(fb_div, a); 70 if (!pfN && fN >= pll->refclk / 2)
71 N++;
77 72
78 /* *N = floor(fb_div); */ 73 if (N < pll->vco1.min_n)
79 a.full = dfixed_floor(fb_div); 74 continue;
80 *N = dfixed_trunc(fb_div); 75 if (N > pll->vco1.max_n)
76 break;
81 77
82 /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */ 78 err = abs(clk - (pll->refclk * N / M / *P));
83 b.full = dfixed_const(8192); 79 if (err < best_err) {
84 a.full = dfixed_mul(a, b); 80 best_err = err;
85 fb_div.full = dfixed_mul(fb_div, b); 81 *pN = N;
86 fb_div.full = fb_div.full - a.full; 82 *pM = M;
87 *fN = dfixed_trunc(fb_div) - 4096; 83 }
88 *fN &= 0xffff;
89 84
90 return clk; 85 if (pfN) {
86 *pfN = (((fN << 13) / pll->refclk) - 4096) & 0xffff;
87 return clk;
88 }
89 }
90
91 if (unlikely(best_err == ~0)) {
92 NV_ERROR(dev, "unable to find matching pll values\n");
93 return -EINVAL;
94 }
95
96 return pll->refclk * *pN / *pM / *P;
91} 97}
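The nva3_calc_pll() rewrite above drops the fixed-point helpers in favour of a plain integer search: P is clamped from the VCO ceiling, M is bounded by the legal PLL input frequency range, and for each M the code derives N (plus an optional 13-bit fraction) and keeps the lowest-error pair. A worked pass with assumed numbers:

/* Assumed inputs: refclk = 27000 kHz, target clk = 135000 kHz, and
 * P = 2 after clamping.  Trying M = 2:
 *   tmp = clk * P * M        = 135000 * 2 * 2     = 540000
 *   N   = tmp / refclk       = 540000 / 27000     = 20, fN = 0
 *   out = refclk * N / M / P = 27000 * 20 / 2 / 2 = 135000 (exact)
 * When a fractional divider is requested (pfN != NULL), the remainder
 * is encoded as ((fN << 13) / refclk) - 4096 and masked to 16 bits,
 * i.e. a signed offset around integer N in 1/8192 steps. */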
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index a19ccaa025b3..ebabacf38da9 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -286,7 +286,7 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
286 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); 286 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
287 } else 287 } else
288 if (dev_priv->chipset < NV_C0) { 288 if (dev_priv->chipset < NV_C0) {
289 ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); 289 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
290 if (ret <= 0) 290 if (ret <= 0)
291 return 0; 291 return 0;
292 292
@@ -298,7 +298,7 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
298 nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); 298 nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
299 nv_wr32(dev, pll.reg + 8, N2); 299 nv_wr32(dev, pll.reg + 8, N2);
300 } else { 300 } else {
301 ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); 301 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
302 if (ret <= 0) 302 if (ret <= 0)
303 return 0; 303 return 0;
304 304
@@ -349,14 +349,14 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
349 struct drm_gem_object *gem; 349 struct drm_gem_object *gem;
350 int ret = 0, i; 350 int ret = 0, i;
351 351
352 if (width != 64 || height != 64)
353 return -EINVAL;
354
355 if (!buffer_handle) { 352 if (!buffer_handle) {
356 nv_crtc->cursor.hide(nv_crtc, true); 353 nv_crtc->cursor.hide(nv_crtc, true);
357 return 0; 354 return 0;
358 } 355 }
359 356
357 if (width != 64 || height != 64)
358 return -EINVAL;
359
360 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); 360 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
361 if (!gem) 361 if (!gem)
362 return -ENOENT; 362 return -ENOENT;
@@ -532,8 +532,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
532 if (atomic) { 532 if (atomic) {
533 drm_fb = passed_fb; 533 drm_fb = passed_fb;
534 fb = nouveau_framebuffer(passed_fb); 534 fb = nouveau_framebuffer(passed_fb);
535 } 535 } else {
536 else {
537 /* If not atomic, we can go ahead and pin, and unpin the 536 /* If not atomic, we can go ahead and pin, and unpin the
538 * old fb we were passed. 537 * old fb we were passed.
539 */ 538 */
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 75a376cc342a..74a3f6872701 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -517,13 +517,25 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
517 if (bios->fp.if_is_24bit) 517 if (bios->fp.if_is_24bit)
518 script |= 0x0200; 518 script |= 0x0200;
519 } else { 519 } else {
520 /* determine number of lvds links */
521 if (nv_connector && nv_connector->edid &&
522 nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
523 /* http://www.spwg.org */
524 if (((u8 *)nv_connector->edid)[121] == 2)
525 script |= 0x0100;
526 } else
520 if (pxclk >= bios->fp.duallink_transition_clk) { 527 if (pxclk >= bios->fp.duallink_transition_clk) {
521 script |= 0x0100; 528 script |= 0x0100;
529 }
530
531 /* determine panel depth */
532 if (script & 0x0100) {
522 if (bios->fp.strapless_is_24bit & 2) 533 if (bios->fp.strapless_is_24bit & 2)
523 script |= 0x0200; 534 script |= 0x0200;
524 } else 535 } else {
525 if (bios->fp.strapless_is_24bit & 1) 536 if (bios->fp.strapless_is_24bit & 1)
526 script |= 0x0200; 537 script |= 0x0200;
538 }
527 539
528 if (nv_connector && nv_connector->edid && 540 if (nv_connector && nv_connector->edid &&
529 (nv_connector->edid->revision >= 4) && 541 (nv_connector->edid->revision >= 4) &&
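The script-selection hunk above stops inferring LVDS link count purely from the pixel clock: on DCB_CONNECTOR_LVDS_SPWG panels, byte 121 of the EDID (per the SPWG notebook-panel spec) gives the channel count, and a value of 2 selects the dual-link script; panel depth is then chosen from the resulting link count via the strapless_is_24bit flags. A hedged restatement of the decision as a predicate (sketch only, mirrors the hunk):

static bool
nvxx_lvds_dual_link(struct nouveau_connector *nv_connector,
		    struct nvbios *bios, u32 pxclk)
{
	if (nv_connector && nv_connector->edid &&
	    nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG)
		return ((u8 *)nv_connector->edid)[121] == 2;
	return pxclk >= bios->fp.duallink_transition_clk;
}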
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index b02a5b1e7d37..e25cbb46789a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -31,10 +31,95 @@
31#include "nouveau_grctx.h" 31#include "nouveau_grctx.h"
32#include "nouveau_dma.h" 32#include "nouveau_dma.h"
33#include "nouveau_vm.h" 33#include "nouveau_vm.h"
34#include "nouveau_ramht.h"
34#include "nv50_evo.h" 35#include "nv50_evo.h"
35 36
36static int nv50_graph_register(struct drm_device *); 37struct nv50_graph_engine {
37static void nv50_graph_isr(struct drm_device *); 38 struct nouveau_exec_engine base;
39 u32 ctxprog[512];
40 u32 ctxprog_size;
41 u32 grctx_size;
42};
43
44static void
45nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
46{
47 const uint32_t mask = 0x00010001;
48
49 if (enabled)
50 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
51 else
52 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
53}
54
55static struct nouveau_channel *
56nv50_graph_channel(struct drm_device *dev)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 uint32_t inst;
60 int i;
61
62 /* Be sure we're not in the middle of a context switch or bad things
63 * will happen, such as unloading the wrong pgraph context.
64 */
65 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
66 NV_ERROR(dev, "Ctxprog is still running\n");
67
68 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
69 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
70 return NULL;
71 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
72
73 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
74 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
75
76 if (chan && chan->ramin && chan->ramin->vinst == inst)
77 return chan;
78 }
79
80 return NULL;
81}
82
83static int
84nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
85{
86 uint32_t fifo = nv_rd32(dev, 0x400500);
87
88 nv_wr32(dev, 0x400500, fifo & ~1);
89 nv_wr32(dev, 0x400784, inst);
90 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
91 nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
92 nv_wr32(dev, 0x400040, 0xffffffff);
93 (void)nv_rd32(dev, 0x400040);
94 nv_wr32(dev, 0x400040, 0x00000000);
95 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
96
97 if (nouveau_wait_for_idle(dev))
98 nv_wr32(dev, 0x40032c, inst | (1<<31));
99 nv_wr32(dev, 0x400500, fifo);
100
101 return 0;
102}
103
104static int
105nv50_graph_unload_context(struct drm_device *dev)
106{
107 uint32_t inst;
108
109 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
110 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
111 return 0;
112 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
113
114 nouveau_wait_for_idle(dev);
115 nv_wr32(dev, 0x400784, inst);
116 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
117 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
118 nouveau_wait_for_idle(dev);
119
120 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
121 return 0;
122}
38 123
39static void 124static void
40nv50_graph_init_reset(struct drm_device *dev) 125nv50_graph_init_reset(struct drm_device *dev)
@@ -52,7 +137,6 @@ nv50_graph_init_intr(struct drm_device *dev)
52{ 137{
53 NV_DEBUG(dev, "\n"); 138 NV_DEBUG(dev, "\n");
54 139
55 nouveau_irq_register(dev, 12, nv50_graph_isr);
56 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); 140 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
57 nv_wr32(dev, 0x400138, 0xffffffff); 141 nv_wr32(dev, 0x400138, 0xffffffff);
58 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); 142 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -135,34 +219,14 @@ nv50_graph_init_zcull(struct drm_device *dev)
135static int 219static int
136nv50_graph_init_ctxctl(struct drm_device *dev) 220nv50_graph_init_ctxctl(struct drm_device *dev)
137{ 221{
138 struct drm_nouveau_private *dev_priv = dev->dev_private; 222 struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
139 struct nouveau_grctx ctx = {};
140 uint32_t *cp;
141 int i; 223 int i;
142 224
143 NV_DEBUG(dev, "\n"); 225 NV_DEBUG(dev, "\n");
144 226
145 cp = kmalloc(512 * 4, GFP_KERNEL); 227 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
146 if (!cp) { 228 for (i = 0; i < pgraph->ctxprog_size; i++)
147 NV_ERROR(dev, "failed to allocate ctxprog\n"); 229 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
148 dev_priv->engine.graph.accel_blocked = true;
149 return 0;
150 }
151
152 ctx.dev = dev;
153 ctx.mode = NOUVEAU_GRCTX_PROG;
154 ctx.data = cp;
155 ctx.ctxprog_max = 512;
156 if (!nv50_grctx_init(&ctx)) {
157 dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
158
159 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
160 for (i = 0; i < ctx.ctxprog_len; i++)
161 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
162 } else {
163 dev_priv->engine.graph.accel_blocked = true;
164 }
165 kfree(cp);
166 230
167 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ 231 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
168 nv_wr32(dev, 0x400320, 4); 232 nv_wr32(dev, 0x400320, 4);
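With the change above, the 512-word context program is generated once at create time and cached in the engine struct; init merely replays it through the auto-incrementing UCODE_INDEX/UCODE_DATA register pair. The upload idiom, as a hedged sketch:

/* Sketch only: index/data-port microcode upload as performed by
 * nv50_graph_init_ctxctl() above. */
static void
nvxx_upload_ctxprog(struct drm_device *dev, const u32 *code, u32 words)
{
	u32 i;

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < words; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, code[i]);
}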
@@ -171,8 +235,8 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
171 return 0; 235 return 0;
172} 236}
173 237
174int 238static int
175nv50_graph_init(struct drm_device *dev) 239nv50_graph_init(struct drm_device *dev, int engine)
176{ 240{
177 int ret; 241 int ret;
178 242
@@ -186,105 +250,66 @@ nv50_graph_init(struct drm_device *dev)
186 if (ret) 250 if (ret)
187 return ret; 251 return ret;
188 252
189 ret = nv50_graph_register(dev);
190 if (ret)
191 return ret;
192 nv50_graph_init_intr(dev); 253 nv50_graph_init_intr(dev);
193 return 0; 254 return 0;
194} 255}
195 256
196void 257static int
197nv50_graph_takedown(struct drm_device *dev) 258nv50_graph_fini(struct drm_device *dev, int engine)
198{ 259{
199 NV_DEBUG(dev, "\n"); 260 NV_DEBUG(dev, "\n");
261 nv50_graph_unload_context(dev);
200 nv_wr32(dev, 0x40013c, 0x00000000); 262 nv_wr32(dev, 0x40013c, 0x00000000);
201 nouveau_irq_unregister(dev, 12); 263 return 0;
202}
203
204void
205nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
206{
207 const uint32_t mask = 0x00010001;
208
209 if (enabled)
210 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
211 else
212 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
213}
214
215struct nouveau_channel *
216nv50_graph_channel(struct drm_device *dev)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 uint32_t inst;
220 int i;
221
222 /* Be sure we're not in the middle of a context switch or bad things
223 * will happen, such as unloading the wrong pgraph context.
224 */
225 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
226 NV_ERROR(dev, "Ctxprog is still running\n");
227
228 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
229 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
230 return NULL;
231 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
232
233 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
234 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
235
236 if (chan && chan->ramin && chan->ramin->vinst == inst)
237 return chan;
238 }
239
240 return NULL;
241} 264}
242 265
243int 266static int
244nv50_graph_create_context(struct nouveau_channel *chan) 267nv50_graph_context_new(struct nouveau_channel *chan, int engine)
245{ 268{
246 struct drm_device *dev = chan->dev; 269 struct drm_device *dev = chan->dev;
247 struct drm_nouveau_private *dev_priv = dev->dev_private; 270 struct drm_nouveau_private *dev_priv = dev->dev_private;
248 struct nouveau_gpuobj *ramin = chan->ramin; 271 struct nouveau_gpuobj *ramin = chan->ramin;
249 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 272 struct nouveau_gpuobj *grctx = NULL;
273 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
250 struct nouveau_grctx ctx = {}; 274 struct nouveau_grctx ctx = {};
251 int hdr, ret; 275 int hdr, ret;
252 276
253 NV_DEBUG(dev, "ch%d\n", chan->id); 277 NV_DEBUG(dev, "ch%d\n", chan->id);
254 278
255 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0, 279 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
256 NVOBJ_FLAG_ZERO_ALLOC | 280 NVOBJ_FLAG_ZERO_ALLOC |
257 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); 281 NVOBJ_FLAG_ZERO_FREE, &grctx);
258 if (ret) 282 if (ret)
259 return ret; 283 return ret;
260 284
261 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 285 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
262 nv_wo32(ramin, hdr + 0x00, 0x00190002); 286 nv_wo32(ramin, hdr + 0x00, 0x00190002);
263 nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst + 287 nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
264 pgraph->grctx_size - 1); 288 nv_wo32(ramin, hdr + 0x08, grctx->vinst);
265 nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
266 nv_wo32(ramin, hdr + 0x0c, 0); 289 nv_wo32(ramin, hdr + 0x0c, 0);
267 nv_wo32(ramin, hdr + 0x10, 0); 290 nv_wo32(ramin, hdr + 0x10, 0);
268 nv_wo32(ramin, hdr + 0x14, 0x00010000); 291 nv_wo32(ramin, hdr + 0x14, 0x00010000);
269 292
270 ctx.dev = chan->dev; 293 ctx.dev = chan->dev;
271 ctx.mode = NOUVEAU_GRCTX_VALS; 294 ctx.mode = NOUVEAU_GRCTX_VALS;
272 ctx.data = chan->ramin_grctx; 295 ctx.data = grctx;
273 nv50_grctx_init(&ctx); 296 nv50_grctx_init(&ctx);
274 297
275 nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); 298 nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
276 299
277 dev_priv->engine.instmem.flush(dev); 300 dev_priv->engine.instmem.flush(dev);
278 atomic_inc(&chan->vm->pgraph_refs); 301
302 atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
303 chan->engctx[NVOBJ_ENGINE_GR] = grctx;
279 return 0; 304 return 0;
280} 305}
281 306
282void 307static void
283nv50_graph_destroy_context(struct nouveau_channel *chan) 308nv50_graph_context_del(struct nouveau_channel *chan, int engine)
284{ 309{
310 struct nouveau_gpuobj *grctx = chan->engctx[engine];
285 struct drm_device *dev = chan->dev; 311 struct drm_device *dev = chan->dev;
286 struct drm_nouveau_private *dev_priv = dev->dev_private; 312 struct drm_nouveau_private *dev_priv = dev->dev_private;
287 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
288 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 313 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
289 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 314 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
290 unsigned long flags; 315 unsigned long flags;
@@ -296,72 +321,49 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
296 321
297 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 322 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
298 pfifo->reassign(dev, false); 323 pfifo->reassign(dev, false);
299 pgraph->fifo_access(dev, false); 324 nv50_graph_fifo_access(dev, false);
300 325
301 if (pgraph->channel(dev) == chan) 326 if (nv50_graph_channel(dev) == chan)
302 pgraph->unload_context(dev); 327 nv50_graph_unload_context(dev);
303 328
304 for (i = hdr; i < hdr + 24; i += 4) 329 for (i = hdr; i < hdr + 24; i += 4)
305 nv_wo32(chan->ramin, i, 0); 330 nv_wo32(chan->ramin, i, 0);
306 dev_priv->engine.instmem.flush(dev); 331 dev_priv->engine.instmem.flush(dev);
307 332
308 pgraph->fifo_access(dev, true); 333 nv50_graph_fifo_access(dev, true);
309 pfifo->reassign(dev, true); 334 pfifo->reassign(dev, true);
310 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 335 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
311 336
312 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 337 nouveau_gpuobj_ref(NULL, &grctx);
313 338
314 atomic_dec(&chan->vm->pgraph_refs); 339 atomic_dec(&chan->vm->engref[engine]);
340 chan->engctx[engine] = NULL;
315} 341}
316 342
317static int 343static int
318nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) 344nv50_graph_object_new(struct nouveau_channel *chan, int engine,
319{ 345 u32 handle, u16 class)
320 uint32_t fifo = nv_rd32(dev, 0x400500);
321
322 nv_wr32(dev, 0x400500, fifo & ~1);
323 nv_wr32(dev, 0x400784, inst);
324 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
325 nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
326 nv_wr32(dev, 0x400040, 0xffffffff);
327 (void)nv_rd32(dev, 0x400040);
328 nv_wr32(dev, 0x400040, 0x00000000);
329 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
330
331 if (nouveau_wait_for_idle(dev))
332 nv_wr32(dev, 0x40032c, inst | (1<<31));
333 nv_wr32(dev, 0x400500, fifo);
334
335 return 0;
336}
337
338int
339nv50_graph_load_context(struct nouveau_channel *chan)
340{
341 uint32_t inst = chan->ramin->vinst >> 12;
342
343 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
344 return nv50_graph_do_load_context(chan->dev, inst);
345}
346
347int
348nv50_graph_unload_context(struct drm_device *dev)
349{ 346{
350 uint32_t inst; 347 struct drm_device *dev = chan->dev;
348 struct drm_nouveau_private *dev_priv = dev->dev_private;
349 struct nouveau_gpuobj *obj = NULL;
350 int ret;
351 351
352 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 352 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
353 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 353 if (ret)
354 return 0; 354 return ret;
355 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 355 obj->engine = 1;
356 obj->class = class;
356 357
357 nouveau_wait_for_idle(dev); 358 nv_wo32(obj, 0x00, class);
358 nv_wr32(dev, 0x400784, inst); 359 nv_wo32(obj, 0x04, 0x00000000);
359 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 360 nv_wo32(obj, 0x08, 0x00000000);
360 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 361 nv_wo32(obj, 0x0c, 0x00000000);
361 nouveau_wait_for_idle(dev); 362 dev_priv->engine.instmem.flush(dev);
362 363
363 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 364 ret = nouveau_ramht_insert(chan, handle, obj);
364 return 0; 365 nouveau_gpuobj_ref(NULL, &obj);
366 return ret;
365} 367}
366 368
367static void 369static void
@@ -442,68 +444,15 @@ nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
442 return 0; 444 return 0;
443} 445}
444 446
445static int
446nv50_graph_register(struct drm_device *dev)
447{
448 struct drm_nouveau_private *dev_priv = dev->dev_private;
449
450 if (dev_priv->engine.graph.registered)
451 return 0;
452
453 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
454 NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
455 NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
456 NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
457 NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
458 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
459
460 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
461 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
462 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
463
464 /* tesla */
465 if (dev_priv->chipset == 0x50)
466 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
467 else
468 if (dev_priv->chipset < 0xa0)
469 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
470 else {
471 switch (dev_priv->chipset) {
472 case 0xa0:
473 case 0xaa:
474 case 0xac:
475 NVOBJ_CLASS(dev, 0x8397, GR);
476 break;
477 case 0xa3:
478 case 0xa5:
479 case 0xa8:
480 NVOBJ_CLASS(dev, 0x8597, GR);
481 break;
482 case 0xaf:
483 NVOBJ_CLASS(dev, 0x8697, GR);
484 break;
485 }
486 }
487
488 /* compute */
489 NVOBJ_CLASS(dev, 0x50c0, GR);
490 if (dev_priv->chipset > 0xa0 &&
491 dev_priv->chipset != 0xaa &&
492 dev_priv->chipset != 0xac)
493 NVOBJ_CLASS(dev, 0x85c0, GR);
494
495 dev_priv->engine.graph.registered = true;
496 return 0;
497}
498 447
499void 448static void
500nv50_graph_tlb_flush(struct drm_device *dev) 449nv50_graph_tlb_flush(struct drm_device *dev, int engine)
501{ 450{
502 nv50_vm_flush_engine(dev, 0); 451 nv50_vm_flush_engine(dev, 0);
503} 452}
504 453
505void 454static void
506nv84_graph_tlb_flush(struct drm_device *dev) 455nv84_graph_tlb_flush(struct drm_device *dev, int engine)
507{ 456{
508 struct drm_nouveau_private *dev_priv = dev->dev_private; 457 struct drm_nouveau_private *dev_priv = dev->dev_private;
509 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 458 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -548,8 +497,7 @@ nv84_graph_tlb_flush(struct drm_device *dev)
548 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 497 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
549} 498}
550 499
551static struct nouveau_enum nv50_mp_exec_error_names[] = 500static struct nouveau_enum nv50_mp_exec_error_names[] = {
552{
553 { 3, "STACK_UNDERFLOW", NULL }, 501 { 3, "STACK_UNDERFLOW", NULL },
554 { 4, "QUADON_ACTIVE", NULL }, 502 { 4, "QUADON_ACTIVE", NULL },
555 { 8, "TIMEOUT", NULL }, 503 { 8, "TIMEOUT", NULL },
@@ -663,7 +611,7 @@ nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
663 nv_rd32(dev, addr + 0x20); 611 nv_rd32(dev, addr + 0x20);
664 pc = nv_rd32(dev, addr + 0x24); 612 pc = nv_rd32(dev, addr + 0x24);
665 oplow = nv_rd32(dev, addr + 0x70); 613 oplow = nv_rd32(dev, addr + 0x70);
666 ophigh= nv_rd32(dev, addr + 0x74); 614 ophigh = nv_rd32(dev, addr + 0x74);
667 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " 615 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
668 "TP %d MP %d: ", tpid, i); 616 "TP %d MP %d: ", tpid, i);
669 nouveau_enum_print(nv50_mp_exec_error_names, status); 617 nouveau_enum_print(nv50_mp_exec_error_names, status);
@@ -991,7 +939,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
991 return 1; 939 return 1;
992} 940}
993 941
994static int 942int
995nv50_graph_isr_chid(struct drm_device *dev, u64 inst) 943nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
996{ 944{
997 struct drm_nouveau_private *dev_priv = dev->dev_private; 945 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -1073,3 +1021,101 @@ nv50_graph_isr(struct drm_device *dev)
1073 if (nv_rd32(dev, 0x400824) & (1 << 31)) 1021 if (nv_rd32(dev, 0x400824) & (1 << 31))
1074 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 1022 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
1075} 1023}
1024
1025static void
1026nv50_graph_destroy(struct drm_device *dev, int engine)
1027{
1028 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
1029
1030 NVOBJ_ENGINE_DEL(dev, GR);
1031
1032 nouveau_irq_unregister(dev, 12);
1033 kfree(pgraph);
1034}
1035
1036int
1037nv50_graph_create(struct drm_device *dev)
1038{
1039 struct drm_nouveau_private *dev_priv = dev->dev_private;
1040 struct nv50_graph_engine *pgraph;
1041 struct nouveau_grctx ctx = {};
1042 int ret;
1043
1044 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
1045 if (!pgraph)
1046 return -ENOMEM;
1047
1048 ctx.dev = dev;
1049 ctx.mode = NOUVEAU_GRCTX_PROG;
1050 ctx.data = pgraph->ctxprog;
1051 ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
1052
1053 ret = nv50_grctx_init(&ctx);
1054 if (ret) {
1055 NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
1056 kfree(pgraph);
1057 return 0;
1058 }
1059
1060 pgraph->grctx_size = ctx.ctxvals_pos * 4;
1061 pgraph->ctxprog_size = ctx.ctxprog_len;
1062
1063 pgraph->base.destroy = nv50_graph_destroy;
1064 pgraph->base.init = nv50_graph_init;
1065 pgraph->base.fini = nv50_graph_fini;
1066 pgraph->base.context_new = nv50_graph_context_new;
1067 pgraph->base.context_del = nv50_graph_context_del;
1068 pgraph->base.object_new = nv50_graph_object_new;
1069 if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
1070 pgraph->base.tlb_flush = nv50_graph_tlb_flush;
1071 else
1072 pgraph->base.tlb_flush = nv84_graph_tlb_flush;
1073
1074 nouveau_irq_register(dev, 12, nv50_graph_isr);
1075
1076 /* NVSW really doesn't live here... */
1077 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
1078 NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
1079 NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
1080 NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
1081 NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
1082 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
1083
1084 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1085 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1086 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
1087 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
1088
1089 /* tesla */
1090 if (dev_priv->chipset == 0x50)
1091 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
1092 else
1093 if (dev_priv->chipset < 0xa0)
1094 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
1095 else {
1096 switch (dev_priv->chipset) {
1097 case 0xa0:
1098 case 0xaa:
1099 case 0xac:
1100 NVOBJ_CLASS(dev, 0x8397, GR);
1101 break;
1102 case 0xa3:
1103 case 0xa5:
1104 case 0xa8:
1105 NVOBJ_CLASS(dev, 0x8597, GR);
1106 break;
1107 case 0xaf:
1108 NVOBJ_CLASS(dev, 0x8697, GR);
1109 break;
1110 }
1111 }
1112
1113 /* compute */
1114 NVOBJ_CLASS(dev, 0x50c0, GR);
1115 if (dev_priv->chipset > 0xa0 &&
1116 dev_priv->chipset != 0xaa &&
1117 dev_priv->chipset != 0xac)
1118 NVOBJ_CLASS(dev, 0x85c0, GR);
1119
1120 return 0;
1121}
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 336aab2a24a6..de9abff12b90 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -747,7 +747,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
747 gr_def(ctx, offset + 0x64, 0x0000001f); 747 gr_def(ctx, offset + 0x64, 0x0000001f);
748 gr_def(ctx, offset + 0x68, 0x0000000f); 748 gr_def(ctx, offset + 0x68, 0x0000000f);
749 gr_def(ctx, offset + 0x6c, 0x0000000f); 749 gr_def(ctx, offset + 0x6c, 0x0000000f);
750 } else if(dev_priv->chipset < 0xa0) { 750 } else if (dev_priv->chipset < 0xa0) {
751 cp_ctx(ctx, offset + 0x50, 1); 751 cp_ctx(ctx, offset + 0x50, 1);
752 cp_ctx(ctx, offset + 0x70, 1); 752 cp_ctx(ctx, offset + 0x70, 1);
753 } else { 753 } else {
@@ -924,7 +924,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
924 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ 924 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
925 } else { 925 } else {
926 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ 926 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
927 } 927 }
928 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ 928 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
929 if (dev_priv->chipset != 0x50) 929 if (dev_priv->chipset != 0x50)
930 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ 930 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
@@ -1803,9 +1803,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1803 xf_emit(ctx, 1, 0); /* 1ff */ 1803 xf_emit(ctx, 1, 0); /* 1ff */
1804 xf_emit(ctx, 8, 0); /* 0? */ 1804 xf_emit(ctx, 8, 0); /* 0? */
1805 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ 1805 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
1806 } 1806 } else {
1807 else
1808 {
1809 xf_emit(ctx, 0xc, 0); /* RO */ 1807 xf_emit(ctx, 0xc, 0); /* RO */
1810 /* SEEK */ 1808 /* SEEK */
1811 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ 1809 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
@@ -2836,7 +2834,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2836 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ 2834 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2837 if (IS_NVA3F(dev_priv->chipset)) 2835 if (IS_NVA3F(dev_priv->chipset))
2838 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2836 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2839 if(dev_priv->chipset == 0x50) 2837 if (dev_priv->chipset == 0x50)
2840 xf_emit(ctx, 1, 0); /* ff */ 2838 xf_emit(ctx, 1, 0); /* ff */
2841 else 2839 else
2842 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ 2840 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
new file mode 100644
index 000000000000..1dc5913f78c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -0,0 +1,256 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_ramht.h"
28
29struct nv50_mpeg_engine {
30 struct nouveau_exec_engine base;
31};
32
33static inline u32
34CTX_PTR(struct drm_device *dev, u32 offset)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37
38 if (dev_priv->chipset == 0x50)
39 offset += 0x0260;
40 else
41 offset += 0x0060;
42
43 return offset;
44}
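CTX_PTR() absorbs the larger RAMIN header of the original NV50; for example:

/* CTX_PTR(dev, 0x08) == 0x0260 + 0x08 == 0x268 on chipset 0x50, and
 * 0x0060 + 0x08 == 0x68 on the later layout, so the context-pointer
 * writes below work unchanged across the family. */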
45
46static int
47nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
48{
49 struct drm_device *dev = chan->dev;
50 struct drm_nouveau_private *dev_priv = dev->dev_private;
51 struct nouveau_gpuobj *ramin = chan->ramin;
52 struct nouveau_gpuobj *ctx = NULL;
53 int ret;
54
55 NV_DEBUG(dev, "ch%d\n", chan->id);
56
57 ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
58 NVOBJ_FLAG_ZERO_FREE, &ctx);
59 if (ret)
60 return ret;
61
62 nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
63 nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
64 nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
65 nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
66 nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
67 nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
68
69 nv_wo32(ctx, 0x70, 0x00801ec1);
70 nv_wo32(ctx, 0x7c, 0x0000037c);
71 dev_priv->engine.instmem.flush(dev);
72
73 chan->engctx[engine] = ctx;
74 return 0;
75}
76
77static void
78nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
79{
80 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 struct drm_device *dev = chan->dev;
83 unsigned long flags;
84 u32 inst, i;
85
86 if (!chan->ramin)
87 return;
88
89 inst = chan->ramin->vinst >> 12;
90 inst |= 0x80000000;
91
92 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
93 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
94 if (nv_rd32(dev, 0x00b318) == inst)
95 nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
96 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
97 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
98
99 for (i = 0x00; i <= 0x14; i += 4)
100 nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
101 nouveau_gpuobj_ref(NULL, &ctx);
102 chan->engctx[engine] = NULL;
103}
104
105static int
106nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
107 u32 handle, u16 class)
108{
109 struct drm_device *dev = chan->dev;
110 struct drm_nouveau_private *dev_priv = dev->dev_private;
111 struct nouveau_gpuobj *obj = NULL;
112 int ret;
113
114 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
115 if (ret)
116 return ret;
117 obj->engine = 2;
118 obj->class = class;
119
120 nv_wo32(obj, 0x00, class);
121 nv_wo32(obj, 0x04, 0x00000000);
122 nv_wo32(obj, 0x08, 0x00000000);
123 nv_wo32(obj, 0x0c, 0x00000000);
124 dev_priv->engine.instmem.flush(dev);
125
126 ret = nouveau_ramht_insert(chan, handle, obj);
127 nouveau_gpuobj_ref(NULL, &obj);
128 return ret;
129}
130
131static void
132nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
133{
134 nv50_vm_flush_engine(dev, 0x08);
135}
136
137static int
138nv50_mpeg_init(struct drm_device *dev, int engine)
139{
140 nv_wr32(dev, 0x00b32c, 0x00000000);
141 nv_wr32(dev, 0x00b314, 0x00000100);
142 nv_wr32(dev, 0x00b0e0, 0x0000001a);
143
144 nv_wr32(dev, 0x00b220, 0x00000044);
145 nv_wr32(dev, 0x00b300, 0x00801ec1);
146 nv_wr32(dev, 0x00b390, 0x00000000);
147 nv_wr32(dev, 0x00b394, 0x00000000);
148 nv_wr32(dev, 0x00b398, 0x00000000);
149 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
150
151 nv_wr32(dev, 0x00b100, 0xffffffff);
152 nv_wr32(dev, 0x00b140, 0xffffffff);
153
154 if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
155 NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
156 return -EBUSY;
157 }
158
159 return 0;
160}
161
162static int
163nv50_mpeg_fini(struct drm_device *dev, int engine)
164{
165 /* XXX: context save for suspend/resume */
166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
167 nv_wr32(dev, 0x00b140, 0x00000000);
168 return 0;
169}
170
171static void
172nv50_mpeg_isr(struct drm_device *dev)
173{
174 u32 stat = nv_rd32(dev, 0x00b100);
175 u32 type = nv_rd32(dev, 0x00b230);
176 u32 mthd = nv_rd32(dev, 0x00b234);
177 u32 data = nv_rd32(dev, 0x00b238);
178 u32 show = stat;
179
180 if (stat & 0x01000000) {
181 /* happens on initial binding of the object */
182 if (type == 0x00000020 && mthd == 0x0000) {
183 nv_wr32(dev, 0x00b308, 0x00000100);
184 show &= ~0x01000000;
185 }
186 }
187
188 if (show && nouveau_ratelimit()) {
189 NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
190 stat, type, mthd, data);
191 }
192
193 nv_wr32(dev, 0x00b100, stat);
194 nv_wr32(dev, 0x00b230, 0x00000001);
195 nv50_fb_vm_trap(dev, 1);
196}
197
198static void
199nv50_vpe_isr(struct drm_device *dev)
200{
201 if (nv_rd32(dev, 0x00b100))
202 nv50_mpeg_isr(dev);
203
204 if (nv_rd32(dev, 0x00b800)) {
205 u32 stat = nv_rd32(dev, 0x00b800);
206 NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
207 nv_wr32(dev, 0x00b800, stat);
208 }
209}
210
211static void
212nv50_mpeg_destroy(struct drm_device *dev, int engine)
213{
214 struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
215
216 nouveau_irq_unregister(dev, 0);
217
218 NVOBJ_ENGINE_DEL(dev, MPEG);
219 kfree(pmpeg);
220}
221
222int
223nv50_mpeg_create(struct drm_device *dev)
224{
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 struct nv50_mpeg_engine *pmpeg;
227
228 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
229 if (!pmpeg)
230 return -ENOMEM;
231
232 pmpeg->base.destroy = nv50_mpeg_destroy;
233 pmpeg->base.init = nv50_mpeg_init;
234 pmpeg->base.fini = nv50_mpeg_fini;
235 pmpeg->base.context_new = nv50_mpeg_context_new;
236 pmpeg->base.context_del = nv50_mpeg_context_del;
237 pmpeg->base.object_new = nv50_mpeg_object_new;
238 pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
239
240 if (dev_priv->chipset == 0x50) {
241 nouveau_irq_register(dev, 0, nv50_vpe_isr);
242 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
243 NVOBJ_CLASS(dev, 0x3174, MPEG);
244#if 0
245 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
246 NVOBJ_CLASS(dev, 0x4075, ME);
247#endif
248 } else {
249 nouveau_irq_register(dev, 0, nv50_mpeg_isr);
250 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
251 NVOBJ_CLASS(dev, 0x8274, MPEG);
252 }
253
254 return 0;
255
256}
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 7dbb305d7e63..8a2810011bda 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -47,6 +47,21 @@ nv50_pm_clock_get(struct drm_device *dev, u32 id)
47 47
48 reg0 = nv_rd32(dev, pll.reg + 0); 48 reg0 = nv_rd32(dev, pll.reg + 0);
49 reg1 = nv_rd32(dev, pll.reg + 4); 49 reg1 = nv_rd32(dev, pll.reg + 4);
50
51 if ((reg0 & 0x80000000) == 0) {
52 if (id == PLL_SHADER) {
53 NV_DEBUG(dev, "Shader PLL is disabled. "
54 "Shader clock is twice the core\n");
55 ret = nv50_pm_clock_get(dev, PLL_CORE);
56 if (ret > 0)
57 return ret << 1;
58 } else if (id == PLL_MEMORY) {
59 NV_DEBUG(dev, "Memory PLL is disabled. "
60 "Memory clock is equal to the ref_clk\n");
61 return pll.refclk;
62 }
63 }
64
50 P = (reg0 & 0x00070000) >> 16; 65 P = (reg0 & 0x00070000) >> 16;
51 N = (reg1 & 0x0000ff00) >> 8; 66 N = (reg1 & 0x0000ff00) >> 8;
52 M = (reg1 & 0x000000ff); 67 M = (reg1 & 0x000000ff);
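The early-outs added above cover PLLs that firmware left disabled (reg0 bit 31 clear): the shader domain then runs at twice the core clock, and the memory domain at the reference clock. For an enabled PLL the function decodes P, N, M as shown; a worked readback with assumed register values:

/* Assumed: refclk = 27000 kHz,
 *   reg0 = 0x80010000 -> enabled, P = (reg0 & 0x00070000) >> 16 = 1
 *   reg1 = 0x00002503 -> N = 0x25 (37), M = 0x03
 * domain clock = refclk * N / M >> P = 27000 * 37 / 3 >> 1 = 166500 kHz.
 * Were reg0 bit 31 clear on PLL_SHADER instead, the code above would
 * return twice whatever PLL_CORE reads back. */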
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 6c2694490741..1a0dd491a0e4 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -151,8 +151,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
151 struct drm_nouveau_private *dev_priv = vm->dev->dev_private; 151 struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
152 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 152 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
153 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 153 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
154 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 154 int i;
155 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
156 155
157 pinstmem->flush(vm->dev); 156 pinstmem->flush(vm->dev);
158 157
@@ -163,11 +162,10 @@ nv50_vm_flush(struct nouveau_vm *vm)
163 } 162 }
164 163
165 pfifo->tlb_flush(vm->dev); 164 pfifo->tlb_flush(vm->dev);
166 165 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
167 if (atomic_read(&vm->pgraph_refs)) 166 if (atomic_read(&vm->engref[i]))
168 pgraph->tlb_flush(vm->dev); 167 dev_priv->eng[i]->tlb_flush(vm->dev, i);
169 if (atomic_read(&vm->pcrypt_refs)) 168 }
170 pcrypt->tlb_flush(vm->dev);
171} 169}
172 170
173void 171void
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index fabc7fd30b1d..75b809a51748 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -26,46 +26,48 @@
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_util.h" 27#include "nouveau_util.h"
28#include "nouveau_vm.h" 28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
29 30
30static void nv84_crypt_isr(struct drm_device *); 31struct nv84_crypt_engine {
32 struct nouveau_exec_engine base;
33};
31 34
32int 35static int
33nv84_crypt_create_context(struct nouveau_channel *chan) 36nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
34{ 37{
35 struct drm_device *dev = chan->dev; 38 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 39 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpuobj *ramin = chan->ramin; 40 struct nouveau_gpuobj *ramin = chan->ramin;
41 struct nouveau_gpuobj *ctx;
38 int ret; 42 int ret;
39 43
40 NV_DEBUG(dev, "ch%d\n", chan->id); 44 NV_DEBUG(dev, "ch%d\n", chan->id);
41 45
42 ret = nouveau_gpuobj_new(dev, chan, 256, 0, 46 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
43 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, 47 NVOBJ_FLAG_ZERO_FREE, &ctx);
44 &chan->crypt_ctx);
45 if (ret) 48 if (ret)
46 return ret; 49 return ret;
47 50
48 nv_wo32(ramin, 0xa0, 0x00190000); 51 nv_wo32(ramin, 0xa0, 0x00190000);
49 nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff); 52 nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
50 nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst); 53 nv_wo32(ramin, 0xa8, ctx->vinst);
51 nv_wo32(ramin, 0xac, 0); 54 nv_wo32(ramin, 0xac, 0);
52 nv_wo32(ramin, 0xb0, 0); 55 nv_wo32(ramin, 0xb0, 0);
53 nv_wo32(ramin, 0xb4, 0); 56 nv_wo32(ramin, 0xb4, 0);
54
55 dev_priv->engine.instmem.flush(dev); 57 dev_priv->engine.instmem.flush(dev);
56 atomic_inc(&chan->vm->pcrypt_refs); 58
59 atomic_inc(&chan->vm->engref[engine]);
60 chan->engctx[engine] = ctx;
57 return 0; 61 return 0;
58} 62}
59 63
60void 64static void
61nv84_crypt_destroy_context(struct nouveau_channel *chan) 65nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
62{ 66{
67 struct nouveau_gpuobj *ctx = chan->engctx[engine];
63 struct drm_device *dev = chan->dev; 68 struct drm_device *dev = chan->dev;
64 u32 inst; 69 u32 inst;
65 70
66 if (!chan->crypt_ctx)
67 return;
68
69 inst = (chan->ramin->vinst >> 12); 71 inst = (chan->ramin->vinst >> 12);
70 inst |= 0x80000000; 72 inst |= 0x80000000;
71 73
@@ -80,43 +82,39 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan)
80 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000); 82 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
81 nv_wr32(dev, 0x10200c, 0x00000010); 83 nv_wr32(dev, 0x10200c, 0x00000010);
82 84
83 nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); 85 nouveau_gpuobj_ref(NULL, &ctx);
84 atomic_dec(&chan->vm->pcrypt_refs);
85}
86 86
87void 87 atomic_dec(&chan->vm->engref[engine]);
88nv84_crypt_tlb_flush(struct drm_device *dev) 88 chan->engctx[engine] = NULL;
89{
90 nv50_vm_flush_engine(dev, 0x0a);
91} 89}
92 90
93int 91static int
94nv84_crypt_init(struct drm_device *dev) 92nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
93 u32 handle, u16 class)
95{ 94{
95 struct drm_device *dev = chan->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; 97 struct nouveau_gpuobj *obj = NULL;
98 98 int ret;
99 if (!pcrypt->registered) {
100 NVOBJ_CLASS(dev, 0x74c1, CRYPT);
101 pcrypt->registered = true;
102 }
103 99
104 nv_mask(dev, 0x000200, 0x00004000, 0x00000000); 100 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
105 nv_mask(dev, 0x000200, 0x00004000, 0x00004000); 101 if (ret)
102 return ret;
103 obj->engine = 5;
104 obj->class = class;
106 105
107 nouveau_irq_register(dev, 14, nv84_crypt_isr); 106 nv_wo32(obj, 0x00, class);
108 nv_wr32(dev, 0x102130, 0xffffffff); 107 dev_priv->engine.instmem.flush(dev);
109 nv_wr32(dev, 0x102140, 0xffffffbf);
110 108
111 nv_wr32(dev, 0x10200c, 0x00000010); 109 ret = nouveau_ramht_insert(chan, handle, obj);
112 return 0; 110 nouveau_gpuobj_ref(NULL, &obj);
111 return ret;
113} 112}
114 113
115void 114static void
116nv84_crypt_fini(struct drm_device *dev) 115nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
117{ 116{
118 nv_wr32(dev, 0x102140, 0x00000000); 117 nv50_vm_flush_engine(dev, 0x0a);
119 nouveau_irq_unregister(dev, 14);
120} 118}
121 119
122static void 120static void
@@ -138,3 +136,58 @@ nv84_crypt_isr(struct drm_device *dev)
138 136
139 nv50_fb_vm_trap(dev, show); 137 nv50_fb_vm_trap(dev, show);
140} 138}
139
140static int
141nv84_crypt_fini(struct drm_device *dev, int engine)
142{
143 nv_wr32(dev, 0x102140, 0x00000000);
144 return 0;
145}
146
147static int
148nv84_crypt_init(struct drm_device *dev, int engine)
149{
150 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
151 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
152
153 nv_wr32(dev, 0x102130, 0xffffffff);
154 nv_wr32(dev, 0x102140, 0xffffffbf);
155
156 nv_wr32(dev, 0x10200c, 0x00000010);
157 return 0;
158}
159
160static void
161nv84_crypt_destroy(struct drm_device *dev, int engine)
162{
163 struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
164
165 NVOBJ_ENGINE_DEL(dev, CRYPT);
166
167 nouveau_irq_unregister(dev, 14);
168 kfree(pcrypt);
169}
170
171int
172nv84_crypt_create(struct drm_device *dev)
173{
174 struct nv84_crypt_engine *pcrypt;
175
176 pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
177 if (!pcrypt)
178 return -ENOMEM;
179
180 pcrypt->base.destroy = nv84_crypt_destroy;
181 pcrypt->base.init = nv84_crypt_init;
182 pcrypt->base.fini = nv84_crypt_fini;
183 pcrypt->base.context_new = nv84_crypt_context_new;
184 pcrypt->base.context_del = nv84_crypt_context_del;
185 pcrypt->base.object_new = nv84_crypt_object_new;
186 pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
187
188 nouveau_irq_register(dev, 14, nv84_crypt_isr);
189
190 NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
191 NVOBJ_CLASS (dev, 0x74c1, CRYPT);
192 return 0;
193}
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
new file mode 100644
index 000000000000..b86820a61220
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -0,0 +1,226 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31#include "nva3_copy.fuc.h"
32
33struct nva3_copy_engine {
34 struct nouveau_exec_engine base;
35};
36
37static int
38nva3_copy_context_new(struct nouveau_channel *chan, int engine)
39{
40 struct drm_device *dev = chan->dev;
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *ramin = chan->ramin;
43 struct nouveau_gpuobj *ctx = NULL;
44 int ret;
45
46 NV_DEBUG(dev, "ch%d\n", chan->id);
47
48 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
49 NVOBJ_FLAG_ZERO_FREE, &ctx);
50 if (ret)
51 return ret;
52
53 nv_wo32(ramin, 0xc0, 0x00190000);
54 nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
55 nv_wo32(ramin, 0xc8, ctx->vinst);
56 nv_wo32(ramin, 0xcc, 0x00000000);
57 nv_wo32(ramin, 0xd0, 0x00000000);
58 nv_wo32(ramin, 0xd4, 0x00000000);
59 dev_priv->engine.instmem.flush(dev);
60
61 atomic_inc(&chan->vm->engref[engine]);
62 chan->engctx[engine] = ctx;
63 return 0;
64}
65
66static int
67nva3_copy_object_new(struct nouveau_channel *chan, int engine,
68 u32 handle, u16 class)
69{
70 struct nouveau_gpuobj *ctx = chan->engctx[engine];
71
72 /* fuc engine doesn't need an object, our ramht code does.. */
73 ctx->engine = 3;
74 ctx->class = class;
75 return nouveau_ramht_insert(chan, handle, ctx);
76}
77
78static void
79nva3_copy_context_del(struct nouveau_channel *chan, int engine)
80{
81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 struct drm_device *dev = chan->dev;
83 u32 inst;
84
85 inst = (chan->ramin->vinst >> 12);
86 inst |= 0x40000000;
87
88 /* disable fifo access */
89 nv_wr32(dev, 0x104048, 0x00000000);
90 /* mark channel as unloaded if it's currently active */
91 if (nv_rd32(dev, 0x104050) == inst)
92 nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
93 /* mark next channel as invalid if it's about to be loaded */
94 if (nv_rd32(dev, 0x104054) == inst)
95 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
96 /* restore fifo access */
97 nv_wr32(dev, 0x104048, 0x00000003);
98
99 for (inst = 0xc0; inst <= 0xd4; inst += 4)
100 nv_wo32(chan->ramin, inst, 0x00000000);
101
102 nouveau_gpuobj_ref(NULL, &ctx);
103
104 atomic_dec(&chan->vm->engref[engine]);
105 chan->engctx[engine] = ctx;
106}
107
108static void
109nva3_copy_tlb_flush(struct drm_device *dev, int engine)
110{
111 nv50_vm_flush_engine(dev, 0x0d);
112}
113
114static int
115nva3_copy_init(struct drm_device *dev, int engine)
116{
117 int i;
118
119 nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
120 nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
121 nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
122
123 /* upload ucode */
124 nv_wr32(dev, 0x1041c0, 0x01000000);
125 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
126 nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
127
128 nv_wr32(dev, 0x104180, 0x01000000);
129 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
130 if ((i & 0x3f) == 0)
131 nv_wr32(dev, 0x104188, i >> 6);
132 nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
133 }
134
135 /* start it running */
136 nv_wr32(dev, 0x10410c, 0x00000000);
137 nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
138 nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
139 return 0;
140}
141
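
The init hook above captures the common falcon upload sequence: write
0x01000000 to the data/code upload controls, stream the arrays through the
auto-incrementing ports, and bump the code page index every 64 words (256
bytes). A hedged helper restating that pattern, with register offsets taken
relative to the engine base (nva3 PCOPY sits at 0x104000; the nvc0 variant
later in this patch parameterises the same offsets via pcopy->fuc):

static void
fuc_upload(struct drm_device *dev, u32 base,
	   const u32 *data, size_t dwords,
	   const u32 *code, size_t cwords)
{
	size_t i;

	nv_wr32(dev, base + 0x1c0, 0x01000000);		/* data upload */
	for (i = 0; i < dwords; i++)
		nv_wr32(dev, base + 0x1c4, data[i]);

	nv_wr32(dev, base + 0x180, 0x01000000);		/* code upload */
	for (i = 0; i < cwords; i++) {
		if ((i & 0x3f) == 0)			/* next 256b page */
			nv_wr32(dev, base + 0x188, i >> 6);
		nv_wr32(dev, base + 0x184, code[i]);
	}
}
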
142static int
143nva3_copy_fini(struct drm_device *dev, int engine)
144{
145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
146
147 /* trigger fuc context unload */
148 nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
149 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
150 nv_wr32(dev, 0x104000, 0x00000008);
151 nv_wait(dev, 0x104008, 0x00000008, 0x00000000);
152
153 nv_wr32(dev, 0x104014, 0xffffffff);
154 return 0;
155}
156
157static struct nouveau_enum nva3_copy_isr_error_name[] = {
158 { 0x0001, "ILLEGAL_MTHD" },
159 { 0x0002, "INVALID_ENUM" },
160 { 0x0003, "INVALID_BITFIELD" },
161 {}
162};
163
164static void
165nva3_copy_isr(struct drm_device *dev)
166{
167 u32 dispatch = nv_rd32(dev, 0x10401c);
168 u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
169 u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
170 u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
171 u32 addr = nv_rd32(dev, 0x104040) >> 16;
172 u32 mthd = (addr & 0x07ff) << 2;
173 u32 subc = (addr & 0x3800) >> 11;
174 u32 data = nv_rd32(dev, 0x104044);
175 int chid = nv50_graph_isr_chid(dev, inst);
176
177 if (stat & 0x00000040) {
178 NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
179 nouveau_enum_print(nva3_copy_isr_error_name, ssta);
180 printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
181 chid, inst, subc, mthd, data);
182 nv_wr32(dev, 0x104004, 0x00000040);
183 stat &= ~0x00000040;
184 }
185
186 if (stat) {
187 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
188 nv_wr32(dev, 0x104004, stat);
189 }
190 nv50_fb_vm_trap(dev, 1);
191}
192
193static void
194nva3_copy_destroy(struct drm_device *dev, int engine)
195{
196 struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
197
198 nouveau_irq_unregister(dev, 22);
199
200 NVOBJ_ENGINE_DEL(dev, COPY0);
201 kfree(pcopy);
202}
203
204int
205nva3_copy_create(struct drm_device *dev)
206{
207 struct nva3_copy_engine *pcopy;
208
209 pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
210 if (!pcopy)
211 return -ENOMEM;
212
213 pcopy->base.destroy = nva3_copy_destroy;
214 pcopy->base.init = nva3_copy_init;
215 pcopy->base.fini = nva3_copy_fini;
216 pcopy->base.context_new = nva3_copy_context_new;
217 pcopy->base.context_del = nva3_copy_context_del;
218 pcopy->base.object_new = nva3_copy_object_new;
219 pcopy->base.tlb_flush = nva3_copy_tlb_flush;
220
221 nouveau_irq_register(dev, 22, nva3_copy_isr);
222
223 NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
224 NVOBJ_CLASS(dev, 0x85b5, COPY0);
225 return 0;
226}
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
new file mode 100644
index 000000000000..eaf35f8321ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -0,0 +1,870 @@
1/* fuc microcode for copy engine on nva3- chipsets
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build for nva3:nvc0
27 * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
28 *
29 * To build for nvc0-
30 * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
31 */
32
33ifdef(`NVA3',
34.section nva3_pcopy_data,
35.section nvc0_pcopy_data
36)
37
38ctx_object: .b32 0
39ifdef(`NVA3',
40ctx_dma:
41ctx_dma_query: .b32 0
42ctx_dma_src: .b32 0
43ctx_dma_dst: .b32 0
44,)
45.equ ctx_dma_count 3
46ctx_query_address_high: .b32 0
47ctx_query_address_low: .b32 0
48ctx_query_counter: .b32 0
49ctx_src_address_high: .b32 0
50ctx_src_address_low: .b32 0
51ctx_src_pitch: .b32 0
52ctx_src_tile_mode: .b32 0
53ctx_src_xsize: .b32 0
54ctx_src_ysize: .b32 0
55ctx_src_zsize: .b32 0
56ctx_src_zoff: .b32 0
57ctx_src_xoff: .b32 0
58ctx_src_yoff: .b32 0
59ctx_src_cpp: .b32 0
60ctx_dst_address_high: .b32 0
61ctx_dst_address_low: .b32 0
62ctx_dst_pitch: .b32 0
63ctx_dst_tile_mode: .b32 0
64ctx_dst_xsize: .b32 0
65ctx_dst_ysize: .b32 0
66ctx_dst_zsize: .b32 0
67ctx_dst_zoff: .b32 0
68ctx_dst_xoff: .b32 0
69ctx_dst_yoff: .b32 0
70ctx_dst_cpp: .b32 0
71ctx_format: .b32 0
72ctx_swz_const0: .b32 0
73ctx_swz_const1: .b32 0
74ctx_xcnt: .b32 0
75ctx_ycnt: .b32 0
76.align 256
77
78dispatch_table:
79// mthd 0x0000, NAME
80.b16 0x000 1
81.b32 ctx_object ~0xffffffff
82// mthd 0x0100, NOP
83.b16 0x040 1
84.b32 0x00010000 + cmd_nop ~0xffffffff
85// mthd 0x0140, PM_TRIGGER
86.b16 0x050 1
87.b32 0x00010000 + cmd_pm_trigger ~0xffffffff
88ifdef(`NVA3', `
89// mthd 0x0180-0x018c, DMA_
90.b16 0x060 ctx_dma_count
91dispatch_dma:
92.b32 0x00010000 + cmd_dma ~0xffffffff
93.b32 0x00010000 + cmd_dma ~0xffffffff
94.b32 0x00010000 + cmd_dma ~0xffffffff
95',)
96// mthd 0x0200-0x0218, SRC_TILE
97.b16 0x80 7
98.b32 ctx_src_tile_mode ~0x00000fff
99.b32 ctx_src_xsize ~0x0007ffff
100.b32 ctx_src_ysize ~0x00001fff
101.b32 ctx_src_zsize ~0x000007ff
102.b32 ctx_src_zoff ~0x00000fff
103.b32 ctx_src_xoff ~0x0007ffff
104.b32 ctx_src_yoff ~0x00001fff
105// mthd 0x0220-0x0238, DST_TILE
106.b16 0x88 7
107.b32 ctx_dst_tile_mode ~0x00000fff
108.b32 ctx_dst_xsize ~0x0007ffff
109.b32 ctx_dst_ysize ~0x00001fff
110.b32 ctx_dst_zsize ~0x000007ff
111.b32 ctx_dst_zoff ~0x00000fff
112.b32 ctx_dst_xoff ~0x0007ffff
113.b32 ctx_dst_yoff ~0x00001fff
114// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
115.b16 0xc0 2
116.b32 0x00010000 + cmd_exec ~0xffffffff
117.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
118// mthd 0x030c-0x0340, various stuff
119.b16 0xc3 14
120.b32 ctx_src_address_high ~0x000000ff
121.b32 ctx_src_address_low ~0xfffffff0
122.b32 ctx_dst_address_high ~0x000000ff
123.b32 ctx_dst_address_low ~0xfffffff0
124.b32 ctx_src_pitch ~0x0007ffff
125.b32 ctx_dst_pitch ~0x0007ffff
126.b32 ctx_xcnt ~0x0000ffff
127.b32 ctx_ycnt ~0x00001fff
128.b32 ctx_format ~0x0333ffff
129.b32 ctx_swz_const0 ~0xffffffff
130.b32 ctx_swz_const1 ~0xffffffff
131.b32 ctx_query_address_high ~0x000000ff
132.b32 ctx_query_address_low ~0xffffffff
133.b32 ctx_query_counter ~0xffffffff
134.b16 0x800 0
135
136ifdef(`NVA3',
137.section nva3_pcopy_code,
138.section nvc0_pcopy_code
139)
140
141main:
142 clear b32 $r0
143 mov $sp $r0
144
145 // setup i0 handler and route fifo and ctxswitch to it
146 mov $r1 ih
147 mov $iv0 $r1
148 mov $r1 0x400
149 movw $r2 0xfff3
150 sethi $r2 0
151 iowr I[$r2 + 0x300] $r2
152
153 // enable interrupts
154 or $r2 0xc
155 iowr I[$r1] $r2
156 bset $flags ie0
157
158 // enable fifo access and context switching
159 mov $r1 0x1200
160 mov $r2 3
161 iowr I[$r1] $r2
162
163 // sleep forever, waking for interrupts
164 bset $flags $p0
165 spin:
166 sleep $p0
167 bra spin
168
169// i0 handler
170ih:
171 iord $r1 I[$r0 + 0x200]
172
173 and $r2 $r1 0x00000008
174 bra e ih_no_chsw
175 call chsw
176 ih_no_chsw:
177 and $r2 $r1 0x00000004
178 bra e ih_no_cmd
179 call dispatch
180
181 ih_no_cmd:
182 and $r1 $r1 0x0000000c
183 iowr I[$r0 + 0x100] $r1
184 iret
185
186// $p1 direction (0 = unload, 1 = load)
187// $r3 channel
188swctx:
189 mov $r4 0x7700
190 mov $xtargets $r4
191ifdef(`NVA3', `
192 // target 7 hardcoded to ctx dma object
193 mov $xdbase $r0
194', ` // NVC0
195 // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
196 mov $r4 0x2100
197 iord $r4 I[$r4 + 0]
198 and $r4 1
199 shl b32 $r4 4
200 add b32 $r4 0x30
201
202 // channel is in vram
203 mov $r15 0x61c
204 shl b32 $r15 6
205 mov $r5 0x114
206 iowrs I[$r15] $r5
207
208 // read 16-byte PCOPYn info, containing context pointer, from channel
209 shl b32 $r5 $r3 4
210 add b32 $r5 2
211 mov $xdbase $r5
212 mov $r5 $sp
213 // get a chunk of stack space, aligned to 256 byte boundary
214 sub b32 $r5 0x100
215 mov $r6 0xff
216 not b32 $r6
217 and $r5 $r6
218 sethi $r5 0x00020000
219 xdld $r4 $r5
220 xdwait
221 sethi $r5 0
222
223 // set context pointer, from within channel VM
224 mov $r14 0
225 iowrs I[$r15] $r14
226 ld b32 $r4 D[$r5 + 0]
227 shr b32 $r4 8
228 ld b32 $r6 D[$r5 + 4]
229 shl b32 $r6 24
230 or $r4 $r6
231 mov $xdbase $r4
232')
233 // 256-byte context, at start of data segment
234 mov b32 $r4 $r0
235 sethi $r4 0x60000
236
237 // swap!
238 bra $p1 swctx_load
239 xdst $r0 $r4
240 bra swctx_done
241 swctx_load:
242 xdld $r0 $r4
243 swctx_done:
244 xdwait
245 ret
246
247chsw:
248 // read current channel
249 mov $r2 0x1400
250 iord $r3 I[$r2]
251
252 // if it's active, unload it and return
253 xbit $r15 $r3 0x1e
254 bra e chsw_no_unload
255 bclr $flags $p1
256 call swctx
257 bclr $r3 0x1e
258 iowr I[$r2] $r3
259 mov $r4 1
260 iowr I[$r2 + 0x200] $r4
261 ret
262
263 // read next channel
264 chsw_no_unload:
265 iord $r3 I[$r2 + 0x100]
266
267 // is there a channel waiting to be loaded?
268 xbit $r13 $r3 0x1e
269 bra e chsw_finish_load
270 bset $flags $p1
271 call swctx
272ifdef(`NVA3',
273 // load dma objects back into TARGET regs
274 mov $r5 ctx_dma
275 mov $r6 ctx_dma_count
276 chsw_load_ctx_dma:
277 ld b32 $r7 D[$r5 + $r6 * 4]
278 add b32 $r8 $r6 0x180
279 shl b32 $r8 8
280 iowr I[$r8] $r7
281 sub b32 $r6 1
282 bra nc chsw_load_ctx_dma
283,)
284
285 chsw_finish_load:
286 mov $r3 2
287 iowr I[$r2 + 0x200] $r3
288 ret
289
290dispatch:
291 // read incoming fifo command
292 mov $r3 0x1900
293 iord $r2 I[$r3 + 0x100]
294 iord $r3 I[$r3 + 0x000]
295 and $r4 $r2 0x7ff
296 // $r2 will be used to store exception data
297 shl b32 $r2 0x10
298
299 // lookup method in the dispatch table, ILLEGAL_MTHD if not found
300 mov $r5 dispatch_table
301 clear b32 $r6
302 clear b32 $r7
303 dispatch_loop:
304 ld b16 $r6 D[$r5 + 0]
305 ld b16 $r7 D[$r5 + 2]
306 add b32 $r5 4
307 cmpu b32 $r4 $r6
308 bra c dispatch_illegal_mthd
309 add b32 $r7 $r6
310 cmpu b32 $r4 $r7
311 bra c dispatch_valid_mthd
312 sub b32 $r7 $r6
313 shl b32 $r7 3
314 add b32 $r5 $r7
315 bra dispatch_loop
316
317 // ensure no bits set in reserved fields, INVALID_BITFIELD
318 dispatch_valid_mthd:
319 sub b32 $r4 $r6
320 shl b32 $r4 3
321 add b32 $r4 $r5
322 ld b32 $r5 D[$r4 + 4]
323 and $r5 $r3
324 cmpu b32 $r5 0
325 bra ne dispatch_invalid_bitfield
326
327 // depending on dispatch flags: execute method, or save data as state
328 ld b16 $r5 D[$r4 + 0]
329 ld b16 $r6 D[$r4 + 2]
330 cmpu b32 $r6 0
331 bra ne dispatch_cmd
332 st b32 D[$r5] $r3
333 bra dispatch_done
334 dispatch_cmd:
335 bclr $flags $p1
336 call $r5
337 bra $p1 dispatch_error
338 bra dispatch_done
339
340 dispatch_invalid_bitfield:
341 or $r2 2
342 dispatch_illegal_mthd:
343 or $r2 1
344
345 // store exception data in SCRATCH0/SCRATCH1, signal hostirq
346 dispatch_error:
347 mov $r4 0x1000
348 iowr I[$r4 + 0x000] $r2
349 iowr I[$r4 + 0x100] $r3
350 mov $r2 0x40
351 iowr I[$r0] $r2
352 hostirq_wait:
353 iord $r2 I[$r0 + 0x200]
354 and $r2 0x40
355 cmpu b32 $r2 0
356 bra ne hostirq_wait
357
358 dispatch_done:
359 mov $r2 0x1d00
360 mov $r3 1
361 iowr I[$r2] $r3
362 ret
363
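
The dispatch loop above walks the table built in the data segment: each run
of methods is a { first, count } pair of 16-bit words followed by 'count'
8-byte slots, terminated by the 0x800/0 entry. A hypothetical host-side C
model of the same lookup (names and struct layout are assumptions drawn from
the listing):

struct copy_dte {
	u32 ptr;	/* state offset, or 0x00010000 | handler address */
	u32 reserved;	/* bits that must be clear in the data word */
};

static const struct copy_dte *
dispatch_lookup(const u8 *tbl, u16 mthd)
{
	/* 'mthd' is the method address divided by 4, as in the fuc */
	for (;;) {
		u16 first = *(const u16 *)(tbl + 0);
		u16 count = *(const u16 *)(tbl + 2);
		tbl += 4;

		/* runs ascend, so the 0x800 terminator always hits this */
		if (mthd < first)
			return NULL;	/* -> ILLEGAL_MTHD */
		if (mthd < first + count)
			return (const struct copy_dte *)tbl + (mthd - first);
		tbl += count * sizeof(struct copy_dte);
	}
}
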
364// No-operation
365//
366// Inputs:
367// $r1: irqh state
368// $r2: hostirq state
369// $r3: data
370// $r4: dispatch table entry
371// Outputs:
372// $r1: irqh state
373// $p1: set on error
374// $r2: hostirq state
375// $r3: data
376cmd_nop:
377 ret
378
379// PM_TRIGGER
380//
381// Inputs:
382// $r1: irqh state
383// $r2: hostirq state
384// $r3: data
385// $r4: dispatch table entry
386// Outputs:
387// $r1: irqh state
388// $p1: set on error
389// $r2: hostirq state
390// $r3: data
391cmd_pm_trigger:
392 mov $r2 0x2200
393 clear b32 $r3
394 sethi $r3 0x20000
395 iowr I[$r2] $r3
396 ret
397
398ifdef(`NVA3',
399// SET_DMA_* method handler
400//
401// Inputs:
402// $r1: irqh state
403// $r2: hostirq state
404// $r3: data
405// $r4: dispatch table entry
406// Outputs:
407// $r1: irqh state
408// $p1: set on error
409// $r2: hostirq state
410// $r3: data
411cmd_dma:
412 sub b32 $r4 dispatch_dma
413 shr b32 $r4 1
414 bset $r3 0x1e
415 st b32 D[$r4 + ctx_dma] $r3
416 add b32 $r4 0x600
417 shl b32 $r4 6
418 iowr I[$r4] $r3
419 ret
420,)
421
422// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
423//
424cmd_exec_set_format:
425 // zero out a chunk of the stack to store the swizzle into
426 add $sp -0x10
427 st b32 D[$sp + 0x00] $r0
428 st b32 D[$sp + 0x04] $r0
429 st b32 D[$sp + 0x08] $r0
430 st b32 D[$sp + 0x0c] $r0
431
432 // extract cpp, src_ncomp and dst_ncomp from FORMAT
433 ld b32 $r4 D[$r0 + ctx_format]
434 extr $r5 $r4 16:17
435 add b32 $r5 1
436 extr $r6 $r4 20:21
437 add b32 $r6 1
438 extr $r7 $r4 24:25
439 add b32 $r7 1
440
441 // convert FORMAT swizzle mask to hw swizzle mask
442 bclr $flags $p2
443 clear b32 $r8
444 clear b32 $r9
445 ncomp_loop:
446 and $r10 $r4 0xf
447 shr b32 $r4 4
448 clear b32 $r11
449 bpc_loop:
450 cmpu b8 $r10 4
451 bra nc cmp_c0
452 mulu $r12 $r10 $r5
453 add b32 $r12 $r11
454 bset $flags $p2
455 bra bpc_next
456 cmp_c0:
457 bra ne cmp_c1
458 mov $r12 0x10
459 add b32 $r12 $r11
460 bra bpc_next
461 cmp_c1:
462 cmpu b8 $r10 6
463 bra nc cmp_zero
464 mov $r12 0x14
465 add b32 $r12 $r11
466 bra bpc_next
467 cmp_zero:
468 mov $r12 0x80
469 bpc_next:
470 st b8 D[$sp + $r8] $r12
471 add b32 $r8 1
472 add b32 $r11 1
473 cmpu b32 $r11 $r5
474 bra c bpc_loop
475 add b32 $r9 1
476 cmpu b32 $r9 $r7
477 bra c ncomp_loop
478
479 // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
480 mulu $r6 $r5
481 st b32 D[$r0 + ctx_src_cpp] $r6
482 ld b32 $r8 D[$r0 + ctx_xcnt]
483 mulu $r6 $r8
484 bra $p2 dst_xcnt
485 clear b32 $r6
486
487 dst_xcnt:
488 mulu $r7 $r5
489 st b32 D[$r0 + ctx_dst_cpp] $r7
490 mulu $r7 $r8
491
492 mov $r5 0x810
493 shl b32 $r5 6
494 iowr I[$r5 + 0x000] $r6
495 iowr I[$r5 + 0x100] $r7
496 add b32 $r5 0x800
497 ld b32 $r6 D[$r0 + ctx_dst_cpp]
498 sub b32 $r6 1
499 shl b32 $r6 8
500 ld b32 $r7 D[$r0 + ctx_src_cpp]
501 sub b32 $r7 1
502 or $r6 $r7
503 iowr I[$r5 + 0x000] $r6
504 add b32 $r5 0x100
505 ld b32 $r6 D[$sp + 0x00]
506 iowr I[$r5 + 0x000] $r6
507 ld b32 $r6 D[$sp + 0x04]
508 iowr I[$r5 + 0x100] $r6
509 ld b32 $r6 D[$sp + 0x08]
510 iowr I[$r5 + 0x200] $r6
511 ld b32 $r6 D[$sp + 0x0c]
512 iowr I[$r5 + 0x300] $r6
513 add b32 $r5 0x400
514 ld b32 $r6 D[$r0 + ctx_swz_const0]
515 iowr I[$r5 + 0x000] $r6
516 ld b32 $r6 D[$r0 + ctx_swz_const1]
517 iowr I[$r5 + 0x100] $r6
518 add $sp 0x10
519 ret
520
521// Setup to handle a tiled surface
522//
523// Calculates a number of parameters the hardware requires in order
524// to correctly handle tiling.
525//
526// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE):
527// nTx = round_up(w * cpp, 1 << Tp) >> Tp
528// nTy = round_up(h, 1 << Th) >> Th
529// Txo = (x * cpp) & ((1 << Tp) - 1)
530// Tx = (x * cpp) >> Tp
531// Tyo = y & ((1 << Th) - 1)
532// Ty = y >> Th
533// Tzo = z & ((1 << Td) - 1)
534// Tz = z >> Td
535//
536// off = (Tzo << Tp << Th) + (Tyo << Tp) + Txo
537// off += ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Td << Th << Tp;
538//
539// Inputs:
540// $r4: hw command (0x104800)
541// $r5: ctx offset adjustment for src/dst selection
542// $p2: set if dst surface
543//
544cmd_exec_set_surface_tiled:
545 // translate TILE_MODE into Tp, Th, Td shift values
546 ld b32 $r7 D[$r5 + ctx_src_tile_mode]
547 extr $r9 $r7 8:11
548 extr $r8 $r7 4:7
549ifdef(`NVA3',
550 add b32 $r8 2
551,
552 add b32 $r8 3
553)
554 extr $r7 $r7 0:3
555 cmp b32 $r7 0xe
556 bra ne xtile64
557 mov $r7 4
558 bra xtileok
559 xtile64:
560 xbit $r7 $flags $p2
561 add b32 $r7 17
562 bset $r4 $r7
563 mov $r7 6
564 xtileok:
565
566 // Op = (x * cpp) & ((1 << Tp) - 1)
567 // Tx = (x * cpp) >> Tp
568 ld b32 $r10 D[$r5 + ctx_src_xoff]
569 ld b32 $r11 D[$r5 + ctx_src_cpp]
570 mulu $r10 $r11
571 mov $r11 1
572 shl b32 $r11 $r7
573 sub b32 $r11 1
574 and $r12 $r10 $r11
575 shr b32 $r10 $r7
576
577 // Tyo = y & ((1 << Th) - 1)
578 // Ty = y >> Th
579 ld b32 $r13 D[$r5 + ctx_src_yoff]
580 mov $r14 1
581 shl b32 $r14 $r8
582 sub b32 $r14 1
583 and $r11 $r13 $r14
584 shr b32 $r13 $r8
585
586 // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo)
587 add b32 $r14 1
588 shl b32 $r15 $r14 12
589 sub b32 $r14 $r11
590 or $r15 $r14
591 xbit $r6 $flags $p2
592 add b32 $r6 0x208
593 shl b32 $r6 8
594 iowr I[$r6 + 0x000] $r15
595
596 // Op += Tyo << Tp
597 shl b32 $r11 $r7
598 add b32 $r12 $r11
599
600 // nTx = ((w * cpp) + ((1 << Tp) - 1)) >> Tp
601 ld b32 $r15 D[$r5 + ctx_src_xsize]
602 ld b32 $r11 D[$r5 + ctx_src_cpp]
603 mulu $r15 $r11
604 mov $r11 1
605 shl b32 $r11 $r7
606 sub b32 $r11 1
607 add b32 $r15 $r11
608 shr b32 $r15 $r7
609 push $r15
610
611 // nTy = (h + ((1 << Th) - 1)) >> Th
612 ld b32 $r15 D[$r5 + ctx_src_ysize]
613 mov $r11 1
614 shl b32 $r11 $r8
615 sub b32 $r11 1
616 add b32 $r15 $r11
617 shr b32 $r15 $r8
618 push $r15
619
620 // Tys = Tp + Th
621 // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td
622 add b32 $r7 $r8
623 sub b32 $r8 2
624 mov $r11 1
625 shl b32 $r11 $r8
626 shl b32 $r11 $r9
627
628 // Tzo = z & ((1 << Td) - 1)
629 // Tz = z >> Td
630 // Op += Tzo << Tys
631 // Ts = Tys + Td
632 ld b32 $r8 D[$r5 + ctx_src_zoff]
633 mov $r14 1
634 shl b32 $r14 $r9
635 sub b32 $r14 1
636 and $r15 $r8 $r14
637 shl b32 $r15 $r7
638 add b32 $r12 $r15
639 add b32 $r7 $r9
640 shr b32 $r8 $r9
641
642 // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts
643 pop $r15
644 pop $r9
645 mulu $r13 $r9
646 add b32 $r10 $r13
647 mulu $r8 $r9
648 mulu $r8 $r15
649 add b32 $r10 $r8
650 shl b32 $r10 $r7
651
652 // PITCH = (nTx - 1) << Ts
653 sub b32 $r9 1
654 shl b32 $r9 $r7
655 iowr I[$r6 + 0x200] $r9
656
657 // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
658 // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
659 ld b32 $r7 D[$r5 + ctx_src_address_low]
660 ld b32 $r8 D[$r5 + ctx_src_address_high]
661 add b32 $r10 $r12
662 add b32 $r7 $r10
663 adc b32 $r8 0
664 shl b32 $r8 16
665 or $r8 $r11
666 sub b32 $r6 0x600
667 iowr I[$r6 + 0x000] $r7
668 add b32 $r6 0x400
669 iowr I[$r6 + 0x000] $r8
670 ret
671
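
A C restatement of the offset derivation above can be useful for
sanity-checking the fuc arithmetic; this is a sketch only, with tp/th/td the
Tp/Th/Td shifts decoded from TILE_MODE and x_cpp/w_cpp the x and width values
already multiplied by cpp:

static u64
tiled_offset(u32 x_cpp, u32 y, u32 z, u32 w_cpp, u32 h,
	     u32 tp, u32 th, u32 td)
{
	u32 ntx = (w_cpp + (1 << tp) - 1) >> tp;	/* tiles per row */
	u32 nty = (h + (1 << th) - 1) >> th;		/* tile rows */
	u32 txo = x_cpp & ((1 << tp) - 1), tx = x_cpp >> tp;
	u32 tyo = y & ((1 << th) - 1), ty = y >> th;
	u32 tzo = z & ((1 << td) - 1), tz = z >> td;
	u64 op = ((u64)tzo << (tp + th)) + (tyo << tp) + txo;
	u64 ot = ((u64)tz * nty * ntx + (u64)ty * ntx + tx)
		 << (tp + th + td);

	return ot + op;	/* byte offset into the tiled surface */
}
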
672// Setup to handle a linear surface
673//
674// Nothing to see here.. Sets ADDRESS and PITCH, pretty non-exciting
675//
676cmd_exec_set_surface_linear:
677 xbit $r6 $flags $p2
678 add b32 $r6 0x202
679 shl b32 $r6 8
680 ld b32 $r7 D[$r5 + ctx_src_address_low]
681 iowr I[$r6 + 0x000] $r7
682 add b32 $r6 0x400
683 ld b32 $r7 D[$r5 + ctx_src_address_high]
684 shl b32 $r7 16
685 iowr I[$r6 + 0x000] $r7
686 add b32 $r6 0x400
687 ld b32 $r7 D[$r5 + ctx_src_pitch]
688 iowr I[$r6 + 0x000] $r7
689 ret
690
691// wait for regs to be available for use
692cmd_exec_wait:
693 push $r0
694 push $r1
695 mov $r0 0x800
696 shl b32 $r0 6
697 loop:
698 iord $r1 I[$r0]
699 and $r1 1
700 bra ne loop
701 pop $r1
702 pop $r0
703 ret
704
705cmd_exec_query:
706 // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
707 xbit $r4 $r3 13
708 bra ne query_counter
709 call cmd_exec_wait
710 mov $r4 0x80c
711 shl b32 $r4 6
712 ld b32 $r5 D[$r0 + ctx_query_address_low]
713 add b32 $r5 4
714 iowr I[$r4 + 0x000] $r5
715 iowr I[$r4 + 0x100] $r0
716 mov $r5 0xc
717 iowr I[$r4 + 0x200] $r5
718 add b32 $r4 0x400
719 ld b32 $r5 D[$r0 + ctx_query_address_high]
720 shl b32 $r5 16
721 iowr I[$r4 + 0x000] $r5
722 add b32 $r4 0x500
723 mov $r5 0x00000b00
724 sethi $r5 0x00010000
725 iowr I[$r4 + 0x000] $r5
726 mov $r5 0x00004040
727 shl b32 $r5 1
728 sethi $r5 0x80800000
729 iowr I[$r4 + 0x100] $r5
730 mov $r5 0x00001110
731 sethi $r5 0x13120000
732 iowr I[$r4 + 0x200] $r5
733 mov $r5 0x00001514
734 sethi $r5 0x17160000
735 iowr I[$r4 + 0x300] $r5
736 mov $r5 0x00002601
737 sethi $r5 0x00010000
738 mov $r4 0x800
739 shl b32 $r4 6
740 iowr I[$r4 + 0x000] $r5
741
742 // write COUNTER
743 query_counter:
744 call cmd_exec_wait
745 mov $r4 0x80c
746 shl b32 $r4 6
747 ld b32 $r5 D[$r0 + ctx_query_address_low]
748 iowr I[$r4 + 0x000] $r5
749 iowr I[$r4 + 0x100] $r0
750 mov $r5 0x4
751 iowr I[$r4 + 0x200] $r5
752 add b32 $r4 0x400
753 ld b32 $r5 D[$r0 + ctx_query_address_high]
754 shl b32 $r5 16
755 iowr I[$r4 + 0x000] $r5
756 add b32 $r4 0x500
757 mov $r5 0x00000300
758 iowr I[$r4 + 0x000] $r5
759 mov $r5 0x00001110
760 sethi $r5 0x13120000
761 iowr I[$r4 + 0x100] $r5
762 ld b32 $r5 D[$r0 + ctx_query_counter]
763 add b32 $r4 0x500
764 iowr I[$r4 + 0x000] $r5
765 mov $r5 0x00002601
766 sethi $r5 0x00010000
767 mov $r4 0x800
768 shl b32 $r4 6
769 iowr I[$r4 + 0x000] $r5
770 ret
771
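
The query record written above is 16 bytes; QUERY_SHORT skips the first pass
so that only the counter word is written. A hypothetical C view of the
layout, inferred from the { -, 0, TIME_LO, TIME_HI } comment and the counter
write:

struct copy_query {
	u32 counter;	/* ctx_query_counter, second pass, offset +0 */
	u32 zero;	/* first pass writes 0 at +4 */
	u32 time_lo;	/* timestamp low word, +8 */
	u32 time_hi;	/* timestamp high word, +12 */
};
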
772// Execute a copy operation
773//
774// Inputs:
775// $r1: irqh state
776// $r2: hostirq state
777// $r3: data
778// 000002000 QUERY_SHORT
779// 000001000 QUERY
780// 000000100 DST_LINEAR
781// 000000010 SRC_LINEAR
782// 000000001 FORMAT
783// $r4: dispatch table entry
784// Outputs:
785// $r1: irqh state
786// $p1: set on error
787// $r2: hostirq state
788// $r3: data
789cmd_exec:
790 call cmd_exec_wait
791
792 // if format requested, call function to calculate it, otherwise
793 // fill in cpp/xcnt for both surfaces as if (cpp == 1)
794 xbit $r15 $r3 0
795 bra e cmd_exec_no_format
796 call cmd_exec_set_format
797 mov $r4 0x200
798 bra cmd_exec_init_src_surface
799 cmd_exec_no_format:
800 mov $r6 0x810
801 shl b32 $r6 6
802 mov $r7 1
803 st b32 D[$r0 + ctx_src_cpp] $r7
804 st b32 D[$r0 + ctx_dst_cpp] $r7
805 ld b32 $r7 D[$r0 + ctx_xcnt]
806 iowr I[$r6 + 0x000] $r7
807 iowr I[$r6 + 0x100] $r7
808 clear b32 $r4
809
810 cmd_exec_init_src_surface:
811 bclr $flags $p2
812 clear b32 $r5
813 xbit $r15 $r3 4
814 bra e src_tiled
815 call cmd_exec_set_surface_linear
816 bra cmd_exec_init_dst_surface
817 src_tiled:
818 call cmd_exec_set_surface_tiled
819 bset $r4 7
820
821 cmd_exec_init_dst_surface:
822 bset $flags $p2
823 mov $r5 ctx_dst_address_high - ctx_src_address_high
824 xbit $r15 $r3 8
825 bra e dst_tiled
826 call cmd_exec_set_surface_linear
827 bra cmd_exec_kick
828 dst_tiled:
829 call cmd_exec_set_surface_tiled
830 bset $r4 8
831
832 cmd_exec_kick:
833 mov $r5 0x800
834 shl b32 $r5 6
835 ld b32 $r6 D[$r0 + ctx_ycnt]
836 iowr I[$r5 + 0x100] $r6
837 mov $r6 0x0041
838 // SRC_TARGET = 1, DST_TARGET = 2
839 sethi $r6 0x44000000
840 or $r4 $r6
841 iowr I[$r5] $r4
842
843 // if requested, queue up a QUERY write after the copy has completed
844 xbit $r15 $r3 12
845 bra e cmd_exec_done
846 call cmd_exec_query
847
848 cmd_exec_done:
849 ret
850
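
The flag bits documented in the cmd_exec header above, restated as
hypothetical constants (bit positions taken from the xbit tests in cmd_exec
and cmd_exec_query; the names are illustrative, not from this patch):

#define COPY_EXEC_FORMAT	0x00000001
#define COPY_EXEC_SRC_LINEAR	0x00000010
#define COPY_EXEC_DST_LINEAR	0x00000100
#define COPY_EXEC_QUERY		0x00001000
#define COPY_EXEC_QUERY_SHORT	0x00002000
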
851// Flush write cache
852//
853// Inputs:
854// $r1: irqh state
855// $r2: hostirq state
856// $r3: data
857// $r4: dispatch table entry
858// Outputs:
859// $r1: irqh state
860// $p1: set on error
861// $r2: hostirq state
862// $r3: data
863cmd_wrcache_flush:
864 mov $r2 0x2200
865 clear b32 $r3
866 sethi $r3 0x10000
867 iowr I[$r2] $r3
868 ret
869
870.align 0x100
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
new file mode 100644
index 000000000000..2731de22ebe9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -0,0 +1,534 @@
1uint32_t nva3_pcopy_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00010000,
67 0x00000000,
68 0x00000000,
69 0x00010040,
70 0x00010160,
71 0x00000000,
72 0x00010050,
73 0x00010162,
74 0x00000000,
75 0x00030060,
76 0x00010170,
77 0x00000000,
78 0x00010170,
79 0x00000000,
80 0x00010170,
81 0x00000000,
82 0x00070080,
83 0x00000028,
84 0xfffff000,
85 0x0000002c,
86 0xfff80000,
87 0x00000030,
88 0xffffe000,
89 0x00000034,
90 0xfffff800,
91 0x00000038,
92 0xfffff000,
93 0x0000003c,
94 0xfff80000,
95 0x00000040,
96 0xffffe000,
97 0x00070088,
98 0x00000054,
99 0xfffff000,
100 0x00000058,
101 0xfff80000,
102 0x0000005c,
103 0xffffe000,
104 0x00000060,
105 0xfffff800,
106 0x00000064,
107 0xfffff000,
108 0x00000068,
109 0xfff80000,
110 0x0000006c,
111 0xffffe000,
112 0x000200c0,
113 0x00010492,
114 0x00000000,
115 0x0001051b,
116 0x00000000,
117 0x000e00c3,
118 0x0000001c,
119 0xffffff00,
120 0x00000020,
121 0x0000000f,
122 0x00000048,
123 0xffffff00,
124 0x0000004c,
125 0x0000000f,
126 0x00000024,
127 0xfff80000,
128 0x00000050,
129 0xfff80000,
130 0x00000080,
131 0xffff0000,
132 0x00000084,
133 0xffffe000,
134 0x00000074,
135 0xfccc0000,
136 0x00000078,
137 0x00000000,
138 0x0000007c,
139 0x00000000,
140 0x00000010,
141 0xffffff00,
142 0x00000014,
143 0x00000000,
144 0x00000018,
145 0x00000000,
146 0x00000800,
147};
148
149uint32_t nva3_pcopy_code[] = {
150 0x04fe04bd,
151 0x3517f000,
152 0xf10010fe,
153 0xf1040017,
154 0xf0fff327,
155 0x22d00023,
156 0x0c25f0c0,
157 0xf40012d0,
158 0x17f11031,
159 0x27f01200,
160 0x0012d003,
161 0xf40031f4,
162 0x0ef40028,
163 0x8001cffd,
164 0xf40812c4,
165 0x21f4060b,
166 0x0412c472,
167 0xf4060bf4,
168 0x11c4c321,
169 0x4001d00c,
170 0x47f101f8,
171 0x4bfe7700,
172 0x0007fe00,
173 0xf00204b9,
174 0x01f40643,
175 0x0604fa09,
176 0xfa060ef4,
177 0x03f80504,
178 0x27f100f8,
179 0x23cf1400,
180 0x1e3fc800,
181 0xf4170bf4,
182 0x21f40132,
183 0x1e3af052,
184 0xf00023d0,
185 0x24d00147,
186 0xcf00f880,
187 0x3dc84023,
188 0x220bf41e,
189 0xf40131f4,
190 0x57f05221,
191 0x0367f004,
192 0xa07856bc,
193 0xb6018068,
194 0x87d00884,
195 0x0162b600,
196 0xf0f018f4,
197 0x23d00237,
198 0xf100f880,
199 0xcf190037,
200 0x33cf4032,
201 0xff24e400,
202 0x1024b607,
203 0x010057f1,
204 0x74bd64bd,
205 0x58005658,
206 0x50b60157,
207 0x0446b804,
208 0xbb4d08f4,
209 0x47b80076,
210 0x0f08f404,
211 0xb60276bb,
212 0x57bb0374,
213 0xdf0ef400,
214 0xb60246bb,
215 0x45bb0344,
216 0x01459800,
217 0xb00453fd,
218 0x1bf40054,
219 0x00455820,
220 0xb0014658,
221 0x1bf40064,
222 0x00538009,
223 0xf4300ef4,
224 0x55f90132,
225 0xf40c01f4,
226 0x25f0250e,
227 0x0125f002,
228 0x100047f1,
229 0xd00042d0,
230 0x27f04043,
231 0x0002d040,
232 0xf08002cf,
233 0x24b04024,
234 0xf71bf400,
235 0x1d0027f1,
236 0xd00137f0,
237 0x00f80023,
238 0x27f100f8,
239 0x34bd2200,
240 0xd00233f0,
241 0x00f80023,
242 0x012842b7,
243 0xf00145b6,
244 0x43801e39,
245 0x0040b701,
246 0x0644b606,
247 0xf80043d0,
248 0xf030f400,
249 0xb00001b0,
250 0x01b00101,
251 0x0301b002,
252 0xc71d0498,
253 0x50b63045,
254 0x3446c701,
255 0xc70160b6,
256 0x70b63847,
257 0x0232f401,
258 0x94bd84bd,
259 0xb60f4ac4,
260 0xb4bd0445,
261 0xf404a430,
262 0xa5ff0f18,
263 0x00cbbbc0,
264 0xf40231f4,
265 0x1bf4220e,
266 0x10c7f00c,
267 0xf400cbbb,
268 0xa430160e,
269 0x0c18f406,
270 0xbb14c7f0,
271 0x0ef400cb,
272 0x80c7f107,
273 0x01c83800,
274 0xb60180b6,
275 0xb5b801b0,
276 0xc308f404,
277 0xb80190b6,
278 0x08f40497,
279 0x0065fdb2,
280 0x98110680,
281 0x68fd2008,
282 0x0502f400,
283 0x75fd64bd,
284 0x1c078000,
285 0xf10078fd,
286 0xb6081057,
287 0x56d00654,
288 0x4057d000,
289 0x080050b7,
290 0xb61c0698,
291 0x64b60162,
292 0x11079808,
293 0xfd0172b6,
294 0x56d00567,
295 0x0050b700,
296 0x0060b401,
297 0xb40056d0,
298 0x56d00160,
299 0x0260b440,
300 0xb48056d0,
301 0x56d00360,
302 0x0050b7c0,
303 0x1e069804,
304 0x980056d0,
305 0x56d01f06,
306 0x1030f440,
307 0x579800f8,
308 0x6879c70a,
309 0xb66478c7,
310 0x77c70280,
311 0x0e76b060,
312 0xf0091bf4,
313 0x0ef40477,
314 0x027cf00f,
315 0xfd1170b6,
316 0x77f00947,
317 0x0f5a9806,
318 0xfd115b98,
319 0xb7f000ab,
320 0x04b7bb01,
321 0xff01b2b6,
322 0xa7bbc4ab,
323 0x105d9805,
324 0xbb01e7f0,
325 0xe2b604e8,
326 0xb4deff01,
327 0xb605d8bb,
328 0xef9401e0,
329 0x02ebbb0c,
330 0xf005fefd,
331 0x60b7026c,
332 0x64b60208,
333 0x006fd008,
334 0xbb04b7bb,
335 0x5f9800cb,
336 0x115b980b,
337 0xf000fbfd,
338 0xb7bb01b7,
339 0x01b2b604,
340 0xbb00fbbb,
341 0xf0f905f7,
342 0xf00c5f98,
343 0xb8bb01b7,
344 0x01b2b604,
345 0xbb00fbbb,
346 0xf0f905f8,
347 0xb60078bb,
348 0xb7f00282,
349 0x04b8bb01,
350 0x9804b9bb,
351 0xe7f00e58,
352 0x04e9bb01,
353 0xff01e2b6,
354 0xf7bbf48e,
355 0x00cfbb04,
356 0xbb0079bb,
357 0xf0fc0589,
358 0xd9fd90fc,
359 0x00adbb00,
360 0xfd0089fd,
361 0xa8bb008f,
362 0x04a7bb00,
363 0xbb0192b6,
364 0x69d00497,
365 0x08579880,
366 0xbb075898,
367 0x7abb00ac,
368 0x0081b600,
369 0xfd1084b6,
370 0x62b7058b,
371 0x67d00600,
372 0x0060b700,
373 0x0068d004,
374 0x6cf000f8,
375 0x0260b702,
376 0x0864b602,
377 0xd0085798,
378 0x60b70067,
379 0x57980400,
380 0x1074b607,
381 0xb70067d0,
382 0x98040060,
383 0x67d00957,
384 0xf900f800,
385 0xf110f900,
386 0xb6080007,
387 0x01cf0604,
388 0x0114f000,
389 0xfcfa1bf4,
390 0xf800fc10,
391 0x0d34c800,
392 0xf5701bf4,
393 0xf103ab21,
394 0xb6080c47,
395 0x05980644,
396 0x0450b605,
397 0xd00045d0,
398 0x57f04040,
399 0x8045d00c,
400 0x040040b7,
401 0xb6040598,
402 0x45d01054,
403 0x0040b700,
404 0x0057f105,
405 0x0153f00b,
406 0xf10045d0,
407 0xb6404057,
408 0x53f10154,
409 0x45d08080,
410 0x1057f140,
411 0x1253f111,
412 0x8045d013,
413 0x151457f1,
414 0x171653f1,
415 0xf1c045d0,
416 0xf0260157,
417 0x47f10153,
418 0x44b60800,
419 0x0045d006,
420 0x03ab21f5,
421 0x080c47f1,
422 0x980644b6,
423 0x45d00505,
424 0x4040d000,
425 0xd00457f0,
426 0x40b78045,
427 0x05980400,
428 0x1054b604,
429 0xb70045d0,
430 0xf1050040,
431 0xd0030057,
432 0x57f10045,
433 0x53f11110,
434 0x45d01312,
435 0x06059840,
436 0x050040b7,
437 0xf10045d0,
438 0xf0260157,
439 0x47f10153,
440 0x44b60800,
441 0x0045d006,
442 0x21f500f8,
443 0x3fc803ab,
444 0x0e0bf400,
445 0x018921f5,
446 0x020047f1,
447 0xf11e0ef4,
448 0xb6081067,
449 0x77f00664,
450 0x11078001,
451 0x981c0780,
452 0x67d02007,
453 0x4067d000,
454 0x32f444bd,
455 0xc854bd02,
456 0x0bf4043f,
457 0x8221f50a,
458 0x0a0ef403,
459 0x027621f5,
460 0xf40749f0,
461 0x57f00231,
462 0x083fc82c,
463 0xf50a0bf4,
464 0xf4038221,
465 0x21f50a0e,
466 0x49f00276,
467 0x0057f108,
468 0x0654b608,
469 0xd0210698,
470 0x67f04056,
471 0x0063f141,
472 0x0546fd44,
473 0xc80054d0,
474 0x0bf40c3f,
475 0xc521f507,
476 0xf100f803,
477 0xbd220027,
478 0x0133f034,
479 0xf80023d0,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534};
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index dbbafed36406..e4b2b9e934b2 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -27,32 +27,74 @@
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30/*XXX: boards using limits 0x40 need fixing, the register layout 30/* This is actually a lot more complex than it appears here, but hopefully
31 * is correct here, but, there's some other funny magic 31 * this should be able to deal with what the VBIOS leaves for us..
32 * that modifies things, so it's not likely we'll set/read 32 *
33 * the correct timings yet.. working on it... 33 * If not, well, I'll jump off that bridge when I come to it.
34 */ 34 */
35 35
36struct nva3_pm_state { 36struct nva3_pm_state {
37 struct pll_lims pll; 37 enum pll_types type;
38 int N, M, P; 38 u32 src0;
39 u32 src1;
40 u32 ctrl;
41 u32 coef;
42 u32 old_pnm;
43 u32 new_pnm;
44 u32 new_div;
39}; 45};
40 46
47static int
48nva3_pm_pll_offset(u32 id)
49{
50 static const u32 pll_map[] = {
51 0x00, PLL_CORE,
52 0x01, PLL_SHADER,
53 0x02, PLL_MEMORY,
54 0x00, 0x00
55 };
56 const u32 *map = pll_map;
57
58 while (map[1]) {
59 if (id == map[1])
60 return map[0];
61 map += 2;
62 }
63
64 return -ENOENT;
65}
66
41int 67int
42nva3_pm_clock_get(struct drm_device *dev, u32 id) 68nva3_pm_clock_get(struct drm_device *dev, u32 id)
43{ 69{
70 u32 src0, src1, ctrl, coef;
44 struct pll_lims pll; 71 struct pll_lims pll;
45 int P, N, M, ret; 72 int ret, off;
46 u32 reg; 73 int P, N, M;
47 74
48 ret = get_pll_limits(dev, id, &pll); 75 ret = get_pll_limits(dev, id, &pll);
49 if (ret) 76 if (ret)
50 return ret; 77 return ret;
51 78
52 reg = nv_rd32(dev, pll.reg + 4); 79 off = nva3_pm_pll_offset(id);
53 P = (reg & 0x003f0000) >> 16; 80 if (off < 0)
54 N = (reg & 0x0000ff00) >> 8; 81 return off;
55 M = (reg & 0x000000ff); 82
83 src0 = nv_rd32(dev, 0x4120 + (off * 4));
84 src1 = nv_rd32(dev, 0x4160 + (off * 4));
85 ctrl = nv_rd32(dev, pll.reg + 0);
86 coef = nv_rd32(dev, pll.reg + 4);
87 NV_DEBUG(dev, "PLL %02x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
88 id, src0, src1, ctrl, coef);
89
90 if (ctrl & 0x00000008) {
91 u32 div = ((src1 & 0x003c0000) >> 18) + 1;
92 return (pll.refclk * 2) / div;
93 }
94
95 P = (coef & 0x003f0000) >> 16;
96 N = (coef & 0x0000ff00) >> 8;
97 M = (coef & 0x000000ff);
56 return pll.refclk * N / M / P; 98 return pll.refclk * N / M / P;
57} 99}
58 100
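
The readback above has two paths: when ctrl bit 3 is set the PLL is bypassed
and the clock comes straight from refclk * 2 over the src1 divider, otherwise
it is the usual refclk * N / M / P. A minimal sketch of the same math, with a
hypothetical worked example: refclk = 27000 kHz and coef = 0x00021b01
(P = 2, N = 27, M = 1) gives 27000 * 27 / 1 / 2 = 364500 kHz.

static int
pll_khz(u32 refclk, u32 src1, u32 ctrl, u32 coef)
{
	if (ctrl & 0x00000008)	/* bypass: refclk * 2 / divider */
		return (refclk * 2) / (((src1 & 0x003c0000) >> 18) + 1);

	return refclk * ((coef & 0x0000ff00) >> 8)	/* N */
		      / ((coef & 0x000000ff) >> 0)	/* M */
		      / ((coef & 0x003f0000) >> 16);	/* P */
}
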
@@ -60,36 +102,103 @@ void *
60nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, 102nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
61 u32 id, int khz) 103 u32 id, int khz)
62{ 104{
63 struct nva3_pm_state *state; 105 struct nva3_pm_state *pll;
64 int dummy, ret; 106 struct pll_lims limits;
107 int N, M, P, diff;
108 int ret, off;
109
110 ret = get_pll_limits(dev, id, &limits);
111 if (ret < 0)
112 return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
113
114 off = nva3_pm_pll_offset(id);
 115	 if (off < 0)
116 return ERR_PTR(-EINVAL);
65 117
66 state = kzalloc(sizeof(*state), GFP_KERNEL); 118
67 if (!state) 119 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
120 if (!pll)
68 return ERR_PTR(-ENOMEM); 121 return ERR_PTR(-ENOMEM);
122 pll->type = id;
123 pll->src0 = 0x004120 + (off * 4);
124 pll->src1 = 0x004160 + (off * 4);
125 pll->ctrl = limits.reg + 0;
126 pll->coef = limits.reg + 4;
69 127
70 ret = get_pll_limits(dev, id, &state->pll); 128 /* If target clock is within [-2, 3) MHz of a divisor, we'll
71 if (ret < 0) { 129 * use that instead of calculating MNP values
72 kfree(state); 130 */
73 return (ret == -ENOENT) ? NULL : ERR_PTR(ret); 131 pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16);
132 if (pll->new_div) {
133 diff = khz - ((limits.refclk * 2) / pll->new_div);
134 if (diff < -2000 || diff >= 3000)
135 pll->new_div = 0;
74 } 136 }
75 137
76 ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy, 138 if (!pll->new_div) {
77 &state->M, &state->P); 139 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
78 if (ret < 0) { 140 if (ret < 0)
79 kfree(state); 141 return ERR_PTR(ret);
80 return ERR_PTR(ret); 142
143 pll->new_pnm = (P << 16) | (N << 8) | M;
144 pll->new_div = 2 - 1;
145 } else {
146 pll->new_pnm = 0;
147 pll->new_div--;
81 } 148 }
82 149
83 return state; 150 if ((nv_rd32(dev, pll->src1) & 0x00000101) != 0x00000101)
151 pll->old_pnm = nv_rd32(dev, pll->coef);
152 return pll;
84} 153}
85 154
86void 155void
87nva3_pm_clock_set(struct drm_device *dev, void *pre_state) 156nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
88{ 157{
89 struct nva3_pm_state *state = pre_state; 158 struct nva3_pm_state *pll = pre_state;
90 u32 reg = state->pll.reg; 159 u32 ctrl = 0;
160
161 /* For the memory clock, NVIDIA will build a "script" describing
162 * the reclocking process and ask PDAEMON to execute it.
163 */
164 if (pll->type == PLL_MEMORY) {
165 nv_wr32(dev, 0x100210, 0);
166 nv_wr32(dev, 0x1002dc, 1);
167 nv_wr32(dev, 0x004018, 0x00001000);
168 ctrl = 0x18000100;
169 }
170
171 if (pll->old_pnm || !pll->new_pnm) {
172 nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 |
173 (pll->new_div << 18));
174 nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
175 nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
176 }
177
178 if (pll->new_pnm) {
179 nv_mask(dev, pll->src0, 0x00000101, 0x00000101);
180 nv_wr32(dev, pll->coef, pll->new_pnm);
181 nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl);
182 nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000);
183 nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010);
184 nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl);
185 nv_mask(dev, pll->src1, 0x00000100, 0x00000000);
186 nv_mask(dev, pll->src1, 0x00000001, 0x00000000);
187 if (pll->type == PLL_MEMORY)
188 nv_wr32(dev, 0x4018, 0x10005000);
189 } else {
190 nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
191 nv_mask(dev, pll->src0, 0x00000100, 0x00000000);
192 nv_mask(dev, pll->src0, 0x00000001, 0x00000000);
193 if (pll->type == PLL_MEMORY)
194 nv_wr32(dev, 0x4018, 0x1000d000);
195 }
196
197 if (pll->type == PLL_MEMORY) {
198 nv_wr32(dev, 0x1002dc, 0);
199 nv_wr32(dev, 0x100210, 0x80000000);
200 }
91 201
92 nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M); 202 kfree(pll);
93 kfree(state);
94} 203}
95 204
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
new file mode 100644
index 000000000000..208fa7ab3f42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31#include "nvc0_copy.fuc.h"
32
33struct nvc0_copy_engine {
34 struct nouveau_exec_engine base;
35 u32 irq;
36 u32 pmc;
37 u32 fuc;
38 u32 ctx;
39};
40
41static int
42nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
43{
44 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
45 struct drm_device *dev = chan->dev;
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nouveau_gpuobj *ramin = chan->ramin;
48 struct nouveau_gpuobj *ctx = NULL;
49 int ret;
50
51 ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
52 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
53 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
54 if (ret)
55 return ret;
56
57 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst));
58 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst));
59 dev_priv->engine.instmem.flush(dev);
60
61 chan->engctx[engine] = ctx;
62 return 0;
63}
64
65static int
66nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
67 u32 handle, u16 class)
68{
69 return 0;
70}
71
72static void
73nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
74{
75 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
76 struct nouveau_gpuobj *ctx = chan->engctx[engine];
77 struct drm_device *dev = chan->dev;
78 u32 inst;
79
80 inst = (chan->ramin->vinst >> 12);
81 inst |= 0x40000000;
82
83 /* disable fifo access */
84 nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
85 /* mark channel as unloaded if it's currently active */
86 if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
87 nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
88 /* mark next channel as invalid if it's about to be loaded */
89 if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
90 nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
91 /* restore fifo access */
92 nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
93
94 nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
95 nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
96 nouveau_gpuobj_ref(NULL, &ctx);
97
98 chan->engctx[engine] = ctx;
99}
100
101static int
102nvc0_copy_init(struct drm_device *dev, int engine)
103{
104 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
105 int i;
106
107 nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
108 nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
109 nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
110
111 nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
112 for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
113 nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
114
115 nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
116 for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
117 if ((i & 0x3f) == 0)
118 nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
119 nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
120 }
121
122 nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
123 nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
124 nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
125 nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
126 return 0;
127}
128
129static int
130nvc0_copy_fini(struct drm_device *dev, int engine)
131{
132 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
133
134 nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
135
136 /* trigger fuc context unload */
137 nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
138 nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
139 nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
140 nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
141
142 nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
143 return 0;
144}
145
146static struct nouveau_enum nvc0_copy_isr_error_name[] = {
147 { 0x0001, "ILLEGAL_MTHD" },
148 { 0x0002, "INVALID_ENUM" },
149 { 0x0003, "INVALID_BITFIELD" },
150 {}
151};
152
153static void
154nvc0_copy_isr(struct drm_device *dev, int engine)
155{
156 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
157 u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
158 u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
159 u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
160 u32 chid = nvc0_graph_isr_chid(dev, inst);
161 u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
162 u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
163 u32 mthd = (addr & 0x07ff) << 2;
164 u32 subc = (addr & 0x3800) >> 11;
165 u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
166
167 if (stat & 0x00000040) {
168 NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
169 nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
170 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
171 chid, inst, subc, mthd, data);
172 nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
173 stat &= ~0x00000040;
174 }
175
176 if (stat) {
177 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
178 nv_wr32(dev, pcopy->fuc + 0x004, stat);
179 }
180}
181
182static void
183nvc0_copy_isr_0(struct drm_device *dev)
184{
185 nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
186}
187
188static void
189nvc0_copy_isr_1(struct drm_device *dev)
190{
191 nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
192}
193
194static void
195nvc0_copy_destroy(struct drm_device *dev, int engine)
196{
197 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
198
199 nouveau_irq_unregister(dev, pcopy->irq);
200
201 if (engine == NVOBJ_ENGINE_COPY0)
202 NVOBJ_ENGINE_DEL(dev, COPY0);
203 else
204 NVOBJ_ENGINE_DEL(dev, COPY1);
205 kfree(pcopy);
206}
207
208int
209nvc0_copy_create(struct drm_device *dev, int engine)
210{
211 struct nvc0_copy_engine *pcopy;
212
213 pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
214 if (!pcopy)
215 return -ENOMEM;
216
217 pcopy->base.destroy = nvc0_copy_destroy;
218 pcopy->base.init = nvc0_copy_init;
219 pcopy->base.fini = nvc0_copy_fini;
220 pcopy->base.context_new = nvc0_copy_context_new;
221 pcopy->base.context_del = nvc0_copy_context_del;
222 pcopy->base.object_new = nvc0_copy_object_new;
223
224 if (engine == 0) {
225 pcopy->irq = 5;
226 pcopy->pmc = 0x00000040;
227 pcopy->fuc = 0x104000;
228 pcopy->ctx = 0x0230;
229 nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
230 NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
231 NVOBJ_CLASS(dev, 0x90b5, COPY0);
232 } else {
233 pcopy->irq = 6;
234 pcopy->pmc = 0x00000080;
235 pcopy->fuc = 0x105000;
236 pcopy->ctx = 0x0240;
237 nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
238 NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
239 NVOBJ_CLASS(dev, 0x90b8, COPY1);
240 }
241
242 return 0;
243}
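
Unlike the nva3 engine, the nvc0 constructor is instanced: irq/pmc/fuc/ctx
select between the two copy engines. A minimal sketch of a caller, assuming a
capability flag for the second engine (the flag and function name are
illustrative, not part of this patch):

static int
nvxx_copy_setup(struct drm_device *dev, bool has_second_pce)
{
	int ret = nvc0_copy_create(dev, 0);	/* COPY0: irq 5, PMC bit 6 */

	if (ret == 0 && has_second_pce)
		ret = nvc0_copy_create(dev, 1);	/* COPY1: irq 6, PMC bit 7 */
	return ret;
}
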
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
new file mode 100644
index 000000000000..419903880e9d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -0,0 +1,527 @@
1uint32_t nvc0_pcopy_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00010000,
67 0x00000000,
68 0x00000000,
69 0x00010040,
70 0x0001019f,
71 0x00000000,
72 0x00010050,
73 0x000101a1,
74 0x00000000,
75 0x00070080,
76 0x0000001c,
77 0xfffff000,
78 0x00000020,
79 0xfff80000,
80 0x00000024,
81 0xffffe000,
82 0x00000028,
83 0xfffff800,
84 0x0000002c,
85 0xfffff000,
86 0x00000030,
87 0xfff80000,
88 0x00000034,
89 0xffffe000,
90 0x00070088,
91 0x00000048,
92 0xfffff000,
93 0x0000004c,
94 0xfff80000,
95 0x00000050,
96 0xffffe000,
97 0x00000054,
98 0xfffff800,
99 0x00000058,
100 0xfffff000,
101 0x0000005c,
102 0xfff80000,
103 0x00000060,
104 0xffffe000,
105 0x000200c0,
106 0x000104b8,
107 0x00000000,
108 0x00010541,
109 0x00000000,
110 0x000e00c3,
111 0x00000010,
112 0xffffff00,
113 0x00000014,
114 0x0000000f,
115 0x0000003c,
116 0xffffff00,
117 0x00000040,
118 0x0000000f,
119 0x00000018,
120 0xfff80000,
121 0x00000044,
122 0xfff80000,
123 0x00000074,
124 0xffff0000,
125 0x00000078,
126 0xffffe000,
127 0x00000068,
128 0xfccc0000,
129 0x0000006c,
130 0x00000000,
131 0x00000070,
132 0x00000000,
133 0x00000004,
134 0xffffff00,
135 0x00000008,
136 0x00000000,
137 0x0000000c,
138 0x00000000,
139 0x00000800,
140};
141
142uint32_t nvc0_pcopy_code[] = {
143 0x04fe04bd,
144 0x3517f000,
145 0xf10010fe,
146 0xf1040017,
147 0xf0fff327,
148 0x22d00023,
149 0x0c25f0c0,
150 0xf40012d0,
151 0x17f11031,
152 0x27f01200,
153 0x0012d003,
154 0xf40031f4,
155 0x0ef40028,
156 0x8001cffd,
157 0xf40812c4,
158 0x21f4060b,
159 0x0412c4ca,
160 0xf5070bf4,
161 0xc4010221,
162 0x01d00c11,
163 0xf101f840,
164 0xfe770047,
165 0x47f1004b,
166 0x44cf2100,
167 0x0144f000,
168 0xb60444b6,
169 0xf7f13040,
170 0xf4b6061c,
171 0x1457f106,
172 0x00f5d101,
173 0xb6043594,
174 0x57fe0250,
175 0x0145fe00,
176 0x010052b7,
177 0x00ff67f1,
178 0x56fd60bd,
179 0x0253f004,
180 0xf80545fa,
181 0x0053f003,
182 0xd100e7f0,
183 0x549800fe,
184 0x0845b600,
185 0xb6015698,
186 0x46fd1864,
187 0x0047fe05,
188 0xf00204b9,
189 0x01f40643,
190 0x0604fa09,
191 0xfa060ef4,
192 0x03f80504,
	0x27f100f8,
	0x23cf1400,
	0x1e3fc800,
	0xf4170bf4,
	0x21f40132,
	0x1e3af053,
	0xf00023d0,
	0x24d00147,
	0xcf00f880,
	0x3dc84023,
	0x090bf41e,
	0xf40131f4,
	0x37f05321,
	0x8023d002,
	0x37f100f8,
	0x32cf1900,
	0x0033cf40,
	0x07ff24e4,
	0xf11024b6,
	0xbd010057,
	0x5874bd64,
	0x57580056,
	0x0450b601,
	0xf40446b8,
	0x76bb4d08,
	0x0447b800,
	0xbb0f08f4,
	0x74b60276,
	0x0057bb03,
	0xbbdf0ef4,
	0x44b60246,
	0x0045bb03,
	0xfd014598,
	0x54b00453,
	0x201bf400,
	0x58004558,
	0x64b00146,
	0x091bf400,
	0xf4005380,
	0x32f4300e,
	0xf455f901,
	0x0ef40c01,
	0x0225f025,
	0xf10125f0,
	0xd0100047,
	0x43d00042,
	0x4027f040,
	0xcf0002d0,
	0x24f08002,
	0x0024b040,
	0xf1f71bf4,
	0xf01d0027,
	0x23d00137,
	0xf800f800,
	0x0027f100,
	0xf034bd22,
	0x23d00233,
	0xf400f800,
	0x01b0f030,
	0x0101b000,
	0xb00201b0,
	0x04980301,
	0x3045c71a,
	0xc70150b6,
	0x60b63446,
	0x3847c701,
	0xf40170b6,
	0x84bd0232,
	0x4ac494bd,
	0x0445b60f,
	0xa430b4bd,
	0x0f18f404,
	0xbbc0a5ff,
	0x31f400cb,
	0x220ef402,
	0xf00c1bf4,
	0xcbbb10c7,
	0x160ef400,
	0xf406a430,
	0xc7f00c18,
	0x00cbbb14,
	0xf1070ef4,
	0x380080c7,
	0x80b601c8,
	0x01b0b601,
	0xf404b5b8,
	0x90b6c308,
	0x0497b801,
	0xfdb208f4,
	0x06800065,
	0x1d08980e,
	0xf40068fd,
	0x64bd0502,
	0x800075fd,
	0x78fd1907,
	0x1057f100,
	0x0654b608,
	0xd00056d0,
	0x50b74057,
	0x06980800,
	0x0162b619,
	0x980864b6,
	0x72b60e07,
	0x0567fd01,
	0xb70056d0,
	0xb4010050,
	0x56d00060,
	0x0160b400,
	0xb44056d0,
	0x56d00260,
	0x0360b480,
	0xb7c056d0,
	0x98040050,
	0x56d01b06,
	0x1c069800,
	0xf44056d0,
	0x00f81030,
	0xc7075798,
	0x78c76879,
	0x0380b664,
	0xb06077c7,
	0x1bf40e76,
	0x0477f009,
	0xf00f0ef4,
	0x70b6027c,
	0x0947fd11,
	0x980677f0,
	0x5b980c5a,
	0x00abfd0e,
	0xbb01b7f0,
	0xb2b604b7,
	0xc4abff01,
	0x9805a7bb,
	0xe7f00d5d,
	0x04e8bb01,
	0xff01e2b6,
	0xd8bbb4de,
	0x01e0b605,
	0xbb0cef94,
	0xfefd02eb,
	0x026cf005,
	0x020860b7,
	0xd00864b6,
	0xb7bb006f,
	0x00cbbb04,
	0x98085f98,
	0xfbfd0e5b,
	0x01b7f000,
	0xb604b7bb,
	0xfbbb01b2,
	0x05f7bb00,
	0x5f98f0f9,
	0x01b7f009,
	0xb604b8bb,
	0xfbbb01b2,
	0x05f8bb00,
	0x78bbf0f9,
	0x0282b600,
	0xbb01b7f0,
	0xb9bb04b8,
	0x0b589804,
	0xbb01e7f0,
	0xe2b604e9,
	0xf48eff01,
	0xbb04f7bb,
	0x79bb00cf,
	0x0589bb00,
	0x90fcf0fc,
	0xbb00d9fd,
	0x89fd00ad,
	0x008ffd00,
	0xbb00a8bb,
	0x92b604a7,
	0x0497bb01,
	0x988069d0,
	0x58980557,
	0x00acbb04,
	0xb6007abb,
	0x84b60081,
	0x058bfd10,
	0x060062b7,
	0xb70067d0,
	0xd0040060,
	0x00f80068,
	0xb7026cf0,
	0xb6020260,
	0x57980864,
	0x0067d005,
	0x040060b7,
	0xb6045798,
	0x67d01074,
	0x0060b700,
	0x06579804,
	0xf80067d0,
	0xf900f900,
	0x0007f110,
	0x0604b608,
	0xf00001cf,
	0x1bf40114,
	0xfc10fcfa,
	0xc800f800,
	0x1bf40d34,
	0xd121f570,
	0x0c47f103,
	0x0644b608,
	0xb6020598,
	0x45d00450,
	0x4040d000,
	0xd00c57f0,
	0x40b78045,
	0x05980400,
	0x1054b601,
	0xb70045d0,
	0xf1050040,
	0xf00b0057,
	0x45d00153,
	0x4057f100,
	0x0154b640,
	0x808053f1,
	0xf14045d0,
	0xf1111057,
	0xd0131253,
	0x57f18045,
	0x53f11514,
	0x45d01716,
	0x0157f1c0,
	0x0153f026,
	0x080047f1,
	0xd00644b6,
	0x21f50045,
	0x47f103d1,
	0x44b6080c,
	0x02059806,
	0xd00045d0,
	0x57f04040,
	0x8045d004,
	0x040040b7,
	0xb6010598,
	0x45d01054,
	0x0040b700,
	0x0057f105,
	0x0045d003,
	0x111057f1,
	0x131253f1,
	0x984045d0,
	0x40b70305,
	0x45d00500,
	0x0157f100,
	0x0153f026,
	0x080047f1,
	0xd00644b6,
	0x00f80045,
	0x03d121f5,
	0xf4003fc8,
	0x21f50e0b,
	0x47f101af,
	0x0ef40200,
	0x1067f11e,
	0x0664b608,
	0x800177f0,
	0x07800e07,
	0x1d079819,
	0xd00067d0,
	0x44bd4067,
	0xbd0232f4,
	0x043fc854,
	0xf50a0bf4,
	0xf403a821,
	0x21f50a0e,
	0x49f0029c,
	0x0231f407,
	0xc82c57f0,
	0x0bf4083f,
	0xa821f50a,
	0x0a0ef403,
	0x029c21f5,
	0xf10849f0,
	0xb6080057,
	0x06980654,
	0x4056d01e,
	0xf14167f0,
	0xfd440063,
	0x54d00546,
	0x0c3fc800,
	0xf5070bf4,
	0xf803eb21,
	0x0027f100,
	0xf034bd22,
	0x23d00133,
	0x0000f800,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 2886f2726a9e..fb4f5943e01b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -37,7 +37,7 @@ struct nvc0_fifo_priv {
37}; 37};
38 38
39struct nvc0_fifo_chan { 39struct nvc0_fifo_chan {
40 struct nouveau_bo *user; 40 struct nouveau_gpuobj *user;
41 struct nouveau_gpuobj *ramfc; 41 struct nouveau_gpuobj *ramfc;
42}; 42};
43 43
@@ -106,7 +106,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
107 struct nvc0_fifo_priv *priv = pfifo->priv; 107 struct nvc0_fifo_priv *priv = pfifo->priv;
108 struct nvc0_fifo_chan *fifoch; 108 struct nvc0_fifo_chan *fifoch;
109 u64 ib_virt, user_vinst; 109 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
110 int ret; 110 int ret;
111 111
112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); 112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
@@ -115,28 +115,13 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
115 fifoch = chan->fifo_priv; 115 fifoch = chan->fifo_priv;
116 116
117 /* allocate vram for control regs, map into polling area */ 117 /* allocate vram for control regs, map into polling area */
118 ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM, 118 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
119 0, 0, &fifoch->user); 119 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
120 if (ret) 120 if (ret)
121 goto error; 121 goto error;
122 122
123 ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
124 if (ret) {
125 nouveau_bo_ref(NULL, &fifoch->user);
126 goto error;
127 }
128
129 user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
130
131 ret = nouveau_bo_map(fifoch->user);
132 if (ret) {
133 nouveau_bo_unpin(fifoch->user);
134 nouveau_bo_ref(NULL, &fifoch->user);
135 goto error;
136 }
137
138 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000, 123 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
139 fifoch->user->bo.mem.mm_node); 124 *(struct nouveau_mem **)fifoch->user->node);
140 125
141 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + 126 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
142 priv->user_vma.offset + (chan->id * 0x1000), 127 priv->user_vma.offset + (chan->id * 0x1000),
@@ -146,20 +131,6 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
146 goto error; 131 goto error;
147 } 132 }
148 133
149 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
150
151 /* zero channel regs */
152 nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
153 nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
154 nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
155 nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
156 nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
157 nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
158 nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
159 nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
160 nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
161 nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
162
163 /* ramfc */ 134 /* ramfc */
164 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, 135 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
165 chan->ramin->vinst, 0x100, 136 chan->ramin->vinst, 0x100,
@@ -167,8 +138,8 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
167 if (ret) 138 if (ret)
168 goto error; 139 goto error;
169 140
170 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst)); 141 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
171 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst)); 142 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
172 nv_wo32(fifoch->ramfc, 0x10, 0x0000face); 143 nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
173 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902); 144 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
174 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt)); 145 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
@@ -223,11 +194,7 @@ nvc0_fifo_destroy_context(struct nouveau_channel *chan)
223 return; 194 return;
224 195
225 nouveau_gpuobj_ref(NULL, &fifoch->ramfc); 196 nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
226 if (fifoch->user) { 197 nouveau_gpuobj_ref(NULL, &fifoch->user);
227 nouveau_bo_unmap(fifoch->user);
228 nouveau_bo_unpin(fifoch->user);
229 nouveau_bo_ref(NULL, &fifoch->user);
230 }
231 kfree(fifoch); 198 kfree(fifoch);
232} 199}
233 200
@@ -240,6 +207,21 @@ nvc0_fifo_load_context(struct nouveau_channel *chan)
240int 207int
241nvc0_fifo_unload_context(struct drm_device *dev) 208nvc0_fifo_unload_context(struct drm_device *dev)
242{ 209{
210 int i;
211
212 for (i = 0; i < 128; i++) {
213 if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1))
214 continue;
215
216 nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000);
217 nv_wr32(dev, 0x002634, i);
218 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
219 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
220 i, nv_rd32(dev, 0x002634));
221 return -EBUSY;
222 }
223 }
224
243 return 0; 225 return 0;
244} 226}
245 227
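
The loop added above implements a simple kick protocol: every channel slot whose enable bit (bit 0 of 0x003004 + i * 4) is set gets that bit cleared, its id written to 0x002634, and a poll until the register echoes the id back. A minimal sketch of one such kick factored into a helper (hypothetical name; nv_mask/nv_wr32/nv_wait used exactly as in the hunk):

	/* Sketch: kick one PFIFO channel off the hardware and wait for
	 * the acknowledgement to appear in 0x002634. */
	static int nvc0_fifo_kick_one(struct drm_device *dev, int chid)
	{
		/* drop the channel's enable bit */
		nv_mask(dev, 0x003004 + (chid * 4), 0x00000001, 0x00000000);
		/* request the kick and wait for the id to be echoed back */
		nv_wr32(dev, 0x002634, chid);
		if (!nv_wait(dev, 0x002634, 0xffffffff, chid))
			return -EBUSY;
		return 0;
	}
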
@@ -309,6 +291,7 @@ nvc0_fifo_init(struct drm_device *dev)
309{ 291{
310 struct drm_nouveau_private *dev_priv = dev->dev_private; 292 struct drm_nouveau_private *dev_priv = dev->dev_private;
311 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 293 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
294 struct nouveau_channel *chan;
312 struct nvc0_fifo_priv *priv; 295 struct nvc0_fifo_priv *priv;
313 int ret, i; 296 int ret, i;
314 297
@@ -351,23 +334,74 @@ nvc0_fifo_init(struct drm_device *dev)
351 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */ 334 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
352 nv_wr32(dev, 0x002100, 0xffffffff); 335 nv_wr32(dev, 0x002100, 0xffffffff);
353 nv_wr32(dev, 0x002140, 0xbfffffff); 336 nv_wr32(dev, 0x002140, 0xbfffffff);
337
338 /* restore PFIFO context table */
339 for (i = 0; i < 128; i++) {
340 chan = dev_priv->channels.ptr[i];
341 if (!chan || !chan->fifo_priv)
342 continue;
343
344 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
345 (chan->ramin->vinst >> 12));
346 nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
347 }
348 nvc0_fifo_playlist_update(dev);
349
354 return 0; 350 return 0;
355} 351}
356 352
357struct nouveau_enum nvc0_fifo_fault_unit[] = { 353struct nouveau_enum nvc0_fifo_fault_unit[] = {
358 { 0, "PGRAPH" }, 354 { 0x00, "PGRAPH" },
359 { 3, "PEEPHOLE" }, 355 { 0x03, "PEEPHOLE" },
360 { 4, "BAR1" }, 356 { 0x04, "BAR1" },
361 { 5, "BAR3" }, 357 { 0x05, "BAR3" },
362 { 7, "PFIFO" }, 358 { 0x07, "PFIFO" },
359 { 0x10, "PBSP" },
360 { 0x11, "PPPP" },
361 { 0x13, "PCOUNTER" },
362 { 0x14, "PVP" },
363 { 0x15, "PCOPY0" },
364 { 0x16, "PCOPY1" },
365 { 0x17, "PDAEMON" },
363 {} 366 {}
364}; 367};
365 368
366struct nouveau_enum nvc0_fifo_fault_reason[] = { 369struct nouveau_enum nvc0_fifo_fault_reason[] = {
367 { 0, "PT_NOT_PRESENT" }, 370 { 0x00, "PT_NOT_PRESENT" },
368 { 1, "PT_TOO_SHORT" }, 371 { 0x01, "PT_TOO_SHORT" },
369 { 2, "PAGE_NOT_PRESENT" }, 372 { 0x02, "PAGE_NOT_PRESENT" },
370 { 3, "VM_LIMIT_EXCEEDED" }, 373 { 0x03, "VM_LIMIT_EXCEEDED" },
374 { 0x04, "NO_CHANNEL" },
375 { 0x05, "PAGE_SYSTEM_ONLY" },
376 { 0x06, "PAGE_READ_ONLY" },
377 { 0x0a, "COMPRESSED_SYSRAM" },
378 { 0x0c, "INVALID_STORAGE_TYPE" },
379 {}
380};
381
382struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
383 { 0x01, "PCOPY0" },
384 { 0x02, "PCOPY1" },
385 { 0x04, "DISPATCH" },
386 { 0x05, "CTXCTL" },
387 { 0x06, "PFIFO" },
388 { 0x07, "BAR_READ" },
389 { 0x08, "BAR_WRITE" },
390 { 0x0b, "PVP" },
391 { 0x0c, "PPPP" },
392 { 0x0d, "PBSP" },
393 { 0x11, "PCOUNTER" },
394 { 0x12, "PDAEMON" },
395 { 0x14, "CCACHE" },
396 { 0x15, "CCACHE_POST" },
397 {}
398};
399
400struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
401 { 0x01, "TEX" },
402 { 0x0c, "ESETUP" },
403 { 0x0e, "CTXCTL" },
404 { 0x0f, "PROP" },
371 {} 405 {}
372}; 406};
373 407
@@ -385,12 +419,20 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
385 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10)); 419 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
386 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10)); 420 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
387 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10)); 421 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
422 u32 client = (stat & 0x00001f00) >> 8;
388 423
389 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [", 424 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
390 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo); 425 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
391 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f); 426 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
392 printk("] from "); 427 printk("] from ");
393 nouveau_enum_print(nvc0_fifo_fault_unit, unit); 428 nouveau_enum_print(nvc0_fifo_fault_unit, unit);
429 if (stat & 0x00000040) {
430 printk("/");
431 nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
432 } else {
433 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
434 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
435 }
394 printk(" on channel 0x%010llx\n", (u64)inst << 12); 436 printk(" on channel 0x%010llx\n", (u64)inst << 12);
395} 437}
396 438
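
The extra decoding keys off three fields of the fault status word: bit 6 distinguishes hub clients from GPC clients, bits 8:12 carry the client id within whichever enum table applies, and for GPC faults bits 24:28 carry the GPC index. A sketch of the layout as consumed above (masks copied from the code; the struct itself is only illustrative):

	/* Sketch: bitfields of the 0x280c fault status word. */
	struct nvc0_vm_fault_info {
		unsigned reason;   /* stat & 0x0000000f -> nvc0_fifo_fault_reason[] */
		unsigned is_write; /* stat & 0x00000080, otherwise a read fault */
		unsigned is_hub;   /* stat & 0x00000040: hub client, else GPC client */
		unsigned client;   /* (stat & 0x00001f00) >> 8 -> client enum */
		unsigned gpc;      /* (stat & 0x1f000000) >> 24, GPC faults only */
	};
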
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 3de9b721d8db..ca6db204d644 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -30,27 +30,40 @@
30#include "nouveau_mm.h" 30#include "nouveau_mm.h"
31#include "nvc0_graph.h" 31#include "nvc0_graph.h"
32 32
33static void nvc0_graph_isr(struct drm_device *); 33static int
34static void nvc0_runk140_isr(struct drm_device *); 34nvc0_graph_load_context(struct nouveau_channel *chan)
35static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
36
37void
38nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
39{ 35{
36 struct drm_device *dev = chan->dev;
37
38 nv_wr32(dev, 0x409840, 0x00000030);
39 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
40 nv_wr32(dev, 0x409504, 0x00000003);
41 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
42 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
43
44 return 0;
40} 45}
41 46
42struct nouveau_channel * 47static int
43nvc0_graph_channel(struct drm_device *dev) 48nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
44{ 49{
45 return NULL; 50 nv_wr32(dev, 0x409840, 0x00000003);
51 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
52 nv_wr32(dev, 0x409504, 0x00000009);
53 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
54 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
55 return -EBUSY;
56 }
57
58 return 0;
46} 59}
47 60
48static int 61static int
49nvc0_graph_construct_context(struct nouveau_channel *chan) 62nvc0_graph_construct_context(struct nouveau_channel *chan)
50{ 63{
51 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 64 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
52 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 65 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
53 struct nvc0_graph_chan *grch = chan->pgraph_ctx; 66 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
54 struct drm_device *dev = chan->dev; 67 struct drm_device *dev = chan->dev;
55 int ret, i; 68 int ret, i;
56 u32 *ctx; 69 u32 *ctx;
@@ -89,9 +102,8 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
89static int 102static int
90nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) 103nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
91{ 104{
92 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 105 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
93 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 106 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
94 struct nvc0_graph_chan *grch = chan->pgraph_ctx;
95 struct drm_device *dev = chan->dev; 107 struct drm_device *dev = chan->dev;
96 int i = 0, gpc, tp, ret; 108 int i = 0, gpc, tp, ret;
97 u32 magic; 109 u32 magic;
@@ -158,29 +170,27 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
158 return 0; 170 return 0;
159} 171}
160 172
161int 173static int
162nvc0_graph_create_context(struct nouveau_channel *chan) 174nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
163{ 175{
164 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 176 struct drm_device *dev = chan->dev;
177 struct drm_nouveau_private *dev_priv = dev->dev_private;
165 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 178 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
166 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 179 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
167 struct nvc0_graph_priv *priv = pgraph->priv;
168 struct nvc0_graph_chan *grch; 180 struct nvc0_graph_chan *grch;
169 struct drm_device *dev = chan->dev;
170 struct nouveau_gpuobj *grctx; 181 struct nouveau_gpuobj *grctx;
171 int ret, i; 182 int ret, i;
172 183
173 chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL); 184 grch = kzalloc(sizeof(*grch), GFP_KERNEL);
174 if (!chan->pgraph_ctx) 185 if (!grch)
175 return -ENOMEM; 186 return -ENOMEM;
176 grch = chan->pgraph_ctx; 187 chan->engctx[NVOBJ_ENGINE_GR] = grch;
177 188
178 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, 189 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
179 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, 190 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
180 &grch->grctx); 191 &grch->grctx);
181 if (ret) 192 if (ret)
182 goto error; 193 goto error;
183 chan->ramin_grctx = grch->grctx;
184 grctx = grch->grctx; 194 grctx = grch->grctx;
185 195
186 ret = nvc0_graph_create_context_mmio_list(chan); 196 ret = nvc0_graph_create_context_mmio_list(chan);
@@ -200,104 +210,49 @@ nvc0_graph_create_context(struct nouveau_channel *chan)
200 for (i = 0; i < priv->grctx_size; i += 4) 210 for (i = 0; i < priv->grctx_size; i += 4)
201 nv_wo32(grctx, i, priv->grctx_vals[i / 4]); 211 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
202 212
203 nv_wo32(grctx, 0xf4, 0); 213 nv_wo32(grctx, 0xf4, 0);
204 nv_wo32(grctx, 0xf8, 0); 214 nv_wo32(grctx, 0xf8, 0);
205 nv_wo32(grctx, 0x10, grch->mmio_nr); 215 nv_wo32(grctx, 0x10, grch->mmio_nr);
206 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); 216 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
207 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); 217 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
208 nv_wo32(grctx, 0x1c, 1); 218 nv_wo32(grctx, 0x1c, 1);
209 nv_wo32(grctx, 0x20, 0); 219 nv_wo32(grctx, 0x20, 0);
210 nv_wo32(grctx, 0x28, 0); 220 nv_wo32(grctx, 0x28, 0);
211 nv_wo32(grctx, 0x2c, 0); 221 nv_wo32(grctx, 0x2c, 0);
212 pinstmem->flush(dev); 222 pinstmem->flush(dev);
213 return 0; 223 return 0;
214 224
215error: 225error:
216 pgraph->destroy_context(chan); 226 priv->base.context_del(chan, engine);
217 return ret; 227 return ret;
218} 228}
219 229
220void 230static void
221nvc0_graph_destroy_context(struct nouveau_channel *chan) 231nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
222{ 232{
223 struct nvc0_graph_chan *grch; 233 struct nvc0_graph_chan *grch = chan->engctx[engine];
224
225 grch = chan->pgraph_ctx;
226 chan->pgraph_ctx = NULL;
227 if (!grch)
228 return;
229 234
230 nouveau_gpuobj_ref(NULL, &grch->mmio); 235 nouveau_gpuobj_ref(NULL, &grch->mmio);
231 nouveau_gpuobj_ref(NULL, &grch->unk418810); 236 nouveau_gpuobj_ref(NULL, &grch->unk418810);
232 nouveau_gpuobj_ref(NULL, &grch->unk40800c); 237 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
233 nouveau_gpuobj_ref(NULL, &grch->unk408004); 238 nouveau_gpuobj_ref(NULL, &grch->unk408004);
234 nouveau_gpuobj_ref(NULL, &grch->grctx); 239 nouveau_gpuobj_ref(NULL, &grch->grctx);
235 chan->ramin_grctx = NULL; 240 chan->engctx[engine] = NULL;
236} 241}
237 242
238int 243static int
239nvc0_graph_load_context(struct nouveau_channel *chan) 244nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
245 u32 handle, u16 class)
240{ 246{
241 struct drm_device *dev = chan->dev;
242
243 nv_wr32(dev, 0x409840, 0x00000030);
244 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
245 nv_wr32(dev, 0x409504, 0x00000003);
246 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
247 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
248
249 return 0; 247 return 0;
250} 248}
251 249
252static int 250static int
253nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan) 251nvc0_graph_fini(struct drm_device *dev, int engine)
254{ 252{
255 nv_wr32(dev, 0x409840, 0x00000003);
256 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
257 nv_wr32(dev, 0x409504, 0x00000009);
258 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
259 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
260 return -EBUSY;
261 }
262
263 return 0; 253 return 0;
264} 254}
265 255
266int
267nvc0_graph_unload_context(struct drm_device *dev)
268{
269 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
270 return nvc0_graph_unload_context_to(dev, inst);
271}
272
273static void
274nvc0_graph_destroy(struct drm_device *dev)
275{
276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
278 struct nvc0_graph_priv *priv;
279
280 priv = pgraph->priv;
281 if (!priv)
282 return;
283
284 nouveau_irq_unregister(dev, 12);
285 nouveau_irq_unregister(dev, 25);
286
287 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
288 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
289
290 if (priv->grctx_vals)
291 kfree(priv->grctx_vals);
292 kfree(priv);
293}
294
295void
296nvc0_graph_takedown(struct drm_device *dev)
297{
298 nvc0_graph_destroy(dev);
299}
300
301static int 256static int
302nvc0_graph_mthd_page_flip(struct nouveau_channel *chan, 257nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
303 u32 class, u32 mthd, u32 data) 258 u32 class, u32 mthd, u32 data)
@@ -306,119 +261,10 @@ nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
306 return 0; 261 return 0;
307} 262}
308 263
309static int
310nvc0_graph_create(struct drm_device *dev)
311{
312 struct drm_nouveau_private *dev_priv = dev->dev_private;
313 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
314 struct nvc0_graph_priv *priv;
315 int ret, gpc, i;
316
317 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
318 if (!priv)
319 return -ENOMEM;
320 pgraph->priv = priv;
321
322 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
323 if (ret)
324 goto error;
325
326 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
327 if (ret)
328 goto error;
329
330 for (i = 0; i < 0x1000; i += 4) {
331 nv_wo32(priv->unk4188b4, i, 0x00000010);
332 nv_wo32(priv->unk4188b8, i, 0x00000010);
333 }
334
335 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
336 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
337 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
338 priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
339 priv->tp_total += priv->tp_nr[gpc];
340 }
341
342 /*XXX: these need figuring out... */
343 switch (dev_priv->chipset) {
344 case 0xc0:
345 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
346 priv->magic_not_rop_nr = 0x07;
347 /* filled values up to tp_total, the rest 0 */
348 priv->magicgpc980[0] = 0x22111000;
349 priv->magicgpc980[1] = 0x00000233;
350 priv->magicgpc980[2] = 0x00000000;
351 priv->magicgpc980[3] = 0x00000000;
352 priv->magicgpc918 = 0x000ba2e9;
353 } else
354 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
355 priv->magic_not_rop_nr = 0x05;
356 priv->magicgpc980[0] = 0x11110000;
357 priv->magicgpc980[1] = 0x00233222;
358 priv->magicgpc980[2] = 0x00000000;
359 priv->magicgpc980[3] = 0x00000000;
360 priv->magicgpc918 = 0x00092493;
361 } else
362 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
363 priv->magic_not_rop_nr = 0x06;
364 priv->magicgpc980[0] = 0x11110000;
365 priv->magicgpc980[1] = 0x03332222;
366 priv->magicgpc980[2] = 0x00000000;
367 priv->magicgpc980[3] = 0x00000000;
368 priv->magicgpc918 = 0x00088889;
369 }
370 break;
371 case 0xc3: /* 450, 4/0/0/0, 2 */
372 priv->magic_not_rop_nr = 0x03;
373 priv->magicgpc980[0] = 0x00003210;
374 priv->magicgpc980[1] = 0x00000000;
375 priv->magicgpc980[2] = 0x00000000;
376 priv->magicgpc980[3] = 0x00000000;
377 priv->magicgpc918 = 0x00200000;
378 break;
379 case 0xc4: /* 460, 3/4/0/0, 4 */
380 priv->magic_not_rop_nr = 0x01;
381 priv->magicgpc980[0] = 0x02321100;
382 priv->magicgpc980[1] = 0x00000000;
383 priv->magicgpc980[2] = 0x00000000;
384 priv->magicgpc980[3] = 0x00000000;
385 priv->magicgpc918 = 0x00124925;
386 break;
387 }
388
389 if (!priv->magic_not_rop_nr) {
390 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
391 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
392 priv->tp_nr[3], priv->rop_nr);
393 /* use 0xc3's values... */
394 priv->magic_not_rop_nr = 0x03;
395 priv->magicgpc980[0] = 0x00003210;
396 priv->magicgpc980[1] = 0x00000000;
397 priv->magicgpc980[2] = 0x00000000;
398 priv->magicgpc980[3] = 0x00000000;
399 priv->magicgpc918 = 0x00200000;
400 }
401
402 nouveau_irq_register(dev, 12, nvc0_graph_isr);
403 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
404 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
405 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
406 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
407 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
408 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
409 return 0;
410
411error:
412 nvc0_graph_destroy(dev);
413 return ret;
414}
415
416static void 264static void
417nvc0_graph_init_obj418880(struct drm_device *dev) 265nvc0_graph_init_obj418880(struct drm_device *dev)
418{ 266{
419 struct drm_nouveau_private *dev_priv = dev->dev_private; 267 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
420 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
421 struct nvc0_graph_priv *priv = pgraph->priv;
422 int i; 268 int i;
423 269
424 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000); 270 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
@@ -449,35 +295,42 @@ nvc0_graph_init_regs(struct drm_device *dev)
449static void 295static void
450nvc0_graph_init_gpc_0(struct drm_device *dev) 296nvc0_graph_init_gpc_0(struct drm_device *dev)
451{ 297{
452 struct drm_nouveau_private *dev_priv = dev->dev_private; 298 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
453 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 299 u32 data[TP_MAX / 8];
454 int gpc; 300 u8 tpnr[GPC_MAX];
455 301 int i, gpc, tpc;
456 // TP ROP UNKVAL(magic_not_rop_nr) 302
457 // 450: 4/0/0/0 2 3 303 /*
458 // 460: 3/4/0/0 4 1 304 * TP ROP UNKVAL(magic_not_rop_nr)
459 // 465: 3/4/4/0 4 7 305 * 450: 4/0/0/0 2 3
460 // 470: 3/3/4/4 5 5 306 * 460: 3/4/0/0 4 1
461 // 480: 3/4/4/4 6 6 307 * 465: 3/4/4/0 4 7
462 308 * 470: 3/3/4/4 5 5
463 // magicgpc918 309 * 480: 3/4/4/4 6 6
464 // 450: 00200000 00000000001000000000000000000000 310 *
465 // 460: 00124925 00000000000100100100100100100101 311 * magicgpc918
466 // 465: 000ba2e9 00000000000010111010001011101001 312 * 450: 00200000 00000000001000000000000000000000
467 // 470: 00092493 00000000000010010010010010010011 313 * 460: 00124925 00000000000100100100100100100101
468 // 480: 00088889 00000000000010001000100010001001 314 * 465: 000ba2e9 00000000000010111010001011101001
469 315 * 470: 00092493 00000000000010010010010010010011
470 /* filled values up to tp_total, remainder 0 */ 316 * 480: 00088889 00000000000010001000100010001001
471 // 450: 00003210 00000000 00000000 00000000 317 */
472 // 460: 02321100 00000000 00000000 00000000 318
473 // 465: 22111000 00000233 00000000 00000000 319 memset(data, 0x00, sizeof(data));
474 // 470: 11110000 00233222 00000000 00000000 320 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
475 // 480: 11110000 03332222 00000000 00000000 321 for (i = 0, gpc = -1; i < priv->tp_total; i++) {
476 322 do {
477 nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]); 323 gpc = (gpc + 1) % priv->gpc_nr;
478 nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]); 324 } while (!tpnr[gpc]);
479 nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]); 325 tpc = priv->tp_nr[gpc] - tpnr[gpc]--;
480 nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]); 326
327 data[i / 8] |= tpc << ((i % 8) * 4);
328 }
329
330 nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
331 nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
332 nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
333 nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
481 334
482 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 335 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
483 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | 336 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
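
The new loop replaces the hardcoded per-chipset magicgpc980 tables: it walks the TPs round-robin across GPCs and records, in nibble i of data[], the i-th TP's index within its own GPC. A standalone worked example for the 3/4/4/4 ("480") configuration, which reproduces the 11110000 03332222 values from the comment table above (a sketch; only the packing logic is taken from the patch):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned char tp_nr[4] = { 3, 4, 4, 4 };	/* TPs per GPC */
		unsigned char tpnr[4];
		unsigned int data[4] = { 0 };
		int i, gpc = -1, tp_total = 15;

		memcpy(tpnr, tp_nr, sizeof(tp_nr));
		for (i = 0; i < tp_total; i++) {
			int tpc;

			/* round-robin to the next GPC with TPs remaining */
			do {
				gpc = (gpc + 1) % 4;
			} while (!tpnr[gpc]);
			tpc = tp_nr[gpc] - tpnr[gpc]--;	/* index within the GPC */

			/* nibble i of data[] records that local index */
			data[i / 8] |= tpc << ((i % 8) * 4);
		}

		printf("%08x %08x\n", data[0], data[1]);	/* 11110000 03332222 */
		return 0;
	}
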
@@ -509,8 +362,7 @@ nvc0_graph_init_units(struct drm_device *dev)
509static void 362static void
510nvc0_graph_init_gpc_1(struct drm_device *dev) 363nvc0_graph_init_gpc_1(struct drm_device *dev)
511{ 364{
512 struct drm_nouveau_private *dev_priv = dev->dev_private; 365 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
513 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
514 int gpc, tp; 366 int gpc, tp;
515 367
516 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 368 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
@@ -535,8 +387,7 @@ nvc0_graph_init_gpc_1(struct drm_device *dev)
535static void 387static void
536nvc0_graph_init_rop(struct drm_device *dev) 388nvc0_graph_init_rop(struct drm_device *dev)
537{ 389{
538 struct drm_nouveau_private *dev_priv = dev->dev_private; 390 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
539 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
540 int rop; 391 int rop;
541 392
542 for (rop = 0; rop < priv->rop_nr; rop++) { 393 for (rop = 0; rop < priv->rop_nr; rop++) {
@@ -547,62 +398,36 @@ nvc0_graph_init_rop(struct drm_device *dev)
547 } 398 }
548} 399}
549 400
550static int 401static void
551nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base, 402nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
552 const char *code_fw, const char *data_fw) 403 struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
553{ 404{
554 const struct firmware *fw; 405 int i;
555 char name[32];
556 int ret, i;
557
558 snprintf(name, sizeof(name), "nouveau/%s", data_fw);
559 ret = request_firmware(&fw, name, &dev->pdev->dev);
560 if (ret) {
561 NV_ERROR(dev, "failed to load %s\n", data_fw);
562 return ret;
563 }
564 406
565 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000); 407 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
566 for (i = 0; i < fw->size / 4; i++) 408 for (i = 0; i < data->size / 4; i++)
567 nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]); 409 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
568 release_firmware(fw);
569
570 snprintf(name, sizeof(name), "nouveau/%s", code_fw);
571 ret = request_firmware(&fw, name, &dev->pdev->dev);
572 if (ret) {
573 NV_ERROR(dev, "failed to load %s\n", code_fw);
574 return ret;
575 }
576 410
577 nv_wr32(dev, fuc_base + 0x0180, 0x01000000); 411 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
578 for (i = 0; i < fw->size / 4; i++) { 412 for (i = 0; i < code->size / 4; i++) {
579 if ((i & 0x3f) == 0) 413 if ((i & 0x3f) == 0)
580 nv_wr32(dev, fuc_base + 0x0188, i >> 6); 414 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
581 nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]); 415 nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
582 } 416 }
583 release_firmware(fw);
584
585 return 0;
586} 417}
587 418
588static int 419static int
589nvc0_graph_init_ctxctl(struct drm_device *dev) 420nvc0_graph_init_ctxctl(struct drm_device *dev)
590{ 421{
591 struct drm_nouveau_private *dev_priv = dev->dev_private; 422 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
592 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
593 u32 r000260; 423 u32 r000260;
594 int ret;
595 424
596 /* load fuc microcode */ 425 /* load fuc microcode */
597 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 426 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
598 ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d"); 427 nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
599 if (ret == 0) 428 nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
600 ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
601 nv_wr32(dev, 0x000260, r000260); 429 nv_wr32(dev, 0x000260, r000260);
602 430
603 if (ret)
604 return ret;
605
606 /* start both of them running */ 431 /* start both of them running */
607 nv_wr32(dev, 0x409840, 0xffffffff); 432 nv_wr32(dev, 0x409840, 0xffffffff);
608 nv_wr32(dev, 0x41a10c, 0x00000000); 433 nv_wr32(dev, 0x41a10c, 0x00000000);
@@ -644,41 +469,19 @@ nvc0_graph_init_ctxctl(struct drm_device *dev)
644 return 0; 469 return 0;
645} 470}
646 471
647int 472static int
648nvc0_graph_init(struct drm_device *dev) 473nvc0_graph_init(struct drm_device *dev, int engine)
649{ 474{
650 struct drm_nouveau_private *dev_priv = dev->dev_private;
651 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
652 int ret; 475 int ret;
653 476
654 dev_priv->engine.graph.accel_blocked = true;
655
656 switch (dev_priv->chipset) {
657 case 0xc0:
658 case 0xc3:
659 case 0xc4:
660 break;
661 default:
662 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
663 if (nouveau_noaccel != 0)
664 return 0;
665 break;
666 }
667
668 nv_mask(dev, 0x000200, 0x18001000, 0x00000000); 477 nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
669 nv_mask(dev, 0x000200, 0x18001000, 0x18001000); 478 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
670 479
671 if (!pgraph->priv) {
672 ret = nvc0_graph_create(dev);
673 if (ret)
674 return ret;
675 }
676
677 nvc0_graph_init_obj418880(dev); 480 nvc0_graph_init_obj418880(dev);
678 nvc0_graph_init_regs(dev); 481 nvc0_graph_init_regs(dev);
679 //nvc0_graph_init_unitplemented_magics(dev); 482 /*nvc0_graph_init_unitplemented_magics(dev);*/
680 nvc0_graph_init_gpc_0(dev); 483 nvc0_graph_init_gpc_0(dev);
681 //nvc0_graph_init_unitplemented_c242(dev); 484 /*nvc0_graph_init_unitplemented_c242(dev);*/
682 485
683 nv_wr32(dev, 0x400500, 0x00010001); 486 nv_wr32(dev, 0x400500, 0x00010001);
684 nv_wr32(dev, 0x400100, 0xffffffff); 487 nv_wr32(dev, 0x400100, 0xffffffff);
@@ -697,12 +500,13 @@ nvc0_graph_init(struct drm_device *dev)
697 nv_wr32(dev, 0x400054, 0x34ce3464); 500 nv_wr32(dev, 0x400054, 0x34ce3464);
698 501
699 ret = nvc0_graph_init_ctxctl(dev); 502 ret = nvc0_graph_init_ctxctl(dev);
700 if (ret == 0) 503 if (ret)
701 dev_priv->engine.graph.accel_blocked = false; 504 return ret;
505
702 return 0; 506 return 0;
703} 507}
704 508
705static int 509int
706nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) 510nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
707{ 511{
708 struct drm_nouveau_private *dev_priv = dev->dev_private; 512 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -806,3 +610,187 @@ nvc0_runk140_isr(struct drm_device *dev)
806 units &= ~(1 << unit); 610 units &= ~(1 << unit);
807 } 611 }
808} 612}
613
614static int
615nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
616 struct nvc0_graph_fuc *fuc)
617{
618 struct drm_nouveau_private *dev_priv = dev->dev_private;
619 const struct firmware *fw;
620 char f[32];
621 int ret;
622
623 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
624 ret = request_firmware(&fw, f, &dev->pdev->dev);
625 if (ret) {
626 snprintf(f, sizeof(f), "nouveau/%s", fwname);
627 ret = request_firmware(&fw, f, &dev->pdev->dev);
628 if (ret) {
629 NV_ERROR(dev, "failed to load %s\n", fwname);
630 return ret;
631 }
632 }
633
634 fuc->size = fw->size;
635 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
636 release_firmware(fw);
637 return (fuc->data != NULL) ? 0 : -ENOMEM;
638}
639
640static void
641nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
642{
643 if (fuc->data) {
644 kfree(fuc->data);
645 fuc->data = NULL;
646 }
647}
648
649static void
650nvc0_graph_destroy(struct drm_device *dev, int engine)
651{
652 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
653
654 nvc0_graph_destroy_fw(&priv->fuc409c);
655 nvc0_graph_destroy_fw(&priv->fuc409d);
656 nvc0_graph_destroy_fw(&priv->fuc41ac);
657 nvc0_graph_destroy_fw(&priv->fuc41ad);
658
659 nouveau_irq_unregister(dev, 12);
660 nouveau_irq_unregister(dev, 25);
661
662 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
663 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
664
665 if (priv->grctx_vals)
666 kfree(priv->grctx_vals);
667
668 NVOBJ_ENGINE_DEL(dev, GR);
669 kfree(priv);
670}
671
672int
673nvc0_graph_create(struct drm_device *dev)
674{
675 struct drm_nouveau_private *dev_priv = dev->dev_private;
676 struct nvc0_graph_priv *priv;
677 int ret, gpc, i;
678
679 switch (dev_priv->chipset) {
680 case 0xc0:
681 case 0xc3:
682 case 0xc4:
683 break;
684 default:
685 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
686 return 0;
687 }
688
689 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
690 if (!priv)
691 return -ENOMEM;
692
693 priv->base.destroy = nvc0_graph_destroy;
694 priv->base.init = nvc0_graph_init;
695 priv->base.fini = nvc0_graph_fini;
696 priv->base.context_new = nvc0_graph_context_new;
697 priv->base.context_del = nvc0_graph_context_del;
698 priv->base.object_new = nvc0_graph_object_new;
699
700 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
701 nouveau_irq_register(dev, 12, nvc0_graph_isr);
702 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
703
704 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
705 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
706 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
707 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
708 ret = 0;
709 goto error;
710 }
711
712
713 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
714 if (ret)
715 goto error;
716
717 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
718 if (ret)
719 goto error;
720
721 for (i = 0; i < 0x1000; i += 4) {
722 nv_wo32(priv->unk4188b4, i, 0x00000010);
723 nv_wo32(priv->unk4188b8, i, 0x00000010);
724 }
725
726 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
727 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
728 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
729 priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
730 priv->tp_total += priv->tp_nr[gpc];
731 }
732
733 /*XXX: these need figuring out... */
734 switch (dev_priv->chipset) {
735 case 0xc0:
736 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
737 priv->magic_not_rop_nr = 0x07;
738 /* filled values up to tp_total, the rest 0 */
739 priv->magicgpc918 = 0x000ba2e9;
740 } else
741 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
742 priv->magic_not_rop_nr = 0x05;
743 priv->magicgpc918 = 0x00092493;
744 } else
745 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
746 priv->magic_not_rop_nr = 0x06;
747 priv->magicgpc918 = 0x00088889;
748 }
749 break;
750 case 0xc3: /* 450, 4/0/0/0, 2 */
751 priv->magic_not_rop_nr = 0x03;
752 priv->magicgpc918 = 0x00200000;
753 break;
754 case 0xc4: /* 460, 3/4/0/0, 4 */
755 priv->magic_not_rop_nr = 0x01;
756 priv->magicgpc918 = 0x00124925;
757 break;
758 }
759
760 if (!priv->magic_not_rop_nr) {
761 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
762 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
763 priv->tp_nr[3], priv->rop_nr);
764 /* use 0xc3's values... */
765 priv->magic_not_rop_nr = 0x03;
766 priv->magicgpc918 = 0x00200000;
767 }
768
769 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
770 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
771 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
772 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
773 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
774 return 0;
775
776error:
777 nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
778 return ret;
779}
780
781MODULE_FIRMWARE("nouveau/nvc0_fuc409c");
782MODULE_FIRMWARE("nouveau/nvc0_fuc409d");
783MODULE_FIRMWARE("nouveau/nvc0_fuc41ac");
784MODULE_FIRMWARE("nouveau/nvc0_fuc41ad");
785MODULE_FIRMWARE("nouveau/nvc3_fuc409c");
786MODULE_FIRMWARE("nouveau/nvc3_fuc409d");
787MODULE_FIRMWARE("nouveau/nvc3_fuc41ac");
788MODULE_FIRMWARE("nouveau/nvc3_fuc41ad");
789MODULE_FIRMWARE("nouveau/nvc4_fuc409c");
790MODULE_FIRMWARE("nouveau/nvc4_fuc409d");
791MODULE_FIRMWARE("nouveau/nvc4_fuc41ac");
792MODULE_FIRMWARE("nouveau/nvc4_fuc41ad");
793MODULE_FIRMWARE("nouveau/fuc409c");
794MODULE_FIRMWARE("nouveau/fuc409d");
795MODULE_FIRMWARE("nouveau/fuc41ac");
796MODULE_FIRMWARE("nouveau/fuc41ad");
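
The MODULE_FIRMWARE() list mirrors the two-step lookup in nvc0_graph_create_fw() above: a chipset-qualified name is requested first and the bare name is the fallback. Note also that a missing blob sends nvc0_graph_create() through the ret = 0 error path, so the engine is torn down but device initialisation still succeeds, only without PGRAPH acceleration. A trivial standalone sketch of the candidate names tried for one blob on a 0xc3 board:

	#include <stdio.h>

	int main(void)
	{
		char f[32];

		/* first candidate: chipset-qualified, as in the hunk above */
		snprintf(f, sizeof(f), "nouveau/nv%02x_%s", 0xc3, "fuc409c");
		printf("%s\n", f);			/* nouveau/nvc3_fuc409c */
		printf("nouveau/%s\n", "fuc409c");	/* generic fallback */
		return 0;
	}
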
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 40e26f9c56c4..f5d184e0689d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -28,13 +28,25 @@
28#define GPC_MAX 4 28#define GPC_MAX 4
29#define TP_MAX 32 29#define TP_MAX 32
30 30
31#define ROP_BCAST(r) (0x408800 + (r)) 31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u,r) (0x410000 + (u) * 0x400 + (r)) 32#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r)) 33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t,r) (0x500000 + (t) * 0x8000 + (r)) 34#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
35#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r)) 35#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
36
37struct nvc0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
36 41
37struct nvc0_graph_priv { 42struct nvc0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nvc0_graph_fuc fuc409c;
46 struct nvc0_graph_fuc fuc409d;
47 struct nvc0_graph_fuc fuc41ac;
48 struct nvc0_graph_fuc fuc41ad;
49
38 u8 gpc_nr; 50 u8 gpc_nr;
39 u8 rop_nr; 51 u8 rop_nr;
40 u8 tp_nr[GPC_MAX]; 52 u8 tp_nr[GPC_MAX];
@@ -46,15 +58,14 @@ struct nvc0_graph_priv {
46 struct nouveau_gpuobj *unk4188b8; 58 struct nouveau_gpuobj *unk4188b8;
47 59
48 u8 magic_not_rop_nr; 60 u8 magic_not_rop_nr;
49 u32 magicgpc980[4];
50 u32 magicgpc918; 61 u32 magicgpc918;
51}; 62};
52 63
53struct nvc0_graph_chan { 64struct nvc0_graph_chan {
54 struct nouveau_gpuobj *grctx; 65 struct nouveau_gpuobj *grctx;
55 struct nouveau_gpuobj *unk408004; // 0x418810 too 66 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
56 struct nouveau_gpuobj *unk40800c; // 0x419004 too 67 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
57 struct nouveau_gpuobj *unk418810; // 0x419848 too 68 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
58 struct nouveau_gpuobj *mmio; 69 struct nouveau_gpuobj *mmio;
59 int mmio_nr; 70 int mmio_nr;
60}; 71};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index f880ff776db8..6df066114133 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1623,7 +1623,7 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
1623{ 1623{
1624 struct drm_nouveau_private *dev_priv = dev->dev_private; 1624 struct drm_nouveau_private *dev_priv = dev->dev_private;
1625 1625
1626 // ROPC_BROADCAST 1626 /* ROPC_BROADCAST */
1627 nv_wr32(dev, 0x408800, 0x02802a3c); 1627 nv_wr32(dev, 0x408800, 0x02802a3c);
1628 nv_wr32(dev, 0x408804, 0x00000040); 1628 nv_wr32(dev, 0x408804, 0x00000040);
1629 nv_wr32(dev, 0x408808, 0x0003e00d); 1629 nv_wr32(dev, 0x408808, 0x0003e00d);
@@ -1647,7 +1647,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1647{ 1647{
1648 int i; 1648 int i;
1649 1649
1650 // GPC_BROADCAST 1650 /* GPC_BROADCAST */
1651 nv_wr32(dev, 0x418380, 0x00000016); 1651 nv_wr32(dev, 0x418380, 0x00000016);
1652 nv_wr32(dev, 0x418400, 0x38004e00); 1652 nv_wr32(dev, 0x418400, 0x38004e00);
1653 nv_wr32(dev, 0x418404, 0x71e0ffff); 1653 nv_wr32(dev, 0x418404, 0x71e0ffff);
@@ -1728,7 +1728,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1728{ 1728{
1729 struct drm_nouveau_private *dev_priv = dev->dev_private; 1729 struct drm_nouveau_private *dev_priv = dev->dev_private;
1730 1730
1731 // GPC_BROADCAST.TP_BROADCAST 1731 /* GPC_BROADCAST.TP_BROADCAST */
1732 nv_wr32(dev, 0x419848, 0x00000000); 1732 nv_wr32(dev, 0x419848, 0x00000000);
1733 nv_wr32(dev, 0x419864, 0x0000012a); 1733 nv_wr32(dev, 0x419864, 0x0000012a);
1734 nv_wr32(dev, 0x419888, 0x00000000); 1734 nv_wr32(dev, 0x419888, 0x00000000);
@@ -1741,7 +1741,7 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1741 nv_wr32(dev, 0x419a1c, 0x00000000); 1741 nv_wr32(dev, 0x419a1c, 0x00000000);
1742 nv_wr32(dev, 0x419a20, 0x00000800); 1742 nv_wr32(dev, 0x419a20, 0x00000800);
1743 if (dev_priv->chipset != 0xc0) 1743 if (dev_priv->chipset != 0xc0)
1744 nv_wr32(dev, 0x00419ac4, 0x0007f440); // 0xc3 1744 nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
1745 nv_wr32(dev, 0x419b00, 0x0a418820); 1745 nv_wr32(dev, 0x419b00, 0x0a418820);
1746 nv_wr32(dev, 0x419b04, 0x062080e6); 1746 nv_wr32(dev, 0x419b04, 0x062080e6);
1747 nv_wr32(dev, 0x419b08, 0x020398a4); 1747 nv_wr32(dev, 0x419b08, 0x020398a4);
@@ -1797,8 +1797,8 @@ int
1797nvc0_grctx_generate(struct nouveau_channel *chan) 1797nvc0_grctx_generate(struct nouveau_channel *chan)
1798{ 1798{
1799 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 1799 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1800 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; 1800 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
1801 struct nvc0_graph_chan *grch = chan->pgraph_ctx; 1801 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
1802 struct drm_device *dev = chan->dev; 1802 struct drm_device *dev = chan->dev;
1803 int i, gpc, tp, id; 1803 int i, gpc, tp, id;
1804 u32 r000260, tmp; 1804 u32 r000260, tmp;
@@ -1912,13 +1912,13 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1912 for (i = 1; i < 7; i++) 1912 for (i = 1; i < 7; i++)
1913 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); 1913 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
1914 1914
1915 // GPC_BROADCAST 1915 /* GPC_BROADCAST */
1916 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) | 1916 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
1917 priv->magic_not_rop_nr); 1917 priv->magic_not_rop_nr);
1918 for (i = 0; i < 6; i++) 1918 for (i = 0; i < 6; i++)
1919 nv_wr32(dev, 0x418b08 + (i * 4), data[i]); 1919 nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
1920 1920
1921 // GPC_BROADCAST.TP_BROADCAST 1921 /* GPC_BROADCAST.TP_BROADCAST */
1922 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) | 1922 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
1923 priv->magic_not_rop_nr | 1923 priv->magic_not_rop_nr |
1924 data2[0]); 1924 data2[0]);
@@ -1926,7 +1926,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1926 for (i = 0; i < 6; i++) 1926 for (i = 0; i < 6; i++)
1927 nv_wr32(dev, 0x419b00 + (i * 4), data[i]); 1927 nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
1928 1928
1929 // UNK78xx 1929 /* UNK78xx */
1930 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) | 1930 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
1931 priv->magic_not_rop_nr); 1931 priv->magic_not_rop_nr);
1932 for (i = 0; i < 6; i++) 1932 for (i = 0; i < 6; i++)
@@ -1944,7 +1944,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1944 gpc = -1; 1944 gpc = -1;
1945 for (i = 0, gpc = -1; i < 32; i++) { 1945 for (i = 0, gpc = -1; i < 32; i++) {
1946 int ltp = i * (priv->tp_total - 1) / 32; 1946 int ltp = i * (priv->tp_total - 1) / 32;
1947 1947
1948 do { 1948 do {
1949 gpc = (gpc + 1) % priv->gpc_nr; 1949 gpc = (gpc + 1) % priv->gpc_nr;
1950 } while (!tpnr[gpc]); 1950 } while (!tpnr[gpc]);
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7bd745689097..ebdb0fdb8348 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -652,12 +652,12 @@ static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
652 652
653static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) 653static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
654{ 654{
655 uint8_t count = U8((*ptr)++); 655 unsigned count = U8((*ptr)++);
656 SDEBUG(" count: %d\n", count); 656 SDEBUG(" count: %d\n", count);
657 if (arg == ATOM_UNIT_MICROSEC) 657 if (arg == ATOM_UNIT_MICROSEC)
658 udelay(count); 658 udelay(count);
659 else 659 else
660 schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 660 msleep(count);
661} 661}
662 662
663static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) 663static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 7fd88497b930..49611e2365d9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -726,6 +726,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
726#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d 726#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
727#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e 727#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
728#define ATOM_ENCODER_CMD_SETUP 0x0f 728#define ATOM_ENCODER_CMD_SETUP 0x0f
729#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10
729 730
730// ucStatus 731// ucStatus
731#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 732#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
@@ -765,13 +766,19 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
765 USHORT usPixelClock; // in 10KHz; for bios convenient 766 USHORT usPixelClock; // in 10KHz; for bios convenient
766 ATOM_DIG_ENCODER_CONFIG_V3 acConfig; 767 ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
767 UCHAR ucAction; 768 UCHAR ucAction;
768 UCHAR ucEncoderMode; 769 union {
770 UCHAR ucEncoderMode;
769 // =0: DP encoder 771 // =0: DP encoder
770 // =1: LVDS encoder 772 // =1: LVDS encoder
771 // =2: DVI encoder 773 // =2: DVI encoder
772 // =3: HDMI encoder 774 // =3: HDMI encoder
773 // =4: SDVO encoder 775 // =4: SDVO encoder
774 // =5: DP audio 776 // =5: DP audio
777 UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
778 // =0: external DP
779 // =1: internal DP2
780 // =0x11: internal DP1 for NutMeg/Travis DP translator
781 };
775 UCHAR ucLaneNum; // how many lanes to enable 782 UCHAR ucLaneNum; // how many lanes to enable
776 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP 783 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
777 UCHAR ucReserved; 784 UCHAR ucReserved;
@@ -816,13 +823,19 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
816 UCHAR ucConfig; 823 UCHAR ucConfig;
817 }; 824 };
818 UCHAR ucAction; 825 UCHAR ucAction;
819 UCHAR ucEncoderMode; 826 union {
827 UCHAR ucEncoderMode;
820 // =0: DP encoder 828 // =0: DP encoder
821 // =1: LVDS encoder 829 // =1: LVDS encoder
822 // =2: DVI encoder 830 // =2: DVI encoder
823 // =3: HDMI encoder 831 // =3: HDMI encoder
824 // =4: SDVO encoder 832 // =4: SDVO encoder
825 // =5: DP audio 833 // =5: DP audio
834 UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
835 // =0: external DP
836 // =1: internal DP2
837 // =0x11: internal DP1 for NutMeg/Travis DP translator
838 };
826 UCHAR ucLaneNum; // how many lanes to enable 839 UCHAR ucLaneNum; // how many lanes to enable
827 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP 840 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
828 UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version 841 UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version
@@ -836,6 +849,11 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
836#define PANEL_12BIT_PER_COLOR 0x04 849#define PANEL_12BIT_PER_COLOR 0x04
837#define PANEL_16BIT_PER_COLOR 0x05 850#define PANEL_16BIT_PER_COLOR 0x05
838 851
852//define ucPanelMode
853#define DP_PANEL_MODE_EXTERNAL_DP_MODE 0x00
854#define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01
855#define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11
856
839/****************************************************************************/ 857/****************************************************************************/
840// Structures used by UNIPHYTransmitterControlTable 858// Structures used by UNIPHYTransmitterControlTable
841// LVTMATransmitterControlTable 859// LVTMATransmitterControlTable
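
With the new unions, panel mode is programmed by reusing the ucEncoderMode byte: set ucAction to ATOM_ENCODER_CMD_SETUP_PANEL_MODE and write one of the DP_PANEL_MODE_* values into the same slot. A hedged sketch against the V4 parameter block (struct and defines from the hunks above; the table-execution plumbing is omitted and the helper name is hypothetical):

	#include <string.h>
	#include "atombios.h"	/* V4 struct plus the new panel-mode defines */

	/* Sketch: fill the encoder-control args to select a panel mode. */
	static void fill_panel_mode_args(DIG_ENCODER_CONTROL_PARAMETERS_V4 *args)
	{
		memset(args, 0, sizeof(*args));
		args->ucAction = ATOM_ENCODER_CMD_SETUP_PANEL_MODE;
		/* ucPanelMode overlays ucEncoderMode for this action */
		args->ucPanelMode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
	}
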
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 529a3a704731..ec848787d7d9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -420,7 +420,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
420 420
421 if (ASIC_IS_DCE5(rdev)) { 421 if (ASIC_IS_DCE5(rdev)) {
422 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); 422 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
423 args.v3.ucSpreadSpectrumType = ss->type; 423 args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
424 switch (pll_id) { 424 switch (pll_id) {
425 case ATOM_PPLL1: 425 case ATOM_PPLL1:
426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; 426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
@@ -440,10 +440,12 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
440 case ATOM_PPLL_INVALID: 440 case ATOM_PPLL_INVALID:
441 return; 441 return;
442 } 442 }
443 args.v2.ucEnable = enable; 443 args.v3.ucEnable = enable;
444 if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
445 args.v3.ucEnable = ATOM_DISABLE;
444 } else if (ASIC_IS_DCE4(rdev)) { 446 } else if (ASIC_IS_DCE4(rdev)) {
445 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 447 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
446 args.v2.ucSpreadSpectrumType = ss->type; 448 args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
447 switch (pll_id) { 449 switch (pll_id) {
448 case ATOM_PPLL1: 450 case ATOM_PPLL1:
449 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; 451 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
@@ -464,32 +466,36 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
464 return; 466 return;
465 } 467 }
466 args.v2.ucEnable = enable; 468 args.v2.ucEnable = enable;
469 if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
470 args.v2.ucEnable = ATOM_DISABLE;
467 } else if (ASIC_IS_DCE3(rdev)) { 471 } else if (ASIC_IS_DCE3(rdev)) {
468 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 472 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
469 args.v1.ucSpreadSpectrumType = ss->type; 473 args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
470 args.v1.ucSpreadSpectrumStep = ss->step; 474 args.v1.ucSpreadSpectrumStep = ss->step;
471 args.v1.ucSpreadSpectrumDelay = ss->delay; 475 args.v1.ucSpreadSpectrumDelay = ss->delay;
472 args.v1.ucSpreadSpectrumRange = ss->range; 476 args.v1.ucSpreadSpectrumRange = ss->range;
473 args.v1.ucPpll = pll_id; 477 args.v1.ucPpll = pll_id;
474 args.v1.ucEnable = enable; 478 args.v1.ucEnable = enable;
475 } else if (ASIC_IS_AVIVO(rdev)) { 479 } else if (ASIC_IS_AVIVO(rdev)) {
476 if (enable == ATOM_DISABLE) { 480 if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
481 (ss->type & ATOM_EXTERNAL_SS_MASK)) {
477 atombios_disable_ss(crtc); 482 atombios_disable_ss(crtc);
478 return; 483 return;
479 } 484 }
480 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 485 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
481 args.lvds_ss_2.ucSpreadSpectrumType = ss->type; 486 args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
482 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; 487 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
483 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; 488 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
484 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; 489 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
485 args.lvds_ss_2.ucEnable = enable; 490 args.lvds_ss_2.ucEnable = enable;
486 } else { 491 } else {
487 if (enable == ATOM_DISABLE) { 492 if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
493 (ss->type & ATOM_EXTERNAL_SS_MASK)) {
488 atombios_disable_ss(crtc); 494 atombios_disable_ss(crtc);
489 return; 495 return;
490 } 496 }
491 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 497 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
492 args.lvds_ss.ucSpreadSpectrumType = ss->type; 498 args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
493 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; 499 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
494 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; 500 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
495 args.lvds_ss.ucEnable = enable; 501 args.lvds_ss.ucEnable = enable;
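
Each DCE branch now applies the same pair of fixes: ss->type is masked down to its spread-mode bits before the PLL-select flags are OR'd in, and spread is forced off when the percentage is zero or the spread comes from an external clock source. A sketch of that guard expressed as a predicate (hypothetical helper; macros and fields from the patch):

	/* Sketch: should the PLL program its own spread spectrum? */
	static bool radeon_ss_applicable(int enable,
					 const struct radeon_atom_ss *ss)
	{
		if (enable == ATOM_DISABLE)
			return false;
		/* zero-percent spread, or spread generated externally,
		 * must not be enabled inside the PLL */
		if (ss->percentage == 0 || (ss->type & ATOM_EXTERNAL_SS_MASK))
			return false;
		return true;
	}
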
@@ -512,6 +518,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
512 struct radeon_device *rdev = dev->dev_private; 518 struct radeon_device *rdev = dev->dev_private;
513 struct drm_encoder *encoder = NULL; 519 struct drm_encoder *encoder = NULL;
514 struct radeon_encoder *radeon_encoder = NULL; 520 struct radeon_encoder *radeon_encoder = NULL;
521 struct drm_connector *connector = NULL;
515 u32 adjusted_clock = mode->clock; 522 u32 adjusted_clock = mode->clock;
516 int encoder_mode = 0; 523 int encoder_mode = 0;
517 u32 dp_clock = mode->clock; 524 u32 dp_clock = mode->clock;
@@ -546,9 +553,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
546 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
547 if (encoder->crtc == crtc) { 554 if (encoder->crtc == crtc) {
548 radeon_encoder = to_radeon_encoder(encoder); 555 radeon_encoder = to_radeon_encoder(encoder);
556 connector = radeon_get_connector_for_encoder(encoder);
557 if (connector)
558 bpc = connector->display_info.bpc;
549 encoder_mode = atombios_get_encoder_mode(encoder); 559 encoder_mode = atombios_get_encoder_mode(encoder);
550 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { 560 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
551 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 561 radeon_encoder_is_dp_bridge(encoder)) {
552 if (connector) { 562 if (connector) {
553 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 563 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
554 struct radeon_connector_atom_dig *dig_connector = 564 struct radeon_connector_atom_dig *dig_connector =
@@ -612,7 +622,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
         args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
         args.v1.ucTransmitterID = radeon_encoder->encoder_id;
         args.v1.ucEncodeMode = encoder_mode;
-        if (ss_enabled)
+        if (ss_enabled && ss->percentage)
             args.v1.ucConfig |=
                 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
 
@@ -625,10 +635,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
         args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
         args.v3.sInput.ucEncodeMode = encoder_mode;
         args.v3.sInput.ucDispPllConfig = 0;
-        if (ss_enabled)
+        if (ss_enabled && ss->percentage)
             args.v3.sInput.ucDispPllConfig |=
                 DISPPLL_CONFIG_SS_ENABLE;
-        if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+        if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
+            radeon_encoder_is_dp_bridge(encoder)) {
             struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
             if (encoder_mode == ATOM_ENCODER_MODE_DP) {
                 args.v3.sInput.ucDispPllConfig |=
@@ -754,7 +765,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
                       u32 ref_div,
                       u32 fb_div,
                       u32 frac_fb_div,
-                      u32 post_div)
+                      u32 post_div,
+                      int bpc,
+                      bool ss_enabled,
+                      struct radeon_atom_ss *ss)
 {
     struct drm_device *dev = crtc->dev;
     struct radeon_device *rdev = dev->dev_private;
@@ -801,6 +815,8 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
         args.v3.ucPostDiv = post_div;
         args.v3.ucPpll = pll_id;
         args.v3.ucMiscInfo = (pll_id << 2);
+        if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+            args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
         args.v3.ucTransmitterId = encoder_id;
         args.v3.ucEncoderMode = encoder_mode;
         break;
@@ -812,6 +828,17 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
         args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
         args.v5.ucPostDiv = post_div;
         args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+        if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+            args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+        switch (bpc) {
+        case 8:
+        default:
+            args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+            break;
+        case 10:
+            args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+            break;
+        }
         args.v5.ucTransmitterID = encoder_id;
         args.v5.ucEncoderMode = encoder_mode;
         args.v5.ucPpll = pll_id;
@@ -824,6 +851,23 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
         args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
         args.v6.ucPostDiv = post_div;
         args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+        if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+            args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+        switch (bpc) {
+        case 8:
+        default:
+            args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+            break;
+        case 10:
+            args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+            break;
+        case 12:
+            args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+            break;
+        case 16:
+            args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+            break;
+        }
         args.v6.ucTransmitterID = encoder_id;
         args.v6.ucEncoderMode = encoder_mode;
         args.v6.ucPpll = pll_id;
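The two switch blocks above encode the standard HDMI deep-color mapping: 8, 10, 12 and 16 bits per color correspond to 24, 30, 36 and 48 bits per pixel, with anything unrecognized falling back to 24bpp. A small sketch of the same mapping as a helper (function name is illustrative, not driver API):

    static int ex_hdmi_bpp_from_bpc(int bpc)
    {
        switch (bpc) {
        case 10: return 30;    /* PIXEL_CLOCK_V6_MISC_HDMI_30BPP */
        case 12: return 36;    /* PIXEL_CLOCK_V6_MISC_HDMI_36BPP */
        case 16: return 48;    /* PIXEL_CLOCK_V6_MISC_HDMI_48BPP */
        case 8:
        default: return 24;    /* unknown EDID bpc: safe 24bpp fallback */
        }
    }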
@@ -855,6 +899,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
     int encoder_mode = 0;
     struct radeon_atom_ss ss;
     bool ss_enabled = false;
+    int bpc = 8;
 
     list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
         if (encoder->crtc == crtc) {
@@ -891,41 +936,30 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                     struct radeon_connector_atom_dig *dig_connector =
                         radeon_connector->con_priv;
                     int dp_clock;
+                    bpc = connector->display_info.bpc;
 
                     switch (encoder_mode) {
                     case ATOM_ENCODER_MODE_DP:
                         /* DP/eDP */
                         dp_clock = dig_connector->dp_clock / 10;
-                        if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
-                            if (ASIC_IS_DCE4(rdev))
-                                ss_enabled =
-                                    radeon_atombios_get_asic_ss_info(rdev, &ss,
-                                                     dig->lcd_ss_id,
-                                                     dp_clock);
-                            else
-                                ss_enabled =
-                                    radeon_atombios_get_ppll_ss_info(rdev, &ss,
-                                                     dig->lcd_ss_id);
-                        } else {
-                            if (ASIC_IS_DCE4(rdev))
-                                ss_enabled =
-                                    radeon_atombios_get_asic_ss_info(rdev, &ss,
-                                                     ASIC_INTERNAL_SS_ON_DP,
-                                                     dp_clock);
-                            else {
-                                if (dp_clock == 16200) {
-                                    ss_enabled =
-                                        radeon_atombios_get_ppll_ss_info(rdev, &ss,
-                                                         ATOM_DP_SS_ID2);
-                                    if (!ss_enabled)
-                                        ss_enabled =
-                                            radeon_atombios_get_ppll_ss_info(rdev, &ss,
-                                                             ATOM_DP_SS_ID1);
-                                } else
-                                    ss_enabled =
-                                        radeon_atombios_get_ppll_ss_info(rdev, &ss,
-                                                         ATOM_DP_SS_ID1);
-                            }
-                        }
+                        if (ASIC_IS_DCE4(rdev))
+                            ss_enabled =
+                                radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                 ASIC_INTERNAL_SS_ON_DP,
+                                                 dp_clock);
+                        else {
+                            if (dp_clock == 16200) {
+                                ss_enabled =
+                                    radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                     ATOM_DP_SS_ID2);
+                                if (!ss_enabled)
+                                    ss_enabled =
+                                        radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                         ATOM_DP_SS_ID1);
+                            } else
+                                ss_enabled =
+                                    radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                     ATOM_DP_SS_ID1);
+                        }
                         break;
                     case ATOM_ENCODER_MODE_LVDS:
@@ -974,7 +1008,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 
     atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
                   encoder_mode, radeon_encoder->encoder_id, mode->clock,
-                  ref_div, fb_div, frac_fb_div, post_div);
+                  ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss);
 
     if (ss_enabled) {
         /* calculate ss amount and step size */
@@ -982,7 +1016,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
         u32 step_size;
         u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
         ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
-        ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+        ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
             ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
         if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
             step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
@@ -1395,11 +1429,19 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
     uint32_t pll_in_use = 0;
 
     if (ASIC_IS_DCE4(rdev)) {
-        /* if crtc is driving DP and we have an ext clock, use that */
         list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
             if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
+                /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+                 * depending on the asic:
+                 * DCE4: PPLL or ext clock
+                 * DCE5: DCPLL or ext clock
+                 *
+                 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+                 * PPLL/DCPLL programming and only program the DP DTO for the
+                 * crtc virtual pixel clock.
+                 */
                 if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
-                    if (rdev->clock.dp_extclk)
+                    if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
                         return ATOM_PPLL_INVALID;
                 }
             }
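A condensed sketch of the rule the new comment describes (helper is hypothetical, not driver code): on DCE5 the DP reference always comes from DCPLL or the external clock, so a DP crtc never needs a PPLL reserved there; on DCE4 that is only true when a dedicated external DP clock exists.

    static bool ex_dp_crtc_skips_ppll(bool is_dce5, bool has_dp_extclk)
    {
        /* ATOM_PPLL_INVALID: SetPixelClock only programs the DP DTO */
        return is_dce5 || has_dp_extclk;
    }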
@@ -1515,6 +1557,8 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
 static void atombios_crtc_disable(struct drm_crtc *crtc)
 {
     struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+    struct radeon_atom_ss ss;
+
     atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
     switch (radeon_crtc->pll_id) {
@@ -1522,7 +1566,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
     case ATOM_PPLL2:
         /* disable the ppll */
         atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
-                      0, 0, ATOM_DISABLE, 0, 0, 0, 0);
+                      0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
         break;
     default:
         break;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 695de9a38506..8c0f9e36ff8e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -43,158 +43,242 @@ static char *pre_emph_names[] = {
43 "0dB", "3.5dB", "6dB", "9.5dB" 43 "0dB", "3.5dB", "6dB", "9.5dB"
44}; 44};
45 45
46static const int dp_clocks[] = { 46/***** radeon AUX functions *****/
47 54000, /* 1 lane, 1.62 Ghz */ 47union aux_channel_transaction {
48 90000, /* 1 lane, 2.70 Ghz */ 48 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
49 108000, /* 2 lane, 1.62 Ghz */ 49 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
50 180000, /* 2 lane, 2.70 Ghz */
51 216000, /* 4 lane, 1.62 Ghz */
52 360000, /* 4 lane, 2.70 Ghz */
53}; 50};
54 51
55static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int); 52static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
53 u8 *send, int send_bytes,
54 u8 *recv, int recv_size,
55 u8 delay, u8 *ack)
56{
57 struct drm_device *dev = chan->dev;
58 struct radeon_device *rdev = dev->dev_private;
59 union aux_channel_transaction args;
60 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
61 unsigned char *base;
62 int recv_bytes;
63
64 memset(&args, 0, sizeof(args));
56 65
57/* common helper functions */ 66 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
58static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 67
68 memcpy(base, send, send_bytes);
69
70 args.v1.lpAuxRequest = 0;
71 args.v1.lpDataOut = 16;
72 args.v1.ucDataOutLen = 0;
73 args.v1.ucChannelID = chan->rec.i2c_id;
74 args.v1.ucDelay = delay / 10;
75 if (ASIC_IS_DCE4(rdev))
76 args.v2.ucHPD_ID = chan->rec.hpd;
77
78 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
79
80 *ack = args.v1.ucReplyStatus;
81
82 /* timeout */
83 if (args.v1.ucReplyStatus == 1) {
84 DRM_DEBUG_KMS("dp_aux_ch timeout\n");
85 return -ETIMEDOUT;
86 }
87
88 /* flags not zero */
89 if (args.v1.ucReplyStatus == 2) {
90 DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
91 return -EBUSY;
92 }
93
94 /* error */
95 if (args.v1.ucReplyStatus == 3) {
96 DRM_DEBUG_KMS("dp_aux_ch error\n");
97 return -EIO;
98 }
99
100 recv_bytes = args.v1.ucDataOutLen;
101 if (recv_bytes > recv_size)
102 recv_bytes = recv_size;
103
104 if (recv && recv_size)
105 memcpy(recv, base + 16, recv_bytes);
106
107 return recv_bytes;
108}
109
110static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
111 u16 address, u8 *send, u8 send_bytes, u8 delay)
59{ 112{
60 int i; 113 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
61 u8 max_link_bw; 114 int ret;
62 u8 max_lane_count; 115 u8 msg[20];
116 int msg_bytes = send_bytes + 4;
117 u8 ack;
63 118
64 if (!dpcd) 119 if (send_bytes > 16)
65 return 0; 120 return -1;
66 121
67 max_link_bw = dpcd[DP_MAX_LINK_RATE]; 122 msg[0] = address;
68 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 123 msg[1] = address >> 8;
124 msg[2] = AUX_NATIVE_WRITE << 4;
125 msg[3] = (msg_bytes << 4) | (send_bytes - 1);
126 memcpy(&msg[4], send, send_bytes);
69 127
70 switch (max_link_bw) { 128 while (1) {
71 case DP_LINK_BW_1_62: 129 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
72 default: 130 msg, msg_bytes, NULL, 0, delay, &ack);
73 for (i = 0; i < num_dp_clocks; i++) { 131 if (ret < 0)
74 if (i % 2) 132 return ret;
75 continue; 133 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
76 switch (max_lane_count) { 134 break;
77 case 1: 135 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
78 if (i > 1) 136 udelay(400);
79 return 0; 137 else
80 break; 138 return -EIO;
81 case 2:
82 if (i > 3)
83 return 0;
84 break;
85 case 4:
86 default:
87 break;
88 }
89 if (dp_clocks[i] > mode_clock) {
90 if (i < 2)
91 return 1;
92 else if (i < 4)
93 return 2;
94 else
95 return 4;
96 }
97 }
98 break;
99 case DP_LINK_BW_2_7:
100 for (i = 0; i < num_dp_clocks; i++) {
101 switch (max_lane_count) {
102 case 1:
103 if (i > 1)
104 return 0;
105 break;
106 case 2:
107 if (i > 3)
108 return 0;
109 break;
110 case 4:
111 default:
112 break;
113 }
114 if (dp_clocks[i] > mode_clock) {
115 if (i < 2)
116 return 1;
117 else if (i < 4)
118 return 2;
119 else
120 return 4;
121 }
122 }
123 break;
124 } 139 }
125 140
126 return 0; 141 return send_bytes;
127} 142}
128 143
129static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 144static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
145 u16 address, u8 *recv, int recv_bytes, u8 delay)
130{ 146{
131 int i; 147 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
132 u8 max_link_bw; 148 u8 msg[4];
133 u8 max_lane_count; 149 int msg_bytes = 4;
150 u8 ack;
151 int ret;
134 152
135 if (!dpcd) 153 msg[0] = address;
136 return 0; 154 msg[1] = address >> 8;
155 msg[2] = AUX_NATIVE_READ << 4;
156 msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
157
158 while (1) {
159 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
160 msg, msg_bytes, recv, recv_bytes, delay, &ack);
161 if (ret == 0)
162 return -EPROTO;
163 if (ret < 0)
164 return ret;
165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
166 return ret;
167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
168 udelay(400);
169 else
170 return -EIO;
171 }
172}
137 173
138 max_link_bw = dpcd[DP_MAX_LINK_RATE]; 174static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
139 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; 175 u16 reg, u8 val)
176{
177 radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
178}
140 179
141 switch (max_link_bw) { 180static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
142 case DP_LINK_BW_1_62: 181 u16 reg)
182{
183 u8 val = 0;
184
185 radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
186
187 return val;
188}
189
190int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
191 u8 write_byte, u8 *read_byte)
192{
193 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
194 struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
195 u16 address = algo_data->address;
196 u8 msg[5];
197 u8 reply[2];
198 unsigned retry;
199 int msg_bytes;
200 int reply_bytes = 1;
201 int ret;
202 u8 ack;
203
204 /* Set up the command byte */
205 if (mode & MODE_I2C_READ)
206 msg[2] = AUX_I2C_READ << 4;
207 else
208 msg[2] = AUX_I2C_WRITE << 4;
209
210 if (!(mode & MODE_I2C_STOP))
211 msg[2] |= AUX_I2C_MOT << 4;
212
213 msg[0] = address;
214 msg[1] = address >> 8;
215
216 switch (mode) {
217 case MODE_I2C_WRITE:
218 msg_bytes = 5;
219 msg[3] = msg_bytes << 4;
220 msg[4] = write_byte;
221 break;
222 case MODE_I2C_READ:
223 msg_bytes = 4;
224 msg[3] = msg_bytes << 4;
225 break;
143 default: 226 default:
144 for (i = 0; i < num_dp_clocks; i++) { 227 msg_bytes = 4;
145 if (i % 2) 228 msg[3] = 3 << 4;
146 continue;
147 switch (max_lane_count) {
148 case 1:
149 if (i > 1)
150 return 0;
151 break;
152 case 2:
153 if (i > 3)
154 return 0;
155 break;
156 case 4:
157 default:
158 break;
159 }
160 if (dp_clocks[i] > mode_clock)
161 return 162000;
162 }
163 break; 229 break;
164 case DP_LINK_BW_2_7:
165 for (i = 0; i < num_dp_clocks; i++) {
166 switch (max_lane_count) {
167 case 1:
168 if (i > 1)
169 return 0;
170 break;
171 case 2:
172 if (i > 3)
173 return 0;
174 break;
175 case 4:
176 default:
177 break;
178 }
179 if (dp_clocks[i] > mode_clock)
180 return (i % 2) ? 270000 : 162000;
181 }
182 } 230 }
183 231
184 return 0; 232 for (retry = 0; retry < 4; retry++) {
185} 233 ret = radeon_process_aux_ch(auxch,
234 msg, msg_bytes, reply, reply_bytes, 0, &ack);
235 if (ret < 0) {
236 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
237 return ret;
238 }
186 239
187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 240 switch (ack & AUX_NATIVE_REPLY_MASK) {
188{ 241 case AUX_NATIVE_REPLY_ACK:
189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); 242 /* I2C-over-AUX Reply field is only valid
190 int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock); 243 * when paired with AUX ACK.
244 */
245 break;
246 case AUX_NATIVE_REPLY_NACK:
247 DRM_DEBUG_KMS("aux_ch native nack\n");
248 return -EREMOTEIO;
249 case AUX_NATIVE_REPLY_DEFER:
250 DRM_DEBUG_KMS("aux_ch native defer\n");
251 udelay(400);
252 continue;
253 default:
254 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
255 return -EREMOTEIO;
256 }
191 257
192 if ((lanes == 0) || (dp_clock == 0)) 258 switch (ack & AUX_I2C_REPLY_MASK) {
193 return MODE_CLOCK_HIGH; 259 case AUX_I2C_REPLY_ACK:
260 if (mode == MODE_I2C_READ)
261 *read_byte = reply[0];
262 return ret;
263 case AUX_I2C_REPLY_NACK:
264 DRM_DEBUG_KMS("aux_i2c nack\n");
265 return -EREMOTEIO;
266 case AUX_I2C_REPLY_DEFER:
267 DRM_DEBUG_KMS("aux_i2c defer\n");
268 udelay(400);
269 break;
270 default:
271 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
272 return -EREMOTEIO;
273 }
274 }
194 275
195 return MODE_OK; 276 DRM_ERROR("aux i2c too many retries, giving up\n");
277 return -EREMOTEIO;
196} 278}
197 279
280/***** general DP utility functions *****/
281
198static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) 282static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
199{ 283{
200 return link_status[r - DP_LANE0_1_STATUS]; 284 return link_status[r - DP_LANE0_1_STATUS];
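A short usage sketch for the reworked AUX helpers (illustrative caller, not part of the patch): radeon_process_aux_ch() now returns the received byte count or a negative errno decoded from ucReplyStatus, so callers can tell timeouts (-ETIMEDOUT), busy channels (-EBUSY) and hard errors (-EIO) apart instead of getting a bare bool.

    /* read the DPCD revision byte, assuming a connector whose
     * dp_i2c_bus has been set up; mirrors the driver's own checks */
    u8 rev;
    int ret = radeon_dp_aux_native_read(radeon_connector,
                                        DP_DPCD_REV, &rev, 1, 0);
    if (ret < 0)
        DRM_DEBUG_KMS("DPCD read failed: %d\n", ret);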
@@ -242,7 +326,7 @@ static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
     return true;
 }
 
-static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
                     int lane)
 
 {
@@ -255,7 +339,7 @@ static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE]
     return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
 }
 
-static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane)
 {
     int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -267,22 +351,8 @@ static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_
     return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 }
 
-/* XXX fix me -- chip specific */
 #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
-static u8 dp_pre_emphasis_max(u8 voltage_swing)
-{
-    switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-    case DP_TRAIN_VOLTAGE_SWING_400:
-        return DP_TRAIN_PRE_EMPHASIS_6;
-    case DP_TRAIN_VOLTAGE_SWING_600:
-        return DP_TRAIN_PRE_EMPHASIS_6;
-    case DP_TRAIN_VOLTAGE_SWING_800:
-        return DP_TRAIN_PRE_EMPHASIS_3_5;
-    case DP_TRAIN_VOLTAGE_SWING_1200:
-    default:
-        return DP_TRAIN_PRE_EMPHASIS_0;
-    }
-}
+#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
 
 static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
                 int lane_count,
@@ -308,10 +378,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
     }
 
     if (v >= DP_VOLTAGE_MAX)
-        v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+        v |= DP_TRAIN_MAX_SWING_REACHED;
 
-    if (p >= dp_pre_emphasis_max(v))
-        p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+    if (p >= DP_PRE_EMPHASIS_MAX)
+        p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
     DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
               voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
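A worked example of the new clamping, using the field layout from drm_dp_helper.h (swing level in bits 1:0, DP_TRAIN_MAX_SWING_REACHED at bit 2):

    /* v requests 1200 mV: DP_TRAIN_VOLTAGE_SWING_1200 == 0x3
     * new: v |= 0x4  ->  0x7, the requested level survives in bits 1:0
     * while bit 2 tells the sink no higher level is available.
     * Pre-emphasis is now capped at a fixed 9.5 dB instead of a
     * swing-dependent value that could silently lower the request. */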
@@ -321,110 +391,109 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
     train_set[lane] = v | p;
 }
 
-union aux_channel_transaction {
-    PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
-    PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
-};
-
-/* radeon aux chan functions */
-bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
-               int num_bytes, u8 *read_byte,
-               u8 read_buf_len, u8 delay)
+/* convert bits per color to bits per pixel */
+/* get bpc from the EDID */
+static int convert_bpc_to_bpp(int bpc)
 {
-    struct drm_device *dev = chan->dev;
-    struct radeon_device *rdev = dev->dev_private;
-    union aux_channel_transaction args;
-    int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
-    unsigned char *base;
-    int retry_count = 0;
-
-    memset(&args, 0, sizeof(args));
-
-    base = (unsigned char *)rdev->mode_info.atom_context->scratch;
-
-retry:
-    memcpy(base, req_bytes, num_bytes);
-
-    args.v1.lpAuxRequest = 0;
-    args.v1.lpDataOut = 16;
-    args.v1.ucDataOutLen = 0;
-    args.v1.ucChannelID = chan->rec.i2c_id;
-    args.v1.ucDelay = delay / 10;
-    if (ASIC_IS_DCE4(rdev))
-        args.v2.ucHPD_ID = chan->rec.hpd;
+    if (bpc == 0)
+        return 24;
+    else
+        return bpc * 3;
+}
 
-    atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+/* get the max pix clock supported by the link rate and lane num */
+static int dp_get_max_dp_pix_clock(int link_rate,
+                   int lane_num,
+                   int bpp)
+{
+    return (link_rate * lane_num * 8) / bpp;
+}
 
-    if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
-        if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
-            goto retry;
-        DRM_DEBUG_KMS("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
-                  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
-                  chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
-        return false;
+static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
+{
+    switch (dpcd[DP_MAX_LINK_RATE]) {
+    case DP_LINK_BW_1_62:
+    default:
+        return 162000;
+    case DP_LINK_BW_2_7:
+        return 270000;
+    case DP_LINK_BW_5_4:
+        return 540000;
     }
+}
 
-    if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
-        if (read_buf_len < args.v1.ucDataOutLen) {
-            DRM_ERROR("Buffer to small for return answer %d %d\n",
-                  read_buf_len, args.v1.ucDataOutLen);
-            return false;
-        }
-        {
-            int len = min(read_buf_len, args.v1.ucDataOutLen);
-            memcpy(read_byte, base + 16, len);
-        }
-    }
-    return true;
+static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
+{
+    return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
 }
 
-bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
-                uint8_t send_bytes, uint8_t *send)
+static u8 dp_get_dp_link_rate_coded(int link_rate)
 {
-    struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
-    u8 msg[20];
-    u8 msg_len, dp_msg_len;
-    bool ret;
+    switch (link_rate) {
+    case 162000:
+    default:
+        return DP_LINK_BW_1_62;
+    case 270000:
+        return DP_LINK_BW_2_7;
+    case 540000:
+        return DP_LINK_BW_5_4;
+    }
+}
 
-    dp_msg_len = 4;
-    msg[0] = address;
-    msg[1] = address >> 8;
-    msg[2] = AUX_NATIVE_WRITE << 4;
-    dp_msg_len += send_bytes;
-    msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
+/***** radeon specific DP functions *****/
 
-    if (send_bytes > 16)
-        return false;
+/* First get the min lane# when low rate is used according to pixel clock
+ * (prefer low rate), second check max lane# supported by DP panel,
+ * if the max lane# < low rate lane# then use max lane# instead.
+ */
+static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+                    u8 dpcd[DP_DPCD_SIZE],
+                    int pix_clock)
+{
+    int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+    int max_link_rate = dp_get_max_link_rate(dpcd);
+    int max_lane_num = dp_get_max_lane_number(dpcd);
+    int lane_num;
+    int max_dp_pix_clock;
+
+    for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+        max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+        if (pix_clock <= max_dp_pix_clock)
+            break;
+    }
 
-    memcpy(&msg[4], send, send_bytes);
-    msg_len = 4 + send_bytes;
-    ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
-    return ret;
+    return lane_num;
 }
 
-bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
-                   uint8_t delay, uint8_t expected_bytes,
-                   uint8_t *read_p)
+static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+                       u8 dpcd[DP_DPCD_SIZE],
+                       int pix_clock)
 {
-    struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
-    u8 msg[20];
-    u8 msg_len, dp_msg_len;
-    bool ret = false;
-    msg_len = 4;
-    dp_msg_len = 4;
-    msg[0] = address;
-    msg[1] = address >> 8;
-    msg[2] = AUX_NATIVE_READ << 4;
-    msg[3] = (dp_msg_len) << 4;
-    msg[3] |= expected_bytes - 1;
+    int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+    int lane_num, max_pix_clock;
+
+    if (radeon_connector_encoder_is_dp_bridge(connector))
+        return 270000;
+
+    lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+    max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+    if (pix_clock <= max_pix_clock)
+        return 162000;
+    max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+    if (pix_clock <= max_pix_clock)
+        return 270000;
+    if (radeon_connector_is_dp12_capable(connector)) {
+        max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+        if (pix_clock <= max_pix_clock)
+            return 540000;
+    }
 
-    ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
-    return ret;
+    return dp_get_max_link_rate(dpcd);
 }
 
-/* radeon dp functions */
-static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
-                    uint8_t ucconfig, uint8_t lane_num)
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+                    int action, int dp_clock,
+                    u8 ucconfig, u8 lane_num)
 {
     DP_ENCODER_SERVICE_PARAMETERS args;
     int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
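The helpers above reduce link configuration to one formula: a link can carry link_rate * lane_num * 8 / bpp kHz of pixel clock. A worked example under the stated assumptions:

    /* 1920x1080@60 (~148500 kHz), 8 bpc => bpp = 24,
     * DPCD advertising 2.7 GHz / 4 lanes:
     *
     *   lanes: 270000 * 1 * 8 / 24 =  90000  too low
     *          270000 * 2 * 8 / 24 = 180000  fits -> 2 lanes
     *
     *   rate:  162000 * 2 * 8 / 24 = 108000  too low
     *          270000 * 2 * 8 / 24 = 180000  fits -> 270000 (2.7 GHz)
     */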
@@ -454,60 +523,86 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
     struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
     u8 msg[25];
-    int ret;
+    int ret, i;
 
-    ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
-    if (ret) {
+    ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
+    if (ret > 0) {
         memcpy(dig_connector->dpcd, msg, 8);
-        {
-            int i;
-            DRM_DEBUG_KMS("DPCD: ");
-            for (i = 0; i < 8; i++)
-                DRM_DEBUG_KMS("%02x ", msg[i]);
-            DRM_DEBUG_KMS("\n");
-        }
+        DRM_DEBUG_KMS("DPCD: ");
+        for (i = 0; i < 8; i++)
+            DRM_DEBUG_KMS("%02x ", msg[i]);
+        DRM_DEBUG_KMS("\n");
         return true;
     }
     dig_connector->dpcd[0] = 0;
     return false;
 }
 
+static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
+                     struct drm_connector *connector)
+{
+    struct drm_device *dev = encoder->dev;
+    struct radeon_device *rdev = dev->dev_private;
+    int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+
+    if (!ASIC_IS_DCE4(rdev))
+        return;
+
+    if (radeon_connector_encoder_is_dp_bridge(connector))
+        panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+
+    atombios_dig_encoder_setup(encoder,
+                   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+                   panel_mode);
+}
+
 void radeon_dp_set_link_config(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
-    struct radeon_connector *radeon_connector;
+    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
     struct radeon_connector_atom_dig *dig_connector;
 
-    if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
-        (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
-        return;
-
-    radeon_connector = to_radeon_connector(connector);
     if (!radeon_connector->con_priv)
         return;
     dig_connector = radeon_connector->con_priv;
 
-    dig_connector->dp_clock =
-        dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
-    dig_connector->dp_lane_count =
-        dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
+    if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+        (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+        dig_connector->dp_clock =
+            radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+        dig_connector->dp_lane_count =
+            radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+    }
 }
 
-int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+int radeon_dp_mode_valid_helper(struct drm_connector *connector,
                 struct drm_display_mode *mode)
 {
-    struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+    struct radeon_connector_atom_dig *dig_connector;
+    int dp_clock;
+
+    if (!radeon_connector->con_priv)
+        return MODE_CLOCK_HIGH;
+    dig_connector = radeon_connector->con_priv;
+
+    dp_clock =
+        radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+
+    if ((dp_clock == 540000) &&
+        (!radeon_connector_is_dp12_capable(connector)))
+        return MODE_CLOCK_HIGH;
 
-    return dp_mode_valid(dig_connector->dpcd, mode->clock);
+    return MODE_OK;
 }
 
-static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
-                    u8 link_status[DP_LINK_STATUS_SIZE])
+static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
+                      u8 link_status[DP_LINK_STATUS_SIZE])
 {
     int ret;
-    ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
-                    DP_LINK_STATUS_SIZE, link_status);
-    if (!ret) {
+    ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
+                    link_status, DP_LINK_STATUS_SIZE, 100);
+    if (ret <= 0) {
         DRM_ERROR("displayport link status failed\n");
         return false;
     }
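radeon_dp_mode_valid_helper() now takes the generic drm_connector and recomputes the required link clock itself; a sketch of the resulting policy (condensed from the code above, not literal driver code):

    /* a mode that needs the 5.4 GHz (HBR2) rate is only valid if
     * the whole chain is DP 1.2 capable */
    if (dp_clock == 540000 && !radeon_connector_is_dp12_capable(connector))
        return MODE_CLOCK_HIGH;
    return MODE_OK;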
@@ -518,292 +613,309 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
     return true;
 }
 
-bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
-{
-    struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+struct radeon_dp_link_train_info {
+    struct radeon_device *rdev;
+    struct drm_encoder *encoder;
+    struct drm_connector *connector;
+    struct radeon_connector *radeon_connector;
+    int enc_id;
+    int dp_clock;
+    int dp_lane_count;
+    int rd_interval;
+    bool tp3_supported;
+    u8 dpcd[8];
+    u8 train_set[4];
     u8 link_status[DP_LINK_STATUS_SIZE];
+    u8 tries;
+};
 
-    if (!atom_dp_get_link_status(radeon_connector, link_status))
-        return false;
-    if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
-        return false;
-    return true;
+static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
+{
+    /* set the initial vs/emph on the source */
+    atombios_dig_transmitter_setup(dp_info->encoder,
+                       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+                       0, dp_info->train_set[0]); /* sets all lanes at once */
+
+    /* set the vs/emph on the sink */
+    radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
+                   dp_info->train_set, dp_info->dp_lane_count, 0);
 }
 
-static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
+static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
 {
-    struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+    int rtp = 0;
 
-    if (dig_connector->dpcd[0] >= 0x11) {
-        radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
-                       &power_state);
+    /* set training pattern on the source */
+    if (ASIC_IS_DCE4(dp_info->rdev)) {
+        switch (tp) {
+        case DP_TRAINING_PATTERN_1:
+            rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
+            break;
+        case DP_TRAINING_PATTERN_2:
+            rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
+            break;
+        case DP_TRAINING_PATTERN_3:
+            rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
+            break;
+        }
+        atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
+    } else {
+        switch (tp) {
+        case DP_TRAINING_PATTERN_1:
+            rtp = 0;
+            break;
+        case DP_TRAINING_PATTERN_2:
+            rtp = 1;
+            break;
+        }
+        radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+                      dp_info->dp_clock, dp_info->enc_id, rtp);
     }
-}
 
-static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
-{
-    radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
-                   &downspread);
+    /* enable training pattern on the sink */
+    radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
 }
 
-static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
-                 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
+static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
 {
-    radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
-                   link_configuration);
-}
+    u8 tmp;
 
-static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
-                struct drm_encoder *encoder,
-                u8 train_set[4])
-{
-    struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
-    int i;
+    /* power up the sink */
+    if (dp_info->dpcd[0] >= 0x11)
+        radeon_write_dpcd_reg(dp_info->radeon_connector,
+                      DP_SET_POWER, DP_SET_POWER_D0);
+
+    /* possibly enable downspread on the sink */
+    if (dp_info->dpcd[3] & 0x1)
+        radeon_write_dpcd_reg(dp_info->radeon_connector,
+                      DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
+    else
+        radeon_write_dpcd_reg(dp_info->radeon_connector,
+                      DP_DOWNSPREAD_CTRL, 0);
 
-    for (i = 0; i < dig_connector->dp_lane_count; i++)
-        atombios_dig_transmitter_setup(encoder,
-                           ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
-                           i, train_set[i]);
+    radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
 
-    radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
-                   dig_connector->dp_lane_count, train_set);
-}
+    /* set the lane count on the sink */
+    tmp = dp_info->dp_lane_count;
+    if (dp_info->dpcd[0] >= 0x11)
+        tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+    radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
-static void dp_set_training(struct radeon_connector *radeon_connector,
-                u8 training)
-{
-    radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
-                   1, &training);
-}
+    /* set the link rate on the sink */
+    tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
+    radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
 
-void dp_link_train(struct drm_encoder *encoder,
-           struct drm_connector *connector)
-{
-    struct drm_device *dev = encoder->dev;
-    struct radeon_device *rdev = dev->dev_private;
-    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-    struct radeon_encoder_atom_dig *dig;
-    struct radeon_connector *radeon_connector;
-    struct radeon_connector_atom_dig *dig_connector;
-    int enc_id = 0;
-    bool clock_recovery, channel_eq;
-    u8 link_status[DP_LINK_STATUS_SIZE];
-    u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
-    u8 tries, voltage;
-    u8 train_set[4];
-    int i;
+    /* start training on the source */
+    if (ASIC_IS_DCE4(dp_info->rdev))
+        atombios_dig_encoder_setup(dp_info->encoder,
+                       ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
+    else
+        radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
+                      dp_info->dp_clock, dp_info->enc_id, 0);
 
-    if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
-        (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
-        return;
+    /* disable the training pattern on the sink */
+    radeon_write_dpcd_reg(dp_info->radeon_connector,
+                  DP_TRAINING_PATTERN_SET,
+                  DP_TRAINING_PATTERN_DISABLE);
 
-    if (!radeon_encoder->enc_priv)
-        return;
-    dig = radeon_encoder->enc_priv;
+    return 0;
+}
 
-    radeon_connector = to_radeon_connector(connector);
-    if (!radeon_connector->con_priv)
-        return;
-    dig_connector = radeon_connector->con_priv;
+static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
+{
+    udelay(400);
 
-    if (dig->dig_encoder)
-        enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
-    else
-        enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
-    if (dig->linkb)
-        enc_id |= ATOM_DP_CONFIG_LINK_B;
-    else
-        enc_id |= ATOM_DP_CONFIG_LINK_A;
+    /* disable the training pattern on the sink */
+    radeon_write_dpcd_reg(dp_info->radeon_connector,
+                  DP_TRAINING_PATTERN_SET,
+                  DP_TRAINING_PATTERN_DISABLE);
 
-    memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-    if (dig_connector->dp_clock == 270000)
-        link_configuration[0] = DP_LINK_BW_2_7;
+    /* disable the training pattern on the source */
+    if (ASIC_IS_DCE4(dp_info->rdev))
+        atombios_dig_encoder_setup(dp_info->encoder,
+                       ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
     else
-        link_configuration[0] = DP_LINK_BW_1_62;
-    link_configuration[1] = dig_connector->dp_lane_count;
-    if (dig_connector->dpcd[0] >= 0x11)
-        link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+        radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+                      dp_info->dp_clock, dp_info->enc_id, 0);
 
-    /* power up the sink */
-    dp_set_power(radeon_connector, DP_SET_POWER_D0);
-    /* disable the training pattern on the sink */
-    dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
-    /* set link bw and lanes on the sink */
-    dp_set_link_bw_lanes(radeon_connector, link_configuration);
-    /* disable downspread on the sink */
-    dp_set_downspread(radeon_connector, 0);
-    if (ASIC_IS_DCE4(rdev)) {
-        /* start training on the source */
-        atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
-        /* set training pattern 1 on the source */
-        atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
-    } else {
-        /* start training on the source */
-        radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
-                      dig_connector->dp_clock, enc_id, 0);
-        /* set training pattern 1 on the source */
-        radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
-                      dig_connector->dp_clock, enc_id, 0);
-    }
+    return 0;
+}
 
-    /* set initial vs/emph */
-    memset(train_set, 0, 4);
-    udelay(400);
-    /* set training pattern 1 on the sink */
-    dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
+static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+{
+    bool clock_recovery;
+    u8 voltage;
+    int i;
 
-    dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+    radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
+    memset(dp_info->train_set, 0, 4);
+    radeon_dp_update_vs_emph(dp_info);
+
+    udelay(400);
 
     /* clock recovery loop */
     clock_recovery = false;
-    tries = 0;
+    dp_info->tries = 0;
     voltage = 0xff;
-    for (;;) {
-        udelay(100);
-        if (!atom_dp_get_link_status(radeon_connector, link_status))
+    while (1) {
+        if (dp_info->rd_interval == 0)
+            udelay(100);
+        else
+            mdelay(dp_info->rd_interval * 4);
+
+        if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
             break;
 
-        if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
+        if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
             clock_recovery = true;
             break;
         }
 
-        for (i = 0; i < dig_connector->dp_lane_count; i++) {
-            if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+        for (i = 0; i < dp_info->dp_lane_count; i++) {
+            if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                 break;
         }
-        if (i == dig_connector->dp_lane_count) {
+        if (i == dp_info->dp_lane_count) {
             DRM_ERROR("clock recovery reached max voltage\n");
             break;
         }
 
-        if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-            ++tries;
-            if (tries == 5) {
+        if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+            ++dp_info->tries;
+            if (dp_info->tries == 5) {
                 DRM_ERROR("clock recovery tried 5 times\n");
                 break;
             }
         } else
-            tries = 0;
+            dp_info->tries = 0;
 
-        voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+        voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
         /* Compute new train_set as requested by sink */
-        dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
-        dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+        dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+        radeon_dp_update_vs_emph(dp_info);
     }
-    if (!clock_recovery)
+    if (!clock_recovery) {
         DRM_ERROR("clock recovery failed\n");
-    else
+        return -1;
+    } else {
         DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
-                  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
-                  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+                  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+                  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
                   DP_TRAIN_PRE_EMPHASIS_SHIFT);
+        return 0;
+    }
+}
 
+static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+{
+    bool channel_eq;
 
-    /* set training pattern 2 on the sink */
-    dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
-    /* set training pattern 2 on the source */
-    if (ASIC_IS_DCE4(rdev))
-        atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
+    if (dp_info->tp3_supported)
+        radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
     else
-        radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
-                      dig_connector->dp_clock, enc_id, 1);
+        radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
 
     /* channel equalization loop */
-    tries = 0;
+    dp_info->tries = 0;
     channel_eq = false;
-    for (;;) {
-        udelay(400);
-        if (!atom_dp_get_link_status(radeon_connector, link_status))
+    while (1) {
+        if (dp_info->rd_interval == 0)
+            udelay(400);
+        else
+            mdelay(dp_info->rd_interval * 4);
+
+        if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
             break;
 
-        if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
+        if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
             channel_eq = true;
             break;
         }
 
         /* Try 5 times */
-        if (tries > 5) {
+        if (dp_info->tries > 5) {
             DRM_ERROR("channel eq failed: 5 tries\n");
             break;
         }
 
         /* Compute new train_set as requested by sink */
-        dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
-        dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+        dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
 
-        tries++;
+        radeon_dp_update_vs_emph(dp_info);
+        dp_info->tries++;
     }
 
-    if (!channel_eq)
+    if (!channel_eq) {
         DRM_ERROR("channel eq failed\n");
-    else
+        return -1;
+    } else {
         DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
-                  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
-                  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+                  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+                  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
                   >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
-
-    /* disable the training pattern on the sink */
-    dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
-
-    /* disable the training pattern on the source */
-    if (ASIC_IS_DCE4(rdev))
-        atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
-    else
-        radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
-                      dig_connector->dp_clock, enc_id, 0);
+        return 0;
+    }
 }
 
-int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
-             uint8_t write_byte, uint8_t *read_byte)
+void radeon_dp_link_train(struct drm_encoder *encoder,
+              struct drm_connector *connector)
 {
-    struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-    struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
-    int ret = 0;
-    uint16_t address = algo_data->address;
-    uint8_t msg[5];
-    uint8_t reply[2];
-    int msg_len, dp_msg_len;
-    int reply_bytes;
-
-    /* Set up the command byte */
-    if (mode & MODE_I2C_READ)
-        msg[2] = AUX_I2C_READ << 4;
-    else
-        msg[2] = AUX_I2C_WRITE << 4;
-
-    if (!(mode & MODE_I2C_STOP))
-        msg[2] |= AUX_I2C_MOT << 4;
+    struct drm_device *dev = encoder->dev;
+    struct radeon_device *rdev = dev->dev_private;
+    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+    struct radeon_encoder_atom_dig *dig;
+    struct radeon_connector *radeon_connector;
+    struct radeon_connector_atom_dig *dig_connector;
+    struct radeon_dp_link_train_info dp_info;
+    u8 tmp;
 
-    msg[0] = address;
-    msg[1] = address >> 8;
+    if (!radeon_encoder->enc_priv)
+        return;
+    dig = radeon_encoder->enc_priv;
 
-    reply_bytes = 1;
+    radeon_connector = to_radeon_connector(connector);
+    if (!radeon_connector->con_priv)
+        return;
+    dig_connector = radeon_connector->con_priv;
 
-    msg_len = 4;
-    dp_msg_len = 3;
-    switch (mode) {
-    case MODE_I2C_WRITE:
-        msg[4] = write_byte;
-        msg_len++;
-        dp_msg_len += 2;
-        break;
-    case MODE_I2C_READ:
-        dp_msg_len += 1;
-        break;
-    default:
-        break;
-    }
+    if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
+        (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
+        return;
 
-    msg[3] = (dp_msg_len) << 4;
-    ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
+    dp_info.enc_id = 0;
+    if (dig->dig_encoder)
+        dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+    else
+        dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+    if (dig->linkb)
+        dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
+    else
+        dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
-    if (ret) {
-        if (read_byte)
-            *read_byte = reply[0];
-        return reply_bytes;
-    }
-    return -EREMOTEIO;
+    dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
+    tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
+    if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+        dp_info.tp3_supported = true;
+    else
+        dp_info.tp3_supported = false;
+
+    memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
+    dp_info.rdev = rdev;
+    dp_info.encoder = encoder;
+    dp_info.connector = connector;
+    dp_info.radeon_connector = radeon_connector;
+    dp_info.dp_lane_count = dig_connector->dp_lane_count;
+    dp_info.dp_clock = dig_connector->dp_clock;
+
+    if (radeon_dp_link_train_init(&dp_info))
+        goto done;
+    if (radeon_dp_link_train_cr(&dp_info))
+        goto done;
+    if (radeon_dp_link_train_ce(&dp_info))
+        goto done;
+done:
+    if (radeon_dp_link_train_finish(&dp_info))
+        return;
 }
-
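The rewrite splits training into four stages driven by one radeon_dp_link_train_info; a condensed sketch of the sequence (same functions as above, error paths elided):

    radeon_dp_link_train_init(&dp_info);    /* wake sink, set lane count + rate */
    radeon_dp_link_train_cr(&dp_info);      /* TP1 loop: clock recovery         */
    radeon_dp_link_train_ce(&dp_info);      /* TP2/TP3 loop: channel eq         */
    radeon_dp_link_train_finish(&dp_info);  /* training pattern off, both ends  */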
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 9073e3bfb08c..7c37638095f7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1578,7 +1578,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
     u32 sq_stack_resource_mgmt_2;
     u32 sq_stack_resource_mgmt_3;
     u32 vgt_cache_invalidation;
-    u32 hdp_host_path_cntl;
+    u32 hdp_host_path_cntl, tmp;
     int i, j, num_shader_engines, ps_thread_count;
 
     switch (rdev->family) {
@@ -1936,8 +1936,12 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
         rdev->config.evergreen.tile_config |= (3 << 0);
         break;
     }
-    rdev->config.evergreen.tile_config |=
-        ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+    /* num banks is 8 on all fusion asics */
+    if (rdev->flags & RADEON_IS_IGP)
+        rdev->config.evergreen.tile_config |= 8 << 4;
+    else
+        rdev->config.evergreen.tile_config |=
+            ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
     rdev->config.evergreen.tile_config |=
         ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
     rdev->config.evergreen.tile_config |=
@@ -2141,6 +2145,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
     for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
         WREG32(i, 0);
 
+    tmp = RREG32(HDP_MISC_CNTL);
+    tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+    WREG32(HDP_MISC_CNTL, tmp);
+
     hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
     WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
 
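Both the evergreen and cayman init paths set the new bit with the usual read-modify-write pattern so any other HDP_MISC_CNTL bits survive:

    u32 tmp = RREG32(HDP_MISC_CNTL);    /* read current value          */
    tmp |= HDP_FLUSH_INVALIDATE_CACHE;  /* bit 0 on these asics        */
    WREG32(HDP_MISC_CNTL, tmp);         /* write back, other bits kept */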
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index fc40e0cc3451..f37e91ee8a11 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -64,6 +64,8 @@
 #define GB_BACKEND_MAP                  0x98FC
 #define DMIF_ADDR_CONFIG                0xBD4
 #define HDP_ADDR_CONFIG                 0x2F48
+#define HDP_MISC_CNTL                   0x2F4C
+#define HDP_FLUSH_INVALIDATE_CACHE      (1 << 0)
 
 #define CC_SYS_RB_BACKEND_DISABLE       0x3F88
 #define GC_USER_RB_BACKEND_DISABLE      0x9B7C
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 3d8a7634bbe9..b205ba1cdd8f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,7 +417,7 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
         num_shader_engines = 1;
     if (num_shader_engines > rdev->config.cayman.max_shader_engines)
         num_shader_engines = rdev->config.cayman.max_shader_engines;
-    if (num_backends_per_asic > num_shader_engines)
+    if (num_backends_per_asic < num_shader_engines)
         num_backends_per_asic = num_shader_engines;
     if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
         num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
@@ -829,7 +829,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
     rdev->config.cayman.tile_config |=
         ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
     rdev->config.cayman.tile_config |=
-        (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+        ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
     rdev->config.cayman.tile_config |=
         ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
@@ -931,6 +931,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	WREG32(CB_PERF_CTR3_SEL_0, 0);
 	WREG32(CB_PERF_CTR3_SEL_1, 0);
 
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
 
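The tile_config fixes here and in evergreen.c are both about packing fields into fixed bit positions of the value handed to userspace: pipes in bits 0-3, banks in bits 4-7, pipe interleave in bits 8-11 (the shift that was missing above), row size from bit 12. A standalone sketch of that packing with made-up field values (the real masks and shifts live in nid.h):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_tile_config(uint32_t pipes, uint32_t banks,
				 uint32_t interleave, uint32_t row_size)
{
	uint32_t tile_config = 0;

	tile_config |= pipes;		/* bits 0-3 */
	tile_config |= banks << 4;	/* bits 4-7 */
	tile_config |= interleave << 8;	/* bits 8-11 */
	tile_config |= row_size << 12;	/* bits 12+ */
	return tile_config;
}

int main(void)
{
	printf("0x%04x\n", (unsigned)pack_tile_config(2, 1, 1, 2));	/* 0x2112 */
	return 0;
}
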
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 0f9a08b53fbd..9736746da2d6 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -136,6 +136,8 @@
 #define	HDP_NONSURFACE_INFO				0x2C08
 #define	HDP_NONSURFACE_SIZE				0x2C0C
 #define	HDP_ADDR_CONFIG					0x2F48
+#define	HDP_MISC_CNTL					0x2F4C
+#define	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
 
 #define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
 #define	GC_USER_SYS_RB_BACKEND_DISABLE			0x3F8C
@@ -351,7 +353,7 @@
 #define	MULTI_GPU_TILE_SIZE_MASK		0x03000000
 #define	MULTI_GPU_TILE_SIZE_SHIFT		24
 #define	ROW_SIZE(x)				((x) << 28)
-#define	ROW_SIZE_MASK				0x30000007
+#define	ROW_SIZE_MASK				0x30000000
 #define	ROW_SIZE_SHIFT				28
 #define	NUM_LOWER_PIPES(x)			((x) << 30)
 #define	NUM_LOWER_PIPES_MASK			0x40000000
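The ROW_SIZE_MASK fix makes the mask agree with its shift: a two-bit field at bit 28 must mask to 0x30000000, and the stray low bits of the old 0x30000007 would have leaked unrelated GB_ADDR_CONFIG bits into the extracted value. The invariant can be checked at compile time; a sketch (the kernel itself carries no such assert):

#define ROW_SIZE_MASK	0x30000000
#define ROW_SIZE_SHIFT	28

_Static_assert(ROW_SIZE_MASK == (0x3u << ROW_SIZE_SHIFT),
	       "ROW_SIZE mask does not match its shift");

int main(void) { return 0; }
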
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index ca576191d058..d948265db87e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -782,6 +782,7 @@ static struct radeon_asic evergreen_asic = {
 	.hpd_fini = &evergreen_hpd_fini,
 	.hpd_sense = &evergreen_hpd_sense,
 	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
 	.gui_idle = &r600_gui_idle,
 	.pm_misc = &evergreen_pm_misc,
 	.pm_prepare = &evergreen_pm_prepare,
@@ -828,6 +829,7 @@ static struct radeon_asic sumo_asic = {
 	.hpd_fini = &evergreen_hpd_fini,
 	.hpd_sense = &evergreen_hpd_sense,
 	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
 	.gui_idle = &r600_gui_idle,
 	.pm_misc = &evergreen_pm_misc,
 	.pm_prepare = &evergreen_pm_prepare,
@@ -874,6 +876,7 @@ static struct radeon_asic btc_asic = {
 	.hpd_fini = &evergreen_hpd_fini,
 	.hpd_sense = &evergreen_hpd_sense,
 	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
 	.gui_idle = &r600_gui_idle,
 	.pm_misc = &evergreen_pm_misc,
 	.pm_prepare = &evergreen_pm_prepare,
@@ -920,6 +923,7 @@ static struct radeon_asic cayman_asic = {
 	.hpd_fini = &evergreen_hpd_fini,
 	.hpd_sense = &evergreen_hpd_sense,
 	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
 	.gui_idle = &r600_gui_idle,
 	.pm_misc = &evergreen_pm_misc,
 	.pm_prepare = &evergreen_pm_prepare,
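These four hunks hook the same r600 callback into each ASIC's function-pointer table, the dispatch pattern radeon uses for per-chip behaviour. A reduced sketch of the pattern, with the struct and names simplified for illustration:

#include <stdio.h>

struct asic_ops {
	void (*ioctl_wait_idle)(void);	/* NULL when unsupported */
};

static void r600_wait_idle(void) { puts("r600-style wait idle"); }

/* evergreen, sumo, btc and cayman can all reuse the r600 implementation */
static const struct asic_ops evergreen_ops = { .ioctl_wait_idle = r600_wait_idle };

int main(void)
{
	if (evergreen_ops.ioctl_wait_idle)	/* callers check before dispatching */
		evergreen_ops.ioctl_wait_idle();
	return 0;
}
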
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 8caf546c8e92..5b991f7c6e2a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -505,12 +505,18 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
 	 * DDC_VGA = RADEON_GPIO_VGA_DDC
 	 * DDC_LCD = RADEON_GPIOPAD_MASK
 	 * DDC_GPIO = RADEON_MDGPIO_MASK
-	 * r1xx/r2xx
+	 * r1xx
 	 * DDC_MONID = RADEON_GPIO_MONID
 	 * DDC_CRT2 = RADEON_GPIO_CRT2_DDC
-	 * r3xx
+	 * r200
 	 * DDC_MONID = RADEON_GPIO_MONID
 	 * DDC_CRT2 = RADEON_GPIO_DVI_DDC
+	 * r300/r350
+	 * DDC_MONID = RADEON_GPIO_DVI_DDC
+	 * DDC_CRT2 = RADEON_GPIO_DVI_DDC
+	 * rv2xx/rv3xx
+	 * DDC_MONID = RADEON_GPIO_MONID
+	 * DDC_CRT2 = RADEON_GPIO_MONID
 	 * rs3xx/rs4xx
 	 * DDC_MONID = RADEON_GPIOPAD_MASK
 	 * DDC_CRT2 = RADEON_GPIO_MONID
@@ -537,17 +543,26 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
 	    rdev->family == CHIP_RS400 ||
 	    rdev->family == CHIP_RS480)
 		ddc_line = RADEON_GPIOPAD_MASK;
-	else
+	else if (rdev->family == CHIP_R300 ||
+		 rdev->family == CHIP_R350) {
+		ddc_line = RADEON_GPIO_DVI_DDC;
+		ddc = DDC_DVI;
+	} else
 		ddc_line = RADEON_GPIO_MONID;
 	break;
 case DDC_CRT2:
-	if (rdev->family == CHIP_RS300 ||
-	    rdev->family == CHIP_RS400 ||
-	    rdev->family == CHIP_RS480)
-		ddc_line = RADEON_GPIO_MONID;
-	else if (rdev->family >= CHIP_R300) {
+	if (rdev->family == CHIP_R200 ||
+	    rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350) {
 		ddc_line = RADEON_GPIO_DVI_DDC;
 		ddc = DDC_DVI;
+	} else if (rdev->family == CHIP_RS300 ||
+		   rdev->family == CHIP_RS400 ||
+		   rdev->family == CHIP_RS480)
+		ddc_line = RADEON_GPIO_MONID;
+	else if (rdev->family >= CHIP_RV350) {
+		ddc_line = RADEON_GPIO_MONID;
+		ddc = DDC_MONID;
 	} else
 		ddc_line = RADEON_GPIO_CRT2_DDC;
 	break;
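Read as a mapping, the rewritten DDC_CRT2 case says: R200/R300/R350 route CRT2 DDC over the DVI pad, the RS3xx/RS4xx IGPs and RV350+ over MONID, and everything older over the dedicated CRT2 pad. A condensed, hedged illustration of that routing (flags abbreviated; not the driver's actual representation):

enum line { CRT2_DDC, DVI_DDC, MONID };

static enum line ddc_crt2_line(int is_r2xx_r3xx, int is_igp, int ge_rv350)
{
	if (is_r2xx_r3xx)	/* R200/R300/R350 */
		return DVI_DDC;
	if (is_igp)		/* RS300/RS400/RS480 */
		return MONID;
	if (ge_rv350)		/* RV350 and newer */
		return MONID;
	return CRT2_DDC;	/* older parts */
}

int main(void) { return ddc_crt2_line(1, 0, 0) == DVI_DDC ? 0 : 1; }
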
@@ -709,26 +724,42 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
 	struct drm_device *dev = rdev->ddev;
 	struct radeon_i2c_bus_rec i2c;
 
+	/* actual hw pads
+	 * r1xx/rs2xx/rs3xx
+	 * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm
+	 * r200
+	 * 0x60, 0x64, 0x68, mm
+	 * r300/r350
+	 * 0x60, 0x64, mm
+	 * rv2xx/rv3xx/rs4xx
+	 * 0x60, 0x64, 0x68, gpiopads, mm
+	 */
 
+	/* 0x60 */
 	i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 	rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
-
+	/* 0x64 */
 	i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 	rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
 
+	/* mm i2c */
 	i2c.valid = true;
 	i2c.hw_capable = true;
 	i2c.mm_i2c = true;
 	i2c.i2c_id = 0xa0;
 	rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
 
-	if (rdev->family == CHIP_RS300 ||
-	    rdev->family == CHIP_RS400 ||
-	    rdev->family == CHIP_RS480) {
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350) {
+		/* only 2 sw i2c pads */
+	} else if (rdev->family == CHIP_RS300 ||
+		   rdev->family == CHIP_RS400 ||
+		   rdev->family == CHIP_RS480) {
 		u16 offset;
 		u8 id, blocks, clk, data;
 		int i;
 
+		/* 0x68 */
 		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
 
@@ -740,6 +771,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
 			if (id == 136) {
 				clk = RBIOS8(offset + 3 + (i * 5) + 3);
 				data = RBIOS8(offset + 3 + (i * 5) + 4);
+				/* gpiopad */
 				i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
 							    (1 << clk), (1 << data));
 				rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
@@ -747,14 +779,15 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
 				}
 			}
 		}
-
-	} else if (rdev->family >= CHIP_R300) {
+	} else if (rdev->family >= CHIP_R200) {
+		/* 0x68 */
 		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
 		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
 	} else {
+		/* 0x68 */
 		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
 		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
-
+		/* 0x6c */
 		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
 	}
@@ -2504,6 +2537,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 	return true;
 }
 
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+};
+
 void radeon_combios_get_power_modes(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
@@ -2524,6 +2563,54 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 		return;
 	}
 
+	/* check for a thermal chip */
+	offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
+	if (offset) {
+		u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
+		struct radeon_i2c_bus_rec i2c_bus;
+
+		rev = RBIOS8(offset);
+
+		if (rev == 0) {
+			thermal_controller = RBIOS8(offset + 3);
+			gpio = RBIOS8(offset + 4) & 0x3f;
+			i2c_addr = RBIOS8(offset + 5);
+		} else if (rev == 1) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+		} else if (rev == 2) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+			clk_bit = RBIOS8(offset + 0xa);
+			data_bit = RBIOS8(offset + 0xb);
+		}
+		if ((thermal_controller > 0) && (thermal_controller < 3)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+				 thermal_controller_names[thermal_controller],
+				 i2c_addr >> 1);
+			if (gpio == DDC_LCD) {
+				/* MM i2c */
+				i2c_bus.valid = true;
+				i2c_bus.hw_capable = true;
+				i2c_bus.mm_i2c = true;
+				i2c_bus.i2c_id = 0xa0;
+			} else if (gpio == DDC_GPIO)
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit);
+			else
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = thermal_controller_names[thermal_controller];
+				info.addr = i2c_addr >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+		}
+	}
+
 	if (rdev->flags & RADEON_IS_MOBILITY) {
 		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
 		if (offset) {
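The new probe reads the COMBIOS overdrive table at rev-dependent byte offsets, then registers the sensor as an i2c client; note that the table stores a left-shifted 8-bit i2c address, hence the i2c_addr >> 1. A standalone sketch of the offset handling over a fabricated table (offsets mirror the hunk above; buffer contents invented):

#include <stdint.h>
#include <stdio.h>

struct thermal_info { uint8_t controller, gpio, i2c_addr; };

static struct thermal_info parse_overdrive(const uint8_t *t)
{
	struct thermal_info info = { 0, 0, 0 };
	uint8_t rev = t[0];

	if (rev == 0) {
		info.controller = t[3];
		info.gpio = t[4] & 0x3f;
		info.i2c_addr = t[5];
	} else if (rev == 1 || rev == 2) {	/* rev 2 adds clk/data bits */
		info.controller = t[4];
		info.gpio = t[5] & 0x3f;
		info.i2c_addr = t[6];
	}
	return info;
}

int main(void)
{
	const uint8_t table[12] = { 1, 0, 0, 0, 1 /* lm63 */, 4, 0x98 };
	struct thermal_info ti = parse_overdrive(table);

	printf("controller %u at i2c 0x%02x\n", ti.controller, ti.i2c_addr >> 1);
	return 0;	/* prints: controller 1 at i2c 0x4c */
}
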
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5f45fa12bb8b..ee1dccb3fec9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -50,20 +50,21 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-	if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
-		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
-
-	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
-	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
-		if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-		    (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
-			if (radeon_dp_needs_link_train(radeon_connector)) {
-				if (connector->encoder)
-					dp_link_train(connector->encoder, connector);
-			}
-		}
-	}
+	radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+	/* powering up/down the eDP panel generates hpd events which
+	 * can interfere with modesetting.
+	 */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+		return;
 
+	/* pre-r600 did not always have the hpd pins mapped accurately to connectors */
+	if (rdev->family >= CHIP_R600) {
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		else
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	}
 }
 
 static void radeon_property_change_mode(struct drm_encoder *encoder)
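The rewritten handler drops per-connector DP retraining in favour of a simpler rule: bail out for eDP, whose panel power sequencing generates spurious hpd events, and on r600+ let the sensed hpd line drive connector power directly. A sketch of that decision flow with the DRM calls stubbed out:

#include <stdbool.h>
#include <stdio.h>

static bool hpd_sense(void) { return true; }	/* pretend a cable is present */
static void dpms(bool on) { printf("dpms %s\n", on ? "on" : "off"); }

static void hotplug(bool is_edp, bool family_ge_r600)
{
	if (is_edp)		/* ignore panel-power hpd noise */
		return;
	if (family_ge_r600)	/* hpd pins are reliable from r600 on */
		dpms(hpd_sense());
}

int main(void)
{
	hotplug(false, true);	/* prints: dpms on */
	hotplug(true, true);	/* nothing */
	return 0;
}
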
@@ -1054,23 +1055,124 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
 	int ret;
 
 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		struct drm_encoder *encoder;
+		struct drm_display_mode *mode;
+
 		if (!radeon_dig_connector->edp_on)
 			atombios_set_edp_panel_power(connector,
 						     ATOM_TRANSMITTER_ACTION_POWER_ON);
-	}
-	ret = radeon_ddc_get_modes(radeon_connector);
-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		ret = radeon_ddc_get_modes(radeon_connector);
 		if (!radeon_dig_connector->edp_on)
 			atombios_set_edp_panel_power(connector,
 						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
-	}
+
+		if (ret > 0) {
+			encoder = radeon_best_single_encoder(connector);
+			if (encoder) {
+				radeon_fixup_lvds_native_mode(encoder, connector);
+				/* add scaled modes */
+				radeon_add_common_modes(encoder, connector);
+			}
+			return ret;
+		}
+
+		encoder = radeon_best_single_encoder(connector);
+		if (!encoder)
+			return 0;
+
+		/* we have no EDID modes */
+		mode = radeon_fp_native_mode(encoder);
+		if (mode) {
+			ret = 1;
+			drm_mode_probed_add(connector, mode);
+			/* add the width/height from vbios tables if available */
+			connector->display_info.width_mm = mode->width_mm;
+			connector->display_info.height_mm = mode->height_mm;
+			/* add scaled modes */
+			radeon_add_common_modes(encoder, connector);
+		}
+	} else
+		ret = radeon_ddc_get_modes(radeon_connector);
 
 	return ret;
 }
 
+bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+	bool found = false;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			found = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return found;
+}
+
+bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+	bool found = false;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
+			found = true;
+	}
+
+	return found;
+}
+
+bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE5(rdev) &&
+	    (rdev->clock.dp_extclk >= 53900) &&
+	    radeon_connector_encoder_is_hbr2(connector)) {
+		return true;
+	}
+
+	return false;
+}
+
 static enum drm_connector_status
 radeon_dp_detect(struct drm_connector *connector, bool force)
 {
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
 	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
@@ -1081,6 +1183,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 	}
 
 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* check if panel is valid */
+			if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+				ret = connector_status_connected;
+		}
 		/* eDP is always DP */
 		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
 		if (!radeon_dig_connector->edp_on)
@@ -1093,12 +1204,18 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
 	} else {
 		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
-		if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			if (radeon_dp_getdpcd(radeon_connector))
-				ret = connector_status_connected;
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+			ret = connector_status_connected;
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+				radeon_dp_getdpcd(radeon_connector);
 		} else {
-			if (radeon_ddc_probe(radeon_connector))
-				ret = connector_status_connected;
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+				if (radeon_dp_getdpcd(radeon_connector))
+					ret = connector_status_connected;
+			} else {
+				if (radeon_ddc_probe(radeon_connector))
+					ret = connector_status_connected;
+			}
 		}
 	}
 
@@ -1114,11 +1231,38 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
 
 	/* XXX check mode bandwidth */
 
-	if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-	    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
-		return radeon_dp_mode_valid_helper(radeon_connector, mode);
-	else
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+		if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+			return MODE_PANEL;
+
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* AVIVO hardware supports downscaling modes larger than the panel
+			 * to the panel size, but I'm not sure this is desirable.
+			 */
+			if ((mode->hdisplay > native_mode->hdisplay) ||
+			    (mode->vdisplay > native_mode->vdisplay))
+				return MODE_PANEL;
+
+			/* if scaling is disabled, block non-native modes */
+			if (radeon_encoder->rmx_type == RMX_OFF) {
+				if ((mode->hdisplay != native_mode->hdisplay) ||
+				    (mode->vdisplay != native_mode->vdisplay))
+					return MODE_PANEL;
+			}
+		}
 		return MODE_OK;
+	} else {
+		if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return radeon_dp_mode_valid_helper(connector, mode);
+		else
+			return MODE_OK;
+	}
 }
 
 struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
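For eDP the new validation rejects whatever the panel cannot display: a 320x240 floor, nothing larger than the native mode, and, with the scaler off, nothing but the native mode. The three checks as a condensed standalone sketch:

#include <stdbool.h>
#include <stdio.h>

struct mode { int w, h; };

static bool edp_mode_ok(struct mode m, struct mode native, bool rmx_off)
{
	if (m.w < 320 || m.h < 240)
		return false;		/* too small */
	if (m.w > native.w || m.h > native.h)
		return false;		/* larger than the panel */
	if (rmx_off && (m.w != native.w || m.h != native.h))
		return false;		/* scaling disabled: native only */
	return true;
}

int main(void)
{
	struct mode native = { 1366, 768 };

	printf("%d\n", edp_mode_ok((struct mode){ 1024, 768 }, native, false));	/* 1 */
	printf("%d\n", edp_mode_ok((struct mode){ 1024, 768 }, native, true));	/* 0 */
	return 0;
}
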
@@ -1151,8 +1295,11 @@ radeon_add_atom_connector(struct drm_device *dev,
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	struct radeon_connector_atom_dig *radeon_dig_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
 	uint32_t subpixel_order = SubPixelNone;
 	bool shared_ddc = false;
+	bool is_dp_bridge = false;
 
 	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 		return;
@@ -1184,6 +1331,21 @@ radeon_add_atom_connector(struct drm_device *dev,
 		}
 	}
 
+	/* check if it's a dp bridge */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->devices & supported_device) {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_TRAVIS:
+			case ENCODER_OBJECT_ID_NUTMEG:
+				is_dp_bridge = true;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
 	radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
 	if (!radeon_connector)
 		return;
@@ -1201,61 +1363,39 @@ radeon_add_atom_connector(struct drm_device *dev,
 		if (!radeon_connector->router_bus)
 			DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
 	}
-	switch (connector_type) {
-	case DRM_MODE_CONNECTOR_VGA:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (i2c_bus->valid) {
-			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-			if (!radeon_connector->ddc_bus)
-				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
-		}
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
-		/* no HPD on analog connectors */
-		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-		connector->interlace_allowed = true;
-		connector->doublescan_allowed = true;
-		break;
-	case DRM_MODE_CONNECTOR_DVIA:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
-		if (i2c_bus->valid) {
-			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-			if (!radeon_connector->ddc_bus)
-				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
-		}
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
-		/* no HPD on analog connectors */
-		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-		connector->interlace_allowed = true;
-		connector->doublescan_allowed = true;
-		break;
-	case DRM_MODE_CONNECTOR_DVII:
-	case DRM_MODE_CONNECTOR_DVID:
+
+	if (is_dp_bridge) {
 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
 		if (!radeon_dig_connector)
 			goto failed;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			if (connector_type == DRM_MODE_CONNECTOR_eDP)
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+			else
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
-		subpixel_order = SubPixelHorizontalRGB;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.coherent_mode_property,
-					      1);
-		if (ASIC_IS_AVIVO(rdev)) {
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+		case DRM_MODE_CONNECTOR_DVIA:
+		default:
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+		case DRM_MODE_CONNECTOR_DisplayPort:
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_property,
 						      UNDERSCAN_OFF);
@@ -1265,131 +1405,234 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
-		}
-		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
-			radeon_connector->dac_load_detect = true;
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.load_detect_property,
-						      1);
-		}
-		connector->interlace_allowed = true;
-		if (connector_type == DRM_MODE_CONNECTOR_DVII)
-			connector->doublescan_allowed = true;
-		else
-			connector->doublescan_allowed = false;
-		break;
-	case DRM_MODE_CONNECTOR_HDMIA:
-	case DRM_MODE_CONNECTOR_HDMIB:
-		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
-		if (!radeon_dig_connector)
-			goto failed;
-		radeon_dig_connector->igp_lane_info = igp_lane_info;
-		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
-		if (i2c_bus->valid) {
-			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-			if (!radeon_connector->ddc_bus)
-				DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
-		}
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.coherent_mode_property,
-					      1);
-		if (ASIC_IS_AVIVO(rdev)) {
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_OFF);
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.underscan_hborder_property,
-						      0);
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.underscan_vborder_property,
-						      0);
-		}
-		subpixel_order = SubPixelHorizontalRGB;
-		connector->interlace_allowed = true;
-		if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
-			connector->doublescan_allowed = true;
-		else
-			connector->doublescan_allowed = false;
-		break;
-	case DRM_MODE_CONNECTOR_DisplayPort:
-	case DRM_MODE_CONNECTOR_eDP:
-		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
-		if (!radeon_dig_connector)
-			goto failed;
-		radeon_dig_connector->igp_lane_info = igp_lane_info;
-		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
-		if (i2c_bus->valid) {
-			/* add DP i2c bus */
-			if (connector_type == DRM_MODE_CONNECTOR_eDP)
-				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
-			else
-				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
-			if (!radeon_dig_connector->dp_i2c_bus)
-				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
-			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-			if (!radeon_connector->ddc_bus)
-				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
-		}
-		subpixel_order = SubPixelHorizontalRGB;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.coherent_mode_property,
-					      1);
-		if (ASIC_IS_AVIVO(rdev)) {
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_OFF);
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.underscan_hborder_property,
-						      0);
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.underscan_vborder_property,
-						      0);
-		}
-		connector->interlace_allowed = true;
-		/* in theory with a DP to VGA converter... */
-		connector->doublescan_allowed = false;
-		break;
-	case DRM_MODE_CONNECTOR_SVIDEO:
-	case DRM_MODE_CONNECTOR_Composite:
-	case DRM_MODE_CONNECTOR_9PinDIN:
-		drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.tv_std_property,
-					      radeon_atombios_get_tv_info(rdev));
-		/* no HPD on analog connectors */
-		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-		connector->interlace_allowed = false;
-		connector->doublescan_allowed = false;
-		break;
-	case DRM_MODE_CONNECTOR_LVDS:
-		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
-		if (!radeon_dig_connector)
-			goto failed;
-		radeon_dig_connector->igp_lane_info = igp_lane_info;
-		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
-		if (i2c_bus->valid) {
-			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
-			if (!radeon_connector->ddc_bus)
-				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
-		}
-		drm_connector_attach_property(&radeon_connector->base,
-					      dev->mode_config.scaling_mode_property,
-					      DRM_MODE_SCALE_FULLSCREEN);
-		subpixel_order = SubPixelHorizontalRGB;
-		connector->interlace_allowed = false;
-		connector->doublescan_allowed = false;
-		break;
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+		case DRM_MODE_CONNECTOR_eDP:
+			drm_connector_attach_property(&radeon_connector->base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	} else {
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVIA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				radeon_connector->dac_load_detect = true;
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.load_detect_property,
+							      1);
+			}
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				/* add DP i2c bus */
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+				if (!radeon_dig_connector->dp_i2c_bus)
+					DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_connector_attach_property(&radeon_connector->base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			connector->interlace_allowed = true;
+			/* in theory with a DP to VGA converter... */
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_eDP:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				/* add DP i2c bus */
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+				if (!radeon_dig_connector->dp_i2c_bus)
+					DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_connector_attach_property(&radeon_connector->base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_9PinDIN:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.tv_std_property,
+						      radeon_atombios_get_tv_info(rdev));
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_connector_attach_property(&radeon_connector->base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
 	}
 
 	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 890217e678d3..5b61364e31f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -923,6 +923,9 @@ int radeon_resume_kms(struct drm_device *dev)
 	radeon_fbdev_set_suspend(rdev, 0);
 	console_unlock();
 
+	/* init dig PHYs */
+	if (rdev->is_atom_bios)
+		radeon_atom_encoder_init(rdev);
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
 	/* blat the mode back in */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bdbab5c43bdc..ae247eec87c0 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1087,8 +1087,9 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
 	*frac_fb_div_p = best_frac_feedback_div;
 	*ref_div_p = best_ref_div;
 	*post_div_p = best_post_div;
-	DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
-		      freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      (long long)freq,
+		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
 		      best_ref_div, best_post_div);
 
 }
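The format fix matters because freq is 64 bits wide: %d reads only 32 bits from the vararg list and misaligns every argument after it. Casting to long long and printing with %lld stays correct on both 32- and 64-bit builds. A tiny illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq = 1550000000ULL * 10;	/* does not fit in 32 bits */

	/* printf("%d\n", freq) would be undefined behaviour here */
	printf("%lld\n", (long long)freq);	/* 15500000000 */
	return 0;
}
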
@@ -1344,6 +1345,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
 	if (!ret) {
 		return ret;
 	}
+
+	/* init dig PHYs */
+	if (rdev->is_atom_bios)
+		radeon_atom_encoder_init(rdev);
+
 	/* initialize hpd */
 	radeon_hpd_init(rdev);
 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 63d2de8771dc..1d330606292f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -50,9 +50,10 @@
  * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
  * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
  * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
+ * 2.10.0 - fusion 2D tiling
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	9
+#define KMS_DRIVER_MINOR	10
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b4274883227f..1b557554696e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -229,6 +229,22 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
 	return NULL;
 }
 
+static struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->devices & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
 struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -250,6 +266,25 @@ struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder
 	return NULL;
 }
 
+bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
+{
+	struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
+
+	if (other_encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return true;
+		default:
+			return false;
+		}
+	}
+
+	return false;
+}
+
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
 			     struct drm_display_mode *adjusted_mode)
 {
@@ -621,6 +656,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 	struct radeon_connector *radeon_connector;
 	struct radeon_connector_atom_dig *dig_connector;
 
+	/* dp bridges are always DP */
+	if (radeon_encoder_is_dp_bridge(encoder))
+		return ATOM_ENCODER_MODE_DP;
+
 	connector = radeon_get_connector_for_encoder(encoder);
 	if (!connector) {
 		switch (radeon_encoder->encoder_id) {
@@ -668,7 +707,6 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 			return ATOM_ENCODER_MODE_LVDS;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
-	case DRM_MODE_CONNECTOR_eDP:
 		dig_connector = radeon_connector->con_priv;
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
@@ -682,6 +720,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		} else
 			return ATOM_ENCODER_MODE_DVI;
 		break;
+	case DRM_MODE_CONNECTOR_eDP:
+		return ATOM_ENCODER_MODE_DP;
 	case DRM_MODE_CONNECTOR_DVIA:
 	case DRM_MODE_CONNECTOR_VGA:
 		return ATOM_ENCODER_MODE_CRT;
@@ -747,7 +787,7 @@ union dig_encoder_control {
 };
 
 void
-atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -760,6 +800,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 	int dp_clock = 0;
 	int dp_lane_count = 0;
 	int hpd_id = RADEON_HPD_NONE;
+	int bpc = 8;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -769,6 +810,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 		dp_clock = dig_connector->dp_clock;
 		dp_lane_count = dig_connector->dp_lane_count;
 		hpd_id = radeon_connector->hpd.hpd;
+		bpc = connector->display_info.bpc;
 	}
 
 	/* no dig encoder assigned */
@@ -791,7 +833,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 
 	args.v1.ucAction = action;
 	args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-	args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+	if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+		args.v3.ucPanelMode = panel_mode;
+	else
+		args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
 	if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
 	    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
@@ -810,7 +855,27 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
 			}
 			args.v4.acConfig.ucDigSel = dig->dig_encoder;
-			args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+			switch (bpc) {
+			case 0:
+				args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
+				break;
+			case 6:
+				args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+				break;
+			case 8:
+			default:
+				args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+				break;
+			case 10:
+				args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+				break;
+			case 12:
+				args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+				break;
+			case 16:
+				args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+				break;
+			}
 			if (hpd_id == RADEON_HPD_NONE)
 				args.v4.ucHPD_ID = 0;
 			else
@@ -819,7 +884,27 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 			if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
 			args.v3.acConfig.ucDigSel = dig->dig_encoder;
-			args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+			switch (bpc) {
+			case 0:
+				args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
+				break;
+			case 6:
+				args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+				break;
+			case 8:
+			default:
+				args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+				break;
+			case 10:
+				args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+				break;
+			case 12:
+				args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+				break;
+			case 16:
+				args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+				break;
+			}
 		} else {
 			if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
@@ -859,7 +944,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
859 struct radeon_device *rdev = dev->dev_private; 944 struct radeon_device *rdev = dev->dev_private;
860 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 945 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
861 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 946 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
862 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 947 struct drm_connector *connector;
863 union dig_transmitter_control args; 948 union dig_transmitter_control args;
864 int index = 0; 949 int index = 0;
865 uint8_t frev, crev; 950 uint8_t frev, crev;
@@ -870,6 +955,11 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
870 int connector_object_id = 0; 955 int connector_object_id = 0;
871 int igp_lane_info = 0; 956 int igp_lane_info = 0;
872 957
958 if (action == ATOM_TRANSMITTER_ACTION_INIT)
959 connector = radeon_get_connector_for_encoder_init(encoder);
960 else
961 connector = radeon_get_connector_for_encoder(encoder);
962
873 if (connector) { 963 if (connector) {
874 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 964 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
875 struct radeon_connector_atom_dig *dig_connector = 965 struct radeon_connector_atom_dig *dig_connector =
@@ -931,10 +1021,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
931 else 1021 else
932 args.v3.ucLaneNum = 4; 1022 args.v3.ucLaneNum = 4;
933 1023
934 if (dig->linkb) { 1024 if (dig->linkb)
935 args.v3.acConfig.ucLinkSel = 1; 1025 args.v3.acConfig.ucLinkSel = 1;
1026 if (dig->dig_encoder & 1)
936 args.v3.acConfig.ucEncoderSel = 1; 1027 args.v3.acConfig.ucEncoderSel = 1;
937 }
938 1028
939 /* Select the PLL for the PHY 1029 /* Select the PLL for the PHY
940 * DP PHY should be clocked from external src if there is 1030 * DP PHY should be clocked from external src if there is
@@ -946,11 +1036,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
946 } 1036 }
947 1037
948 if (ASIC_IS_DCE5(rdev)) { 1038 if (ASIC_IS_DCE5(rdev)) {
949 if (is_dp && rdev->clock.dp_extclk) 1039 /* On DCE5 DCPLL usually generates the DP ref clock */
950 args.v4.acConfig.ucRefClkSource = 3; /* external src */ 1040 if (is_dp) {
951 else 1041 if (rdev->clock.dp_extclk)
1042 args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
1043 else
1044 args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
1045 } else
952 args.v4.acConfig.ucRefClkSource = pll_id; 1046 args.v4.acConfig.ucRefClkSource = pll_id;
953 } else { 1047 } else {
1048 /* On DCE4, if there is an external clock, it generates the DP ref clock */
954 if (is_dp && rdev->clock.dp_extclk) 1049 if (is_dp && rdev->clock.dp_extclk)
955 args.v3.acConfig.ucRefClkSource = 2; /* external src */ 1050 args.v3.acConfig.ucRefClkSource = 2; /* external src */
956 else 1051 else
@@ -1047,7 +1142,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1047 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1142 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1048} 1143}
1049 1144
1050void 1145bool
1051atombios_set_edp_panel_power(struct drm_connector *connector, int action) 1146atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1052{ 1147{
1053 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1148 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1058,23 +1153,37 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1058 uint8_t frev, crev; 1153 uint8_t frev, crev;
1059 1154
1060 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) 1155 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
1061 return; 1156 goto done;
1062 1157
1063 if (!ASIC_IS_DCE4(rdev)) 1158 if (!ASIC_IS_DCE4(rdev))
1064 return; 1159 goto done;
1065 1160
1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && 1161 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) 1162 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1068 return; 1163 goto done;
1069 1164
1070 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 1165 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1071 return; 1166 goto done;
1072 1167
1073 memset(&args, 0, sizeof(args)); 1168 memset(&args, 0, sizeof(args));
1074 1169
1075 args.v1.ucAction = action; 1170 args.v1.ucAction = action;
1076 1171
1077 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1172 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1173
1174 /* wait for the panel to power up */
1175 if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
1176 int i;
1177
1178 for (i = 0; i < 300; i++) {
1179 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1180 return true;
1181 mdelay(1);
1182 }
1183 return false;
1184 }
1185done:
1186 return true;
1078} 1187}
1079 1188
1080union external_encoder_control { 1189union external_encoder_control {
@@ -1092,13 +1201,19 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1092 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1201 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1093 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); 1202 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
1094 union external_encoder_control args; 1203 union external_encoder_control args;
1095 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1204 struct drm_connector *connector;
1096 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); 1205 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
1097 u8 frev, crev; 1206 u8 frev, crev;
1098 int dp_clock = 0; 1207 int dp_clock = 0;
1099 int dp_lane_count = 0; 1208 int dp_lane_count = 0;
1100 int connector_object_id = 0; 1209 int connector_object_id = 0;
1101 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; 1210 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
1211 int bpc = 8;
1212
1213 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
1214 connector = radeon_get_connector_for_encoder_init(encoder);
1215 else
1216 connector = radeon_get_connector_for_encoder(encoder);
1102 1217
1103 if (connector) { 1218 if (connector) {
1104 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1219 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1109,6 +1224,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1109 dp_lane_count = dig_connector->dp_lane_count; 1224 dp_lane_count = dig_connector->dp_lane_count;
1110 connector_object_id = 1225 connector_object_id =
1111 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 1226 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1227 bpc = connector->display_info.bpc;
1112 } 1228 }
1113 1229
1114 memset(&args, 0, sizeof(args)); 1230 memset(&args, 0, sizeof(args));
@@ -1166,7 +1282,27 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1166 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; 1282 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
1167 break; 1283 break;
1168 } 1284 }
1169 args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; 1285 switch (bpc) {
1286 case 0:
1287 args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
1288 break;
1289 case 6:
1290 args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
1291 break;
1292 case 8:
1293 default:
1294 args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
1295 break;
1296 case 10:
1297 args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
1298 break;
1299 case 12:
1300 args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
1301 break;
1302 case 16:
1303 args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
1304 break;
1305 }
1170 break; 1306 break;
1171 default: 1307 default:
1172 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1308 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
@@ -1307,9 +1443,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1307 ATOM_TRANSMITTER_ACTION_POWER_ON); 1443 ATOM_TRANSMITTER_ACTION_POWER_ON);
1308 radeon_dig_connector->edp_on = true; 1444 radeon_dig_connector->edp_on = true;
1309 } 1445 }
1310 dp_link_train(encoder, connector);
1311 if (ASIC_IS_DCE4(rdev)) 1446 if (ASIC_IS_DCE4(rdev))
1312 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1447 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
1448 radeon_dp_link_train(encoder, connector);
1449 if (ASIC_IS_DCE4(rdev))
1450 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1313 } 1451 }
1314 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1452 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1315 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); 1453 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
@@ -1322,7 +1460,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1322 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1460 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1323 1461
1324 if (ASIC_IS_DCE4(rdev)) 1462 if (ASIC_IS_DCE4(rdev))
1325 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1463 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
1326 if (connector && 1464 if (connector &&
1327 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 1465 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
1328 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1466 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1601,12 +1739,9 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1601 /* DCE4/5 */ 1739 /* DCE4/5 */
1602 if (ASIC_IS_DCE4(rdev)) { 1740 if (ASIC_IS_DCE4(rdev)) {
1603 dig = radeon_encoder->enc_priv; 1741 dig = radeon_encoder->enc_priv;
1604 if (ASIC_IS_DCE41(rdev)) { 1742 if (ASIC_IS_DCE41(rdev))
1605 if (dig->linkb) 1743 return radeon_crtc->crtc_id;
1606 return 1; 1744 else {
1607 else
1608 return 0;
1609 } else {
1610 switch (radeon_encoder->encoder_id) { 1745 switch (radeon_encoder->encoder_id) {
1611 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1746 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1612 if (dig->linkb) 1747 if (dig->linkb)
@@ -1662,6 +1797,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1662 return 1; 1797 return 1;
1663} 1798}
1664 1799
1800/* This only needs to be called once at startup */
1801void
1802radeon_atom_encoder_init(struct radeon_device *rdev)
1803{
1804 struct drm_device *dev = rdev->ddev;
1805 struct drm_encoder *encoder;
1806
1807 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1808 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1809 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
1810
1811 switch (radeon_encoder->encoder_id) {
1812 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1813 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1814 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1815 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1816 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1817 break;
1818 default:
1819 break;
1820 }
1821
1822 if (ext_encoder && ASIC_IS_DCE41(rdev))
1823 atombios_external_encoder_setup(encoder, ext_encoder,
1824 EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
1825 }
1826}
1827
1665static void 1828static void
1666radeon_atom_encoder_mode_set(struct drm_encoder *encoder, 1829radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1667 struct drm_display_mode *mode, 1830 struct drm_display_mode *mode,
@@ -1696,19 +1859,17 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1696 /* disable the transmitter */ 1859 /* disable the transmitter */
1697 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1860 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1698 /* setup and enable the encoder */ 1861 /* setup and enable the encoder */
1699 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP); 1862 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1700 1863
1701 /* init and enable the transmitter */ 1864 /* enable the transmitter */
1702 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1703 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1865 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1704 } else { 1866 } else {
1705 /* disable the encoder and transmitter */ 1867 /* disable the encoder and transmitter */
1706 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1868 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1707 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1869 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1708 1870
1709 /* setup and enable the encoder and transmitter */ 1871 /* setup and enable the encoder and transmitter */
1710 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1872 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1711 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1712 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1873 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1713 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1874 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1714 } 1875 }
@@ -1733,12 +1894,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1733 } 1894 }
1734 1895
1735 if (ext_encoder) { 1896 if (ext_encoder) {
1736 if (ASIC_IS_DCE41(rdev)) { 1897 if (ASIC_IS_DCE41(rdev))
1737 atombios_external_encoder_setup(encoder, ext_encoder,
1738 EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
1739 atombios_external_encoder_setup(encoder, ext_encoder, 1898 atombios_external_encoder_setup(encoder, ext_encoder,
1740 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); 1899 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1741 } else 1900 else
1742 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1901 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1743 } 1902 }
1744 1903
@@ -1845,8 +2004,9 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1845 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2004 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1846 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 2005 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1847 2006
1848 if (radeon_encoder->active_device & 2007 if ((radeon_encoder->active_device &
1849 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 2008 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2009 radeon_encoder_is_dp_bridge(encoder)) {
1850 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 2010 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1851 if (dig) 2011 if (dig)
1852 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); 2012 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
@@ -1855,11 +2015,17 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1855 radeon_atom_output_lock(encoder, true); 2015 radeon_atom_output_lock(encoder, true);
1856 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 2016 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1857 2017
1858 /* select the clock/data port if it uses a router */
1859 if (connector) { 2018 if (connector) {
1860 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 2019 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
2020
2021 /* select the clock/data port if it uses a router */
1861 if (radeon_connector->router.cd_valid) 2022 if (radeon_connector->router.cd_valid)
1862 radeon_router_select_cd_port(radeon_connector); 2023 radeon_router_select_cd_port(radeon_connector);
2024
2025 /* turn eDP panel on for mode set */
2026 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2027 atombios_set_edp_panel_power(connector,
2028 ATOM_TRANSMITTER_ACTION_POWER_ON);
1863 } 2029 }
1864 2030
1865 /* this is needed for the pll/ss setup to work correctly in some cases */ 2031 /* this is needed for the pll/ss setup to work correctly in some cases */
@@ -1914,7 +2080,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1914 else { 2080 else {
1915 /* disable the encoder and transmitter */ 2081 /* disable the encoder and transmitter */
1916 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 2082 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1917 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 2083 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1918 } 2084 }
1919 break; 2085 break;
1920 case ENCODER_OBJECT_ID_INTERNAL_DDI: 2086 case ENCODER_OBJECT_ID_INTERNAL_DDI:
@@ -2116,8 +2282,6 @@ radeon_add_atom_encoder(struct drm_device *dev,
2116 } else { 2282 } else {
2117 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2283 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
2118 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2284 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
2119 if (ASIC_IS_AVIVO(rdev))
2120 radeon_encoder->underscan_type = UNDERSCAN_AUTO;
2121 } 2285 }
2122 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2286 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
2123 break; 2287 break;
@@ -2150,8 +2314,6 @@ radeon_add_atom_encoder(struct drm_device *dev,
2150 } else { 2314 } else {
2151 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2315 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
2152 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 2316 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
2153 if (ASIC_IS_AVIVO(rdev))
2154 radeon_encoder->underscan_type = UNDERSCAN_AUTO;
2155 } 2317 }
2156 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2318 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
2157 break; 2319 break;
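
Aside: the bpc-to-ucBitPerColor switch introduced above is repeated for the v3, v4 and external-encoder paths. A minimal sketch of a shared helper capturing the same mapping (hypothetical; radeon_atom_get_bpc_arg is not part of this patch):

static u8 radeon_atom_get_bpc_arg(int bpc)
{
	switch (bpc) {
	case 0:
		return PANEL_BPC_UNDEFINE;
	case 6:
		return PANEL_6BIT_PER_COLOR;
	case 8:
	default:
		return PANEL_8BIT_PER_COLOR;	/* 8 bpc is the safe fallback */
	case 10:
		return PANEL_10BIT_PER_COLOR;
	case 12:
		return PANEL_12BIT_PER_COLOR;
	case 16:
		return PANEL_16BIT_PER_COLOR;
	}
}
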
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 983cbac75af0..781196db792f 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -888,6 +888,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
888 888
889 i2c->rec = *rec; 889 i2c->rec = *rec;
890 i2c->adapter.owner = THIS_MODULE; 890 i2c->adapter.owner = THIS_MODULE;
891 i2c->adapter.class = I2C_CLASS_DDC;
891 i2c->dev = dev; 892 i2c->dev = dev;
892 i2c_set_adapdata(&i2c->adapter, i2c); 893 i2c_set_adapdata(&i2c->adapter, i2c);
893 if (rec->mm_i2c || 894 if (rec->mm_i2c ||
@@ -947,6 +948,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
947 948
948 i2c->rec = *rec; 949 i2c->rec = *rec;
949 i2c->adapter.owner = THIS_MODULE; 950 i2c->adapter.owner = THIS_MODULE;
951 i2c->adapter.class = I2C_CLASS_DDC;
950 i2c->dev = dev; 952 i2c->dev = dev;
951 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 953 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
952 "Radeon aux bus %s", name); 954 "Radeon aux bus %s", name);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9c57538231d5..977a341266b6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -464,22 +464,27 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
464extern struct drm_connector * 464extern struct drm_connector *
465radeon_get_connector_for_encoder(struct drm_encoder *encoder); 465radeon_get_connector_for_encoder(struct drm_encoder *encoder);
466 466
467extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
468extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
469extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
470extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
471
467extern void radeon_connector_hotplug(struct drm_connector *connector); 472extern void radeon_connector_hotplug(struct drm_connector *connector);
468extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); 473extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
469extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
470 struct drm_display_mode *mode); 474 struct drm_display_mode *mode);
471extern void radeon_dp_set_link_config(struct drm_connector *connector, 475extern void radeon_dp_set_link_config(struct drm_connector *connector,
472 struct drm_display_mode *mode); 476 struct drm_display_mode *mode);
473extern void dp_link_train(struct drm_encoder *encoder, 477extern void radeon_dp_link_train(struct drm_encoder *encoder,
474 struct drm_connector *connector); 478 struct drm_connector *connector);
475extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 479extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
476extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 480extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
477extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action); 481extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
482extern void radeon_atom_encoder_init(struct radeon_device *rdev);
478extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 483extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
479 int action, uint8_t lane_num, 484 int action, uint8_t lane_num,
480 uint8_t lane_set); 485 uint8_t lane_set);
481extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 486extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
482 uint8_t write_byte, uint8_t *read_byte); 487 u8 write_byte, u8 *read_byte);
483 488
484extern void radeon_i2c_init(struct radeon_device *rdev); 489extern void radeon_i2c_init(struct radeon_device *rdev);
485extern void radeon_i2c_fini(struct radeon_device *rdev); 490extern void radeon_i2c_fini(struct radeon_device *rdev);
@@ -545,7 +550,7 @@ struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, i
545extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); 550extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
546extern void atombios_digital_setup(struct drm_encoder *encoder, int action); 551extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
547extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 552extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
548extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action); 553extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
549extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 554extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
550 555
551extern void radeon_crtc_load_lut(struct drm_crtc *crtc); 556extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
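
With atombios_set_edp_panel_power() now returning bool (false if HPD never senses the panel during the ~300 ms poll above), a caller can react to a panel that fails to power up. A minimal usage sketch; the error handling shown is illustrative, not from this patch:

	if (!atombios_set_edp_panel_power(connector,
					  ATOM_TRANSMITTER_ACTION_POWER_ON))
		DRM_ERROR("eDP panel did not respond after powering on\n");
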
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 498b284e5ef9..58434e804d91 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -215,7 +215,6 @@ static int vga_switchoff(struct vga_switcheroo_client *client)
215/* stage one happens before delay */ 215/* stage one happens before delay */
216static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) 216static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
217{ 217{
218 int ret;
219 int i; 218 int i;
220 struct vga_switcheroo_client *active = NULL; 219 struct vga_switcheroo_client *active = NULL;
221 220
@@ -228,11 +227,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
228 if (!active) 227 if (!active)
229 return 0; 228 return 0;
230 229
231 /* power up the first device */
232 ret = pci_enable_device(new_client->pdev);
233 if (ret)
234 return ret;
235
236 if (new_client->pwr_state == VGA_SWITCHEROO_OFF) 230 if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
237 vga_switchon(new_client); 231 vga_switchon(new_client);
238 232
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index be8d4cb5861c..8a1021f2e319 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -61,7 +61,7 @@ struct vga_device {
61 unsigned int mem_lock_cnt; /* legacy MEM lock count */ 61 unsigned int mem_lock_cnt; /* legacy MEM lock count */
62 unsigned int io_norm_cnt; /* normal IO count */ 62 unsigned int io_norm_cnt; /* normal IO count */
63 unsigned int mem_norm_cnt; /* normal MEM count */ 63 unsigned int mem_norm_cnt; /* normal MEM count */
64 64 bool bridge_has_one_vga;
65 /* allow IRQ enable/disable hook */ 65 /* allow IRQ enable/disable hook */
66 void *cookie; 66 void *cookie;
67 void (*irq_set_state)(void *cookie, bool enable); 67 void (*irq_set_state)(void *cookie, bool enable);
@@ -165,6 +165,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
165 unsigned int wants, legacy_wants, match; 165 unsigned int wants, legacy_wants, match;
166 struct vga_device *conflict; 166 struct vga_device *conflict;
167 unsigned int pci_bits; 167 unsigned int pci_bits;
168 u32 flags = 0;
169
168 /* Account for "normal" resources to lock. If we decode the legacy, 170 /* Account for "normal" resources to lock. If we decode the legacy,
169 * counterpart, we need to request it as well 171 * counterpart, we need to request it as well
170 */ 172 */
@@ -237,16 +239,23 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
237 /* looks like he doesn't have a lock, we can steal 239 /* looks like he doesn't have a lock, we can steal
238 * them from him 240 * them from him
239 */ 241 */
240 vga_irq_set_state(conflict, false);
241 242
243 flags = 0;
242 pci_bits = 0; 244 pci_bits = 0;
243 if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
244 pci_bits |= PCI_COMMAND_MEMORY;
245 if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
246 pci_bits |= PCI_COMMAND_IO;
247 245
248 pci_set_vga_state(conflict->pdev, false, pci_bits, 246 if (!conflict->bridge_has_one_vga) {
249 change_bridge); 247 vga_irq_set_state(conflict, false);
248 flags |= PCI_VGA_STATE_CHANGE_DECODES;
249 if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
250 pci_bits |= PCI_COMMAND_MEMORY;
251 if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
252 pci_bits |= PCI_COMMAND_IO;
253 }
254
255 if (change_bridge)
256 flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
257
258 pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
250 conflict->owns &= ~lwants; 259 conflict->owns &= ~lwants;
251 /* If he also owned non-legacy, that is no longer the case */ 260 /* If he also owned non-legacy, that is no longer the case */
252 if (lwants & VGA_RSRC_LEGACY_MEM) 261 if (lwants & VGA_RSRC_LEGACY_MEM)
@@ -261,14 +270,24 @@ enable_them:
261 * also have in "decodes". We can lock resources we don't decode but 270 * also have in "decodes". We can lock resources we don't decode but
262 * not own them. 271 * not own them.
263 */ 272 */
273 flags = 0;
264 pci_bits = 0; 274 pci_bits = 0;
265 if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
266 pci_bits |= PCI_COMMAND_MEMORY;
267 if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
268 pci_bits |= PCI_COMMAND_IO;
269 pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK));
270 275
271 vga_irq_set_state(vgadev, true); 276 if (!vgadev->bridge_has_one_vga) {
277 flags |= PCI_VGA_STATE_CHANGE_DECODES;
278 if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
279 pci_bits |= PCI_COMMAND_MEMORY;
280 if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
281 pci_bits |= PCI_COMMAND_IO;
282 }
283 if (!!(wants & VGA_RSRC_LEGACY_MASK))
284 flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
285
286 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
287
288 if (!vgadev->bridge_has_one_vga) {
289 vga_irq_set_state(vgadev, true);
290 }
272 vgadev->owns |= (wants & vgadev->decodes); 291 vgadev->owns |= (wants & vgadev->decodes);
273lock_them: 292lock_them:
274 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); 293 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -421,6 +440,62 @@ bail:
421} 440}
422EXPORT_SYMBOL(vga_put); 441EXPORT_SYMBOL(vga_put);
423 442
443/* Rules for using a bridge to control a VGA descendant's decoding:
444 if a bridge has only one VGA descendant then it can be used
445 to control the VGA routing for that device.
446 We should always use the bridge closest to the device to control it.
447 If a bridge has a direct VGA descendant, but also has a sub-bridge
448 VGA descendant then we cannot use that bridge to control the direct VGA descendant.
449 So for every device we register, we need to iterate all its parent bridges
450 so we can invalidate any devices using them properly.
451*/
452static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
453{
454 struct vga_device *same_bridge_vgadev;
455 struct pci_bus *new_bus, *bus;
456 struct pci_dev *new_bridge, *bridge;
457
458 vgadev->bridge_has_one_vga = true;
459
460 if (list_empty(&vga_list))
461 return;
462
463 /* okay, iterate the new device's bridge hierarchy */
464 new_bus = vgadev->pdev->bus;
465 while (new_bus) {
466 new_bridge = new_bus->self;
467
468 if (new_bridge) {
469 /* go through list of devices already registered */
470 list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
471 bus = same_bridge_vgadev->pdev->bus;
472 bridge = bus->self;
473
474 /* see if they share a bridge with this device */
475 if (new_bridge == bridge) {
476 /* if their direct parent bridge is the same
477 as any bridge of this device then it can't be used
478 for that device */
479 same_bridge_vgadev->bridge_has_one_vga = false;
480 }
481
482 /* now iterate the previous device's bridge hierarchy */
483 /* if the new device's parent bridge is in the other device's
484 hierarchy then we can't use it to control this device */
485 while (bus) {
486 bridge = bus->self;
487 if (bridge) {
488 if (bridge == vgadev->pdev->bus->self)
489 vgadev->bridge_has_one_vga = false;
490 }
491 bus = bus->parent;
492 }
493 }
494 }
495 new_bus = new_bus->parent;
496 }
497}
498
424/* 499/*
425 * Currently, we assume that the "initial" setup of the system is 500 * Currently, we assume that the "initial" setup of the system is
426 * not sane, that is we come up with conflicting devices and let 501 * not sane, that is we come up with conflicting devices and let
@@ -500,6 +575,8 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
500 vga_default = pci_dev_get(pdev); 575 vga_default = pci_dev_get(pdev);
501#endif 576#endif
502 577
578 vga_arbiter_check_bridge_sharing(vgadev);
579
503 /* Add to the list */ 580 /* Add to the list */
504 list_add(&vgadev->list, &vga_list); 581 list_add(&vgadev->list, &vga_list);
505 vga_count++; 582 vga_count++;
@@ -1222,6 +1299,7 @@ static int __init vga_arb_device_init(void)
1222{ 1299{
1223 int rc; 1300 int rc;
1224 struct pci_dev *pdev; 1301 struct pci_dev *pdev;
1302 struct vga_device *vgadev;
1225 1303
1226 rc = misc_register(&vga_arb_device); 1304 rc = misc_register(&vga_arb_device);
1227 if (rc < 0) 1305 if (rc < 0)
@@ -1238,6 +1316,13 @@ static int __init vga_arb_device_init(void)
1238 vga_arbiter_add_pci_device(pdev); 1316 vga_arbiter_add_pci_device(pdev);
1239 1317
1240 pr_info("vgaarb: loaded\n"); 1318 pr_info("vgaarb: loaded\n");
1319
1320 list_for_each_entry(vgadev, &vga_list, list) {
1321 if (vgadev->bridge_has_one_vga)
1322 pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
1323 else
1324 pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
1325 }
1241 return rc; 1326 return rc;
1242} 1327}
1243subsys_initcall(vga_arb_device_init); 1328subsys_initcall(vga_arb_device_init);
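
The parent walk underlying vga_arbiter_check_bridge_sharing() relies on two pci_bus fields: bus->self is the bridge device leading to that bus, and bus->parent is the bus above it (NULL at the root). A minimal sketch of the same traversal pattern:

static void example_walk_bridges(struct pci_dev *pdev)
{
	struct pci_bus *bus;

	for (bus = pdev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;	/* NULL on the root bus */

		if (bridge) {
			/* compare 'bridge' against the bridges of every other
			 * registered VGA device; a bridge shared between two
			 * VGA devices clears bridge_has_one_vga for both */
		}
	}
}
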
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 22c9b27fdd8d..56098b3e17c0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3284,31 +3284,34 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3284 * @dev: the PCI device 3284 * @dev: the PCI device
3285 * @decode: true = enable decoding, false = disable decoding 3285 * @decode: true = enable decoding, false = disable decoding
3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3287 * @change_bridge: traverse ancestors and change bridges 3287 * @flags: PCI_VGA_STATE_CHANGE_DECODES /
3288 * PCI_VGA_STATE_CHANGE_BRIDGE
3288 */ 3289 */
3289int pci_set_vga_state(struct pci_dev *dev, bool decode, 3290int pci_set_vga_state(struct pci_dev *dev, bool decode,
3290 unsigned int command_bits, bool change_bridge) 3291 unsigned int command_bits, u32 flags)
3291{ 3292{
3292 struct pci_bus *bus; 3293 struct pci_bus *bus;
3293 struct pci_dev *bridge; 3294 struct pci_dev *bridge;
3294 u16 cmd; 3295 u16 cmd;
3295 int rc; 3296 int rc;
3296 3297
3297 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); 3298 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3298 3299
3299 /* ARCH specific VGA enables */ 3300 /* ARCH specific VGA enables */
3300 rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge); 3301 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3301 if (rc) 3302 if (rc)
3302 return rc; 3303 return rc;
3303 3304
3304 pci_read_config_word(dev, PCI_COMMAND, &cmd); 3305 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3305 if (decode == true) 3306 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3306 cmd |= command_bits; 3307 if (decode == true)
3307 else 3308 cmd |= command_bits;
3308 cmd &= ~command_bits; 3309 else
3309 pci_write_config_word(dev, PCI_COMMAND, cmd); 3310 cmd &= ~command_bits;
3311 pci_write_config_word(dev, PCI_COMMAND, cmd);
3312 }
3310 3313
3311 if (change_bridge == false) 3314 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3312 return 0; 3315 return 0;
3313 3316
3314 bus = dev->bus; 3317 bus = dev->bus;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 485c09eef424..5cb999b50f95 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -753,4 +753,11 @@ config SAMSUNG_LAPTOP
753 To compile this driver as a module, choose M here: the module 753 To compile this driver as a module, choose M here: the module
754 will be called samsung-laptop. 754 will be called samsung-laptop.
755 755
756config MXM_WMI
757 tristate "WMI support for MXM Laptop Graphics"
758 depends on ACPI_WMI
759 ---help---
760 MXM is a standard for laptop graphics cards; the WMI interface
761 is required for machines with switchable NVIDIA graphics.
762
756endif # X86_PLATFORM_DEVICES 763endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 029e8861d086..a7ab3bc7b3a1 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
42obj-$(CONFIG_IBM_RTL) += ibm_rtl.o 42obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
43obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o 43obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
44obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o 44obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
45obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
new file mode 100644
index 000000000000..0aea63b3729a
--- /dev/null
+++ b/drivers/platform/x86/mxm-wmi.c
@@ -0,0 +1,111 @@
1/*
2 * MXM WMI driver
3 *
4 * Copyright(C) 2010 Red Hat.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <acpi/acpi_bus.h>
24#include <acpi/acpi_drivers.h>
25
26MODULE_AUTHOR("Dave Airlie");
27MODULE_DESCRIPTION("MXM WMI Driver");
28MODULE_LICENSE("GPL");
29
30#define MXM_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
31
32MODULE_ALIAS("wmi:"MXM_WMMX_GUID);
33
34#define MXM_WMMX_FUNC_MXDS 0x5344584D /* "MXDS" */
35#define MXM_WMMX_FUNC_MXMX 0x53445344 /* "MXMX" */
36
37struct mxds_args {
38 u32 func;
39 u32 args;
40 u32 xarg;
41};
42
43int mxm_wmi_call_mxds(int adapter)
44{
45 struct mxds_args args = {
46 .func = MXM_WMMX_FUNC_MXDS,
47 .args = 0,
48 .xarg = 1,
49 };
50 struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
51 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
52 acpi_status status;
53
54 printk("calling mux switch %d\n", adapter);
55
56 status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input,
57 &output);
58
59 if (ACPI_FAILURE(status))
60 return status;
61
62 printk("mux switched %d\n", status);
63 return 0;
64
65}
66EXPORT_SYMBOL_GPL(mxm_wmi_call_mxds);
67
68int mxm_wmi_call_mxmx(int adapter)
69{
70 struct mxds_args args = {
71 .func = MXM_WMMX_FUNC_MXMX,
72 .args = 0,
73 .xarg = 1,
74 };
75 struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
76 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
77 acpi_status status;
78
79 printk("calling mux switch %d\n", adapter);
80
81 status = wmi_evaluate_method(MXM_WMMX_GUID, 0x1, adapter, &input,
82 &output);
83
84 if (ACPI_FAILURE(status))
85 return status;
86
87 printk("mux mutex set switched %d\n", status);
88 return 0;
89
90}
91EXPORT_SYMBOL_GPL(mxm_wmi_call_mxmx);
92
93bool mxm_wmi_supported(void)
94{
95 bool guid_valid;
96 guid_valid = wmi_has_guid(MXM_WMMX_GUID);
97 return guid_valid;
98}
99EXPORT_SYMBOL_GPL(mxm_wmi_supported);
100
101static int __init mxm_wmi_init(void)
102{
103 return 0;
104}
105
106static void __exit mxm_wmi_exit(void)
107{
108}
109
110module_init(mxm_wmi_init);
111module_exit(mxm_wmi_exit);
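
A minimal sketch of a consumer of the new exports, e.g. a GPU driver's mux-switch path; the exact call ordering here is illustrative (the MXMX "mutex" step before MXDS follows the driver's own printk hints):

#include <linux/mxm-wmi.h>

static int example_switch_to_igd(void)
{
	if (!mxm_wmi_supported())
		return -ENODEV;

	mxm_wmi_call_mxmx(MXM_MXDS_ADAPTER_IGD);	/* set the mux mutex */
	return mxm_wmi_call_mxds(MXM_MXDS_ADAPTER_IGD);	/* switch the mux */
}
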
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 202424d17ed7..738b3a5faa12 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -122,10 +122,14 @@ struct drm_device;
122 * using the DRM_DEBUG_KMS and DRM_DEBUG. 122 * using the DRM_DEBUG_KMS and DRM_DEBUG.
123 */ 123 */
124 124
125extern void drm_ut_debug_printk(unsigned int request_level, 125extern __attribute__((format (printf, 4, 5)))
126void drm_ut_debug_printk(unsigned int request_level,
126 const char *prefix, 127 const char *prefix,
127 const char *function_name, 128 const char *function_name,
128 const char *format, ...); 129 const char *format, ...);
130extern __attribute__((format (printf, 2, 3)))
131int drm_err(const char *func, const char *format, ...);
132
129/***********************************************************************/ 133/***********************************************************************/
130/** \name DRM template customization defaults */ 134/** \name DRM template customization defaults */
131/*@{*/ 135/*@{*/
@@ -181,21 +185,11 @@ extern void drm_ut_debug_printk(unsigned int request_level,
181 * \param fmt printf() like format string. 185 * \param fmt printf() like format string.
182 * \param arg arguments 186 * \param arg arguments
183 */ 187 */
184#define DRM_ERROR(fmt, arg...) \ 188#define DRM_ERROR(fmt, ...) \
185 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg) 189 drm_err(__func__, fmt, ##__VA_ARGS__)
186
187/**
188 * Memory error output.
189 *
190 * \param area memory area where the error occurred.
191 * \param fmt printf() like format string.
192 * \param arg arguments
193 */
194#define DRM_MEM_ERROR(area, fmt, arg...) \
195 printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
196 drm_mem_stats[area].name , ##arg)
197 190
198#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) 191#define DRM_INFO(fmt, ...) \
192 printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
199 193
200/** 194/**
201 * Debug output. 195 * Debug output.
@@ -1000,6 +994,22 @@ struct drm_minor {
1000 struct drm_mode_group mode_group; 994 struct drm_mode_group mode_group;
1001}; 995};
1002 996
997/* mode specified on the command line */
998struct drm_cmdline_mode {
999 bool specified;
1000 bool refresh_specified;
1001 bool bpp_specified;
1002 int xres, yres;
1003 int bpp;
1004 int refresh;
1005 bool rb;
1006 bool interlace;
1007 bool cvt;
1008 bool margins;
1009 enum drm_connector_force force;
1010};
1011
1012
1003struct drm_pending_vblank_event { 1013struct drm_pending_vblank_event {
1004 struct drm_pending_event base; 1014 struct drm_pending_event base;
1005 int pipe; 1015 int pipe;
@@ -1395,6 +1405,15 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
1395 struct drm_crtc *refcrtc); 1405 struct drm_crtc *refcrtc);
1396extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); 1406extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
1397 1407
1408extern bool
1409drm_mode_parse_command_line_for_connector(const char *mode_option,
1410 struct drm_connector *connector,
1411 struct drm_cmdline_mode *mode);
1412
1413extern struct drm_display_mode *
1414drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1415 struct drm_cmdline_mode *cmd);
1416
1398/* Modesetting support */ 1417/* Modesetting support */
1399extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 1418extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
1400extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 1419extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
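
A sketch of how a KMS driver might use the relocated command-line parsing, assuming a connector and a video= option string (the option text here is only an example):

	struct drm_cmdline_mode cmdline;
	struct drm_display_mode *mode;

	if (drm_mode_parse_command_line_for_connector("1920x1080@60",
						      connector, &cmdline)) {
		mode = drm_mode_create_from_cmdline_mode(connector->dev,
							 &cmdline);
		if (mode)
			drm_mode_probed_add(connector, mode);
	}
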
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index d94684b7ba34..9573e0ce3120 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -183,7 +183,9 @@ enum subpixel_order {
183 SubPixelNone, 183 SubPixelNone,
184}; 184};
185 185
186 186#define DRM_COLOR_FORMAT_RGB444 (1<<0)
187#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
188#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
187/* 189/*
188 * Describes a given display (e.g. CRT or flat panel) and its limitations. 190 * Describes a given display (e.g. CRT or flat panel) and its limitations.
189 */ 191 */
@@ -198,8 +200,10 @@ struct drm_display_info {
198 unsigned int min_vfreq, max_vfreq; 200 unsigned int min_vfreq, max_vfreq;
199 unsigned int min_hfreq, max_hfreq; 201 unsigned int min_hfreq, max_hfreq;
200 unsigned int pixel_clock; 202 unsigned int pixel_clock;
203 unsigned int bpc;
201 204
202 enum subpixel_order subpixel_order; 205 enum subpixel_order subpixel_order;
206 u32 color_formats;
203 207
204 char *raw_edid; /* if any */ 208 char *raw_edid; /* if any */
205}; 209};
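
A sketch of consuming the new EDID-derived fields; the 10 bpc threshold is only an example:

	struct drm_display_info *info = &connector->display_info;

	if (info->bpc >= 10 &&
	    (info->color_formats & DRM_COLOR_FORMAT_RGB444)) {
		/* sink advertises deep color over plain RGB 4:4:4 */
	}
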
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 83a389e44543..91567bbdb027 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -53,6 +53,7 @@
53 53
54#define DP_MAX_LANE_COUNT 0x002 54#define DP_MAX_LANE_COUNT 0x002
55# define DP_MAX_LANE_COUNT_MASK 0x1f 55# define DP_MAX_LANE_COUNT_MASK 0x1f
56# define DP_TPS3_SUPPORTED (1 << 6)
56# define DP_ENHANCED_FRAME_CAP (1 << 7) 57# define DP_ENHANCED_FRAME_CAP (1 << 7)
57 58
58#define DP_MAX_DOWNSPREAD 0x003 59#define DP_MAX_DOWNSPREAD 0x003
@@ -71,10 +72,13 @@
71 72
72#define DP_MAIN_LINK_CHANNEL_CODING 0x006 73#define DP_MAIN_LINK_CHANNEL_CODING 0x006
73 74
75#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
76
74/* link configuration */ 77/* link configuration */
75#define DP_LINK_BW_SET 0x100 78#define DP_LINK_BW_SET 0x100
76# define DP_LINK_BW_1_62 0x06 79# define DP_LINK_BW_1_62 0x06
77# define DP_LINK_BW_2_7 0x0a 80# define DP_LINK_BW_2_7 0x0a
81# define DP_LINK_BW_5_4 0x14
78 82
79#define DP_LANE_COUNT_SET 0x101 83#define DP_LANE_COUNT_SET 0x101
80# define DP_LANE_COUNT_MASK 0x0f 84# define DP_LANE_COUNT_MASK 0x0f
@@ -84,6 +88,7 @@
84# define DP_TRAINING_PATTERN_DISABLE 0 88# define DP_TRAINING_PATTERN_DISABLE 0
85# define DP_TRAINING_PATTERN_1 1 89# define DP_TRAINING_PATTERN_1 1
86# define DP_TRAINING_PATTERN_2 2 90# define DP_TRAINING_PATTERN_2 2
91# define DP_TRAINING_PATTERN_3 3
87# define DP_TRAINING_PATTERN_MASK 0x3 92# define DP_TRAINING_PATTERN_MASK 0x3
88 93
89# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) 94# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
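
The DP_LINK_BW_* codes encode the per-lane link rate in units of 0.27 GHz, so the new DP_LINK_BW_5_4 (0x14 = 20) gives 20 * 0.27 = 5.4 GHz. A sketch of the conversion:

static int dp_bw_code_to_link_rate_khz(u8 link_bw)
{
	/* 0x06 -> 162000, 0x0a -> 270000, 0x14 -> 540000 */
	return link_bw * 27000;
}
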
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 5881fad91faa..eacb415b309a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -155,12 +155,35 @@ struct detailed_timing {
155#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3) 155#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
156#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) 156#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
157#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) 157#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
158#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */ 158#define DRM_EDID_INPUT_DIGITAL (1 << 7)
159#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
160#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
161#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
162#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
163#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
164#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
165#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
166#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
167#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
168#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
169#define DRM_EDID_DIGITAL_TYPE_DVI (1)
170#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
171#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
172#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
173#define DRM_EDID_DIGITAL_TYPE_DP (5)
159 174
160#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) 175#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
161#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) 176#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
162#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2) 177#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
178/* If analog */
163#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ 179#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
180/* If digital */
181#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
182#define DRM_EDID_FEATURE_RGB (0 << 3)
183#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
184#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
185#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
186
164#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5) 187#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
165#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) 188#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
166#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) 189#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
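
A sketch of decoding bits-per-color from byte 20 of the EDID with the new masks, roughly what a parser filling drm_display_info.bpc would do (the helper name is hypothetical):

static unsigned int example_edid_input_to_bpc(u8 input)
{
	if (!(input & DRM_EDID_INPUT_DIGITAL))
		return 0;	/* analog: the depth bits mean something else */

	switch (input & DRM_EDID_DIGITAL_DEPTH_MASK) {
	case DRM_EDID_DIGITAL_DEPTH_6:	return 6;
	case DRM_EDID_DIGITAL_DEPTH_8:	return 8;
	case DRM_EDID_DIGITAL_DEPTH_10:	return 10;
	case DRM_EDID_DIGITAL_DEPTH_12:	return 12;
	case DRM_EDID_DIGITAL_DEPTH_14:	return 14;
	case DRM_EDID_DIGITAL_DEPTH_16:	return 16;
	default:			return 0;	/* undefined or reserved */
	}
}
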
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index c99c3d3e7811..6e3076ad646e 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -40,20 +40,6 @@ struct drm_fb_helper_crtc {
40 struct drm_display_mode *desired_mode; 40 struct drm_display_mode *desired_mode;
41}; 41};
42 42
43/* mode specified on the command line */
44struct drm_fb_helper_cmdline_mode {
45 bool specified;
46 bool refresh_specified;
47 bool bpp_specified;
48 int xres, yres;
49 int bpp;
50 int refresh;
51 bool rb;
52 bool interlace;
53 bool cvt;
54 bool margins;
55};
56
57struct drm_fb_helper_surface_size { 43struct drm_fb_helper_surface_size {
58 u32 fb_width; 44 u32 fb_width;
59 u32 fb_height; 45 u32 fb_height;
@@ -74,8 +60,8 @@ struct drm_fb_helper_funcs {
74}; 60};
75 61
76struct drm_fb_helper_connector { 62struct drm_fb_helper_connector {
77 struct drm_fb_helper_cmdline_mode cmdline_mode;
78 struct drm_connector *connector; 63 struct drm_connector *connector;
64 struct drm_cmdline_mode cmdline_mode;
79}; 65};
80 66
81struct drm_fb_helper { 67struct drm_fb_helper {
diff --git a/include/linux/mxm-wmi.h b/include/linux/mxm-wmi.h
new file mode 100644
index 000000000000..617a2950523c
--- /dev/null
+++ b/include/linux/mxm-wmi.h
@@ -0,0 +1,33 @@
1/*
2 * MXM WMI driver
3 *
4 * Copyright(C) 2010 Red Hat.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef MXM_WMI_H
22#define MXM_WMI_H
23
24/* discrete adapters */
25#define MXM_MXDS_ADAPTER_0 0x0
26#define MXM_MXDS_ADAPTER_1 0x0
27/* integrated adapter */
28#define MXM_MXDS_ADAPTER_IGD 0x10
29int mxm_wmi_call_mxds(int adapter);
30int mxm_wmi_call_mxmx(int adapter);
31bool mxm_wmi_supported(void);
32
33#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4604d1d5514d..c446b5ca2d38 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -941,8 +941,11 @@ int pci_cfg_space_size_ext(struct pci_dev *dev);
941int pci_cfg_space_size(struct pci_dev *dev); 941int pci_cfg_space_size(struct pci_dev *dev);
942unsigned char pci_bus_max_busnr(struct pci_bus *bus); 942unsigned char pci_bus_max_busnr(struct pci_bus *bus);
943 943
944#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
945#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
946
944int pci_set_vga_state(struct pci_dev *pdev, bool decode, 947int pci_set_vga_state(struct pci_dev *pdev, bool decode,
945 unsigned int command_bits, bool change_bridge); 948 unsigned int command_bits, u32 flags);
946/* kmem_cache style wrapper around pci_alloc_consistent() */ 949/* kmem_cache style wrapper around pci_alloc_consistent() */
947 950
948#include <linux/pci-dma.h> 951#include <linux/pci-dma.h>
@@ -1087,7 +1090,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
1087 1090
1088/* some architectures require additional setup to direct VGA traffic */ 1091/* some architectures require additional setup to direct VGA traffic */
1089typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1092typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1090 unsigned int command_bits, bool change_bridge); 1093 unsigned int command_bits, u32 flags);
1091extern void pci_register_set_vga_state(arch_set_vga_state_t func); 1094extern void pci_register_set_vga_state(arch_set_vga_state_t func);
1092 1095
1093#else /* CONFIG_PCI is not enabled */ 1096#else /* CONFIG_PCI is not enabled */
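
Architectures hooking VGA routing must now accept the u32 flags as well. A minimal sketch of a conforming hook and its registration (names are hypothetical):

static int example_set_vga_state(struct pci_dev *pdev, bool decode,
				 unsigned int command_bits, u32 flags)
{
	/* honor PCI_VGA_STATE_CHANGE_DECODES / _BRIDGE as appropriate */
	return 0;
}

static int __init example_vga_arch_init(void)
{
	pci_register_set_vga_state(example_set_vga_state);
	return 0;
}
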