Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile             |    9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c  |  169
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c       |  275
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h       |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c         |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c    |   16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  |   52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h       |    3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c    |    9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c         |  963
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c        |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h        |  123
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h    |   27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c      |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c        |   56
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c        |  284
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c         |   89
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h         |    5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c     |   18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c       |  118
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c         |  100
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h         |   15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h        |   11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c      |   66
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c      |  210
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c         |   40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h         |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_volt.c       |   46
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c       |   23
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c            |    4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_timer.c         |   84
-rw-r--r--  drivers/gpu/drm/nouveau/nv31_mpeg.c (renamed from drivers/gpu/drm/nouveau/nv40_mpeg.c) | 91
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c            |  338
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c          |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c        |   18
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c       |   76
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c          |   31
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c         |  118
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c         |   11
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c            |    8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c           |   42
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c          |    4
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c            |  402
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fb.c            |   27
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c          |    2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c         |   40
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.h         |    1
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c         |   13
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc       |    8
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h     |   29
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc       |    3
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h     |   16
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c            |  155
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c          |   50
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c       | 1473
55 files changed, 4190 insertions(+), 1593 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 0583677e4581..35ef5b1e3566 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -21,16 +21,17 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
21 nv40_grctx.o nv50_grctx.o nvc0_grctx.o \ 21 nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
22 nv84_crypt.o \ 22 nv84_crypt.o \
23 nva3_copy.o nvc0_copy.o \ 23 nva3_copy.o nvc0_copy.o \
24 nv40_mpeg.o nv50_mpeg.o \ 24 nv31_mpeg.o nv50_mpeg.o \
25 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ 25 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
26 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
27 nv50_cursor.o nv50_display.o \
28 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 26 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
29 nv04_crtc.o nv04_display.o nv04_cursor.o \ 27 nv04_crtc.o nv04_display.o nv04_cursor.o \
28 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
29 nv50_cursor.o nv50_display.o \
30 nvd0_display.o \
30 nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \ 31 nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
31 nv10_gpio.o nv50_gpio.o \ 32 nv10_gpio.o nv50_gpio.o \
32 nv50_calc.o \ 33 nv50_calc.o \
33 nv04_pm.o nv50_pm.o nva3_pm.o \ 34 nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
34 nv50_vram.o nvc0_vram.o \ 35 nv50_vram.o nvc0_vram.o \
35 nv50_vm.o nvc0_vm.o 36 nv50_vm.o nvc0_vm.o
36 37
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 00a55dfdba82..fa22b28e8777 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -37,8 +37,10 @@
37#include "nouveau_drv.h" 37#include "nouveau_drv.h"
38#include "nouveau_drm.h" 38#include "nouveau_drm.h"
39#include "nouveau_reg.h" 39#include "nouveau_reg.h"
40#include "nouveau_encoder.h"
40 41
41static int nv40_get_intensity(struct backlight_device *bd) 42static int
43nv40_get_intensity(struct backlight_device *bd)
42{ 44{
43 struct drm_device *dev = bl_get_data(bd); 45 struct drm_device *dev = bl_get_data(bd);
44 int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) 46 int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
@@ -47,7 +49,8 @@ static int nv40_get_intensity(struct backlight_device *bd)
47 return val; 49 return val;
48} 50}
49 51
50static int nv40_set_intensity(struct backlight_device *bd) 52static int
53nv40_set_intensity(struct backlight_device *bd)
51{ 54{
52 struct drm_device *dev = bl_get_data(bd); 55 struct drm_device *dev = bl_get_data(bd);
53 int val = bd->props.brightness; 56 int val = bd->props.brightness;
@@ -65,30 +68,8 @@ static const struct backlight_ops nv40_bl_ops = {
65 .update_status = nv40_set_intensity, 68 .update_status = nv40_set_intensity,
66}; 69};
67 70
68static int nv50_get_intensity(struct backlight_device *bd) 71static int
69{ 72nv40_backlight_init(struct drm_connector *connector)
70 struct drm_device *dev = bl_get_data(bd);
71
72 return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
73}
74
75static int nv50_set_intensity(struct backlight_device *bd)
76{
77 struct drm_device *dev = bl_get_data(bd);
78 int val = bd->props.brightness;
79
80 nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
81 val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
82 return 0;
83}
84
85static const struct backlight_ops nv50_bl_ops = {
86 .options = BL_CORE_SUSPENDRESUME,
87 .get_brightness = nv50_get_intensity,
88 .update_status = nv50_set_intensity,
89};
90
91static int nouveau_nv40_backlight_init(struct drm_connector *connector)
92{ 73{
93 struct drm_device *dev = connector->dev; 74 struct drm_device *dev = connector->dev;
94 struct drm_nouveau_private *dev_priv = dev->dev_private; 75 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -113,34 +94,129 @@ static int nouveau_nv40_backlight_init(struct drm_connector *connector)
113 return 0; 94 return 0;
114} 95}
115 96
116static int nouveau_nv50_backlight_init(struct drm_connector *connector) 97static int
98nv50_get_intensity(struct backlight_device *bd)
99{
100 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
101 struct drm_device *dev = nv_encoder->base.base.dev;
102 int or = nv_encoder->or;
103 u32 div = 1025;
104 u32 val;
105
106 val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
107 val &= NV50_PDISP_SOR_PWM_CTL_VAL;
108 return ((val * 100) + (div / 2)) / div;
109}
110
111static int
112nv50_set_intensity(struct backlight_device *bd)
113{
114 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
115 struct drm_device *dev = nv_encoder->base.base.dev;
116 int or = nv_encoder->or;
117 u32 div = 1025;
118 u32 val = (bd->props.brightness * div) / 100;
119
120 nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or),
121 NV50_PDISP_SOR_PWM_CTL_NEW | val);
122 return 0;
123}
124
125static const struct backlight_ops nv50_bl_ops = {
126 .options = BL_CORE_SUSPENDRESUME,
127 .get_brightness = nv50_get_intensity,
128 .update_status = nv50_set_intensity,
129};
130
131static int
132nva3_get_intensity(struct backlight_device *bd)
133{
134 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
135 struct drm_device *dev = nv_encoder->base.base.dev;
136 int or = nv_encoder->or;
137 u32 div, val;
138
139 div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
140 val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
141 val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
142 if (div && div >= val)
143 return ((val * 100) + (div / 2)) / div;
144
145 return 100;
146}
147
148static int
149nva3_set_intensity(struct backlight_device *bd)
150{
151 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
152 struct drm_device *dev = nv_encoder->base.base.dev;
153 int or = nv_encoder->or;
154 u32 div, val;
155
156 div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
157 val = (bd->props.brightness * div) / 100;
158 if (div) {
159 nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val |
160 NV50_PDISP_SOR_PWM_CTL_NEW |
161 NVA3_PDISP_SOR_PWM_CTL_UNK);
162 return 0;
163 }
164
165 return -EINVAL;
166}
167
168static const struct backlight_ops nva3_bl_ops = {
169 .options = BL_CORE_SUSPENDRESUME,
170 .get_brightness = nva3_get_intensity,
171 .update_status = nva3_set_intensity,
172};
173
174static int
175nv50_backlight_init(struct drm_connector *connector)
117{ 176{
118 struct drm_device *dev = connector->dev; 177 struct drm_device *dev = connector->dev;
119 struct drm_nouveau_private *dev_priv = dev->dev_private; 178 struct drm_nouveau_private *dev_priv = dev->dev_private;
179 struct nouveau_encoder *nv_encoder;
120 struct backlight_properties props; 180 struct backlight_properties props;
121 struct backlight_device *bd; 181 struct backlight_device *bd;
182 const struct backlight_ops *ops;
183
184 nv_encoder = find_encoder(connector, OUTPUT_LVDS);
185 if (!nv_encoder) {
186 nv_encoder = find_encoder(connector, OUTPUT_DP);
187 if (!nv_encoder)
188 return -ENODEV;
189 }
122 190
123 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT)) 191 if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
124 return 0; 192 return 0;
125 193
194 if (dev_priv->chipset <= 0xa0 ||
195 dev_priv->chipset == 0xaa ||
196 dev_priv->chipset == 0xac)
197 ops = &nv50_bl_ops;
198 else
199 ops = &nva3_bl_ops;
200
126 memset(&props, 0, sizeof(struct backlight_properties)); 201 memset(&props, 0, sizeof(struct backlight_properties));
127 props.type = BACKLIGHT_RAW; 202 props.type = BACKLIGHT_RAW;
128 props.max_brightness = 1025; 203 props.max_brightness = 100;
129 bd = backlight_device_register("nv_backlight", &connector->kdev, dev, 204 bd = backlight_device_register("nv_backlight", &connector->kdev,
130 &nv50_bl_ops, &props); 205 nv_encoder, ops, &props);
131 if (IS_ERR(bd)) 206 if (IS_ERR(bd))
132 return PTR_ERR(bd); 207 return PTR_ERR(bd);
133 208
134 dev_priv->backlight = bd; 209 dev_priv->backlight = bd;
135 bd->props.brightness = nv50_get_intensity(bd); 210 bd->props.brightness = bd->ops->get_brightness(bd);
136 backlight_update_status(bd); 211 backlight_update_status(bd);
137 return 0; 212 return 0;
138} 213}
139 214
140int nouveau_backlight_init(struct drm_connector *connector) 215int
216nouveau_backlight_init(struct drm_device *dev)
141{ 217{
142 struct drm_device *dev = connector->dev;
143 struct drm_nouveau_private *dev_priv = dev->dev_private; 218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 struct drm_connector *connector;
144 220
145#ifdef CONFIG_ACPI 221#ifdef CONFIG_ACPI
146 if (acpi_video_backlight_support()) { 222 if (acpi_video_backlight_support()) {
@@ -150,21 +226,28 @@ int nouveau_backlight_init(struct drm_connector *connector)
150 } 226 }
151#endif 227#endif
152 228
153 switch (dev_priv->card_type) { 229 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
154 case NV_40: 230 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
155 return nouveau_nv40_backlight_init(connector); 231 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
156 case NV_50: 232 continue;
157 return nouveau_nv50_backlight_init(connector); 233
158 default: 234 switch (dev_priv->card_type) {
159 break; 235 case NV_40:
236 return nv40_backlight_init(connector);
237 case NV_50:
238 return nv50_backlight_init(connector);
239 default:
240 break;
241 }
160 } 242 }
161 243
244
162 return 0; 245 return 0;
163} 246}
164 247
165void nouveau_backlight_exit(struct drm_connector *connector) 248void
249nouveau_backlight_exit(struct drm_device *dev)
166{ 250{
167 struct drm_device *dev = connector->dev;
168 struct drm_nouveau_private *dev_priv = dev->dev_private; 251 struct drm_nouveau_private *dev_priv = dev->dev_private;
169 252
170 if (dev_priv->backlight) { 253 if (dev_priv->backlight) {
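
The nv50/nva3 backlight hooks above map a 0-100 brightness scale onto the SOR PWM duty register, rounding to the nearest step: nv50 assumes a fixed divider of 1025, while nva3+ reads the divider back from NV50_PDISP_SOR_PWM_DIV. A minimal standalone sketch of that conversion is shown below; register access is stubbed out, and the pwm_val/pwm_div parameters stand in for the PWM_CTL/PWM_DIV reads.

/* Hedged sketch of the brightness <-> PWM duty conversion used by the
 * nv50/nva3 backlight hooks above.  Register access is stubbed out:
 * pwm_val and pwm_div stand in for the NV50_PDISP_SOR_PWM_CTL/DIV reads.
 */
#include <stdio.h>

/* duty register value -> percentage, rounded to nearest (get_brightness) */
static unsigned int pwm_to_percent(unsigned int pwm_val, unsigned int pwm_div)
{
	if (!pwm_div || pwm_val > pwm_div)
		return 100;	/* nva3 path falls back to full brightness */
	return ((pwm_val * 100) + (pwm_div / 2)) / pwm_div;
}

/* percentage -> duty register value (update_status) */
static unsigned int percent_to_pwm(unsigned int percent, unsigned int pwm_div)
{
	return (percent * pwm_div) / 100;
}

int main(void)
{
	unsigned int div = 1025;	/* fixed divider assumed on nv50 */
	unsigned int duty = percent_to_pwm(60, div);	/* 615 */

	printf("60%% -> duty %u -> %u%%\n", duty, pwm_to_percent(duty, div));
	return 0;
}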
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b311faba34f8..032a82098136 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -296,6 +296,11 @@ munge_reg(struct nvbios *bios, uint32_t reg)
296 if (dev_priv->card_type < NV_50) 296 if (dev_priv->card_type < NV_50)
297 return reg; 297 return reg;
298 298
299 if (reg & 0x80000000) {
300 BUG_ON(bios->display.crtc < 0);
301 reg += bios->display.crtc * 0x800;
302 }
303
299 if (reg & 0x40000000) { 304 if (reg & 0x40000000) {
300 BUG_ON(!dcbent); 305 BUG_ON(!dcbent);
301 306
@@ -304,7 +309,7 @@ munge_reg(struct nvbios *bios, uint32_t reg)
304 reg += 0x00000080; 309 reg += 0x00000080;
305 } 310 }
306 311
307 reg &= ~0x60000000; 312 reg &= ~0xe0000000;
308 return reg; 313 return reg;
309} 314}
310 315
@@ -1174,22 +1179,19 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1174 * 1179 *
1175 */ 1180 */
1176 1181
1177 struct bit_displayport_encoder_table *dpe = NULL;
1178 struct dcb_entry *dcb = bios->display.output; 1182 struct dcb_entry *dcb = bios->display.output;
1179 struct drm_device *dev = bios->dev; 1183 struct drm_device *dev = bios->dev;
1180 uint8_t cond = bios->data[offset + 1]; 1184 uint8_t cond = bios->data[offset + 1];
1181 int dummy; 1185 uint8_t *table, *entry;
1182 1186
1183 BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond); 1187 BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
1184 1188
1185 if (!iexec->execute) 1189 if (!iexec->execute)
1186 return 3; 1190 return 3;
1187 1191
1188 dpe = nouveau_bios_dp_table(dev, dcb, &dummy); 1192 table = nouveau_dp_bios_data(dev, dcb, &entry);
1189 if (!dpe) { 1193 if (!table)
1190 NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
1191 return 3; 1194 return 3;
1192 }
1193 1195
1194 switch (cond) { 1196 switch (cond) {
1195 case 0: 1197 case 0:
@@ -1203,7 +1205,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1203 break; 1205 break;
1204 case 1: 1206 case 1:
1205 case 2: 1207 case 2:
1206 if (!(dpe->unknown & cond)) 1208 if (!(entry[5] & cond))
1207 iexec->execute = false; 1209 iexec->execute = false;
1208 break; 1210 break;
1209 case 5: 1211 case 5:
@@ -3221,6 +3223,49 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3221 return 1; 3223 return 1;
3222} 3224}
3223 3225
3226static void
3227init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
3228{
3229 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
3230 u32 r, s, v;
3231
3232 /* Not a clue, needs de-magicing */
3233 r = nv50_gpio_ctl[gpio->line >> 4];
3234 s = (gpio->line & 0x0f);
3235 v = bios_rd32(bios, r) & ~(0x00010001 << s);
3236 switch ((gpio->entry & 0x06000000) >> 25) {
3237 case 1:
3238 v |= (0x00000001 << s);
3239 break;
3240 case 2:
3241 v |= (0x00010000 << s);
3242 break;
3243 default:
3244 break;
3245 }
3246
3247 bios_wr32(bios, r, v);
3248}
3249
3250static void
3251init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
3252{
3253 u32 v, i;
3254
3255 v = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
3256 v &= 0xffffff00;
3257 v |= (gpio->entry & 0x00ff0000) >> 16;
3258 bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
3259
3260 i = (gpio->entry & 0x1f000000) >> 24;
3261 if (i) {
3262 v = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
3263 v &= 0xffffff00;
3264 v |= gpio->line;
3265 bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
3266 }
3267}
3268
3224static int 3269static int
3225init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 3270init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3226{ 3271{
@@ -3235,7 +3280,6 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3235 3280
3236 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 3281 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
3237 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; 3282 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
3238 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
3239 int i; 3283 int i;
3240 3284
3241 if (dev_priv->card_type < NV_50) { 3285 if (dev_priv->card_type < NV_50) {
@@ -3248,33 +3292,20 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3248 3292
3249 for (i = 0; i < bios->dcb.gpio.entries; i++) { 3293 for (i = 0; i < bios->dcb.gpio.entries; i++) {
3250 struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; 3294 struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
3251 uint32_t r, s, v;
3252 3295
3253 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); 3296 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
3254 3297
3255 BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", 3298 BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
3256 offset, gpio->tag, gpio->state_default); 3299 offset, gpio->tag, gpio->state_default);
3257 if (bios->execute)
3258 pgpio->set(bios->dev, gpio->tag, gpio->state_default);
3259 3300
3260 /* The NVIDIA binary driver doesn't appear to actually do 3301 if (!bios->execute)
3261 * any of this, my VBIOS does however. 3302 continue;
3262 */ 3303
3263 /* Not a clue, needs de-magicing */ 3304 pgpio->set(bios->dev, gpio->tag, gpio->state_default);
3264 r = nv50_gpio_ctl[gpio->line >> 4]; 3305 if (dev_priv->card_type < NV_D0)
3265 s = (gpio->line & 0x0f); 3306 init_gpio_unknv50(bios, gpio);
3266 v = bios_rd32(bios, r) & ~(0x00010001 << s); 3307 else
3267 switch ((gpio->entry & 0x06000000) >> 25) { 3308 init_gpio_unknvd0(bios, gpio);
3268 case 1:
3269 v |= (0x00000001 << s);
3270 break;
3271 case 2:
3272 v |= (0x00010000 << s);
3273 break;
3274 default:
3275 break;
3276 }
3277 bios_wr32(bios, r, v);
3278 } 3309 }
3279 3310
3280 return 1; 3311 return 1;
@@ -3737,6 +3768,10 @@ parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3737 int count = 0, i, ret; 3768 int count = 0, i, ret;
3738 uint8_t id; 3769 uint8_t id;
3739 3770
3771 /* catch NULL script pointers */
3772 if (offset == 0)
3773 return 0;
3774
3740 /* 3775 /*
3741 * Loop until INIT_DONE causes us to break out of the loop 3776 * Loop until INIT_DONE causes us to break out of the loop
3742 * (or until offset > bios length just in case... ) 3777 * (or until offset > bios length just in case... )
@@ -4389,86 +4424,37 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
4389 return 0; 4424 return 0;
4390} 4425}
4391 4426
4392static uint8_t * 4427/* BIT 'U'/'d' table encoder subtables have hashes matching them to
4393bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, 4428 * a particular set of encoders.
4394 uint16_t record, int record_len, int record_nr, 4429 *
4395 bool match_link) 4430 * This function returns true if a particular DCB entry matches.
4431 */
4432bool
4433bios_encoder_match(struct dcb_entry *dcb, u32 hash)
4396{ 4434{
4397 struct drm_nouveau_private *dev_priv = dev->dev_private; 4435 if ((hash & 0x000000f0) != (dcb->location << 4))
4398 struct nvbios *bios = &dev_priv->vbios; 4436 return false;
4399 uint32_t entry; 4437 if ((hash & 0x0000000f) != dcb->type)
4400 uint16_t table; 4438 return false;
4401 int i, v; 4439 if (!(hash & (dcb->or << 16)))
4440 return false;
4402 4441
4403 switch (dcbent->type) { 4442 switch (dcb->type) {
4404 case OUTPUT_TMDS: 4443 case OUTPUT_TMDS:
4405 case OUTPUT_LVDS: 4444 case OUTPUT_LVDS:
4406 case OUTPUT_DP: 4445 case OUTPUT_DP:
4407 break; 4446 if (hash & 0x00c00000) {
4408 default: 4447 if (!(hash & (dcb->sorconf.link << 22)))
4409 match_link = false; 4448 return false;
4410 break;
4411 }
4412
4413 for (i = 0; i < record_nr; i++, record += record_len) {
4414 table = ROM16(bios->data[record]);
4415 if (!table)
4416 continue;
4417 entry = ROM32(bios->data[table]);
4418
4419 if (match_link) {
4420 v = (entry & 0x00c00000) >> 22;
4421 if (!(v & dcbent->sorconf.link))
4422 continue;
4423 } 4449 }
4424 4450 default:
4425 v = (entry & 0x000f0000) >> 16; 4451 return true;
4426 if (!(v & dcbent->or))
4427 continue;
4428
4429 v = (entry & 0x000000f0) >> 4;
4430 if (v != dcbent->location)
4431 continue;
4432
4433 v = (entry & 0x0000000f);
4434 if (v != dcbent->type)
4435 continue;
4436
4437 return &bios->data[table];
4438 }
4439
4440 return NULL;
4441}
4442
4443void *
4444nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
4445 int *length)
4446{
4447 struct drm_nouveau_private *dev_priv = dev->dev_private;
4448 struct nvbios *bios = &dev_priv->vbios;
4449 uint8_t *table;
4450
4451 if (!bios->display.dp_table_ptr) {
4452 NV_ERROR(dev, "No pointer to DisplayPort table\n");
4453 return NULL;
4454 }
4455 table = &bios->data[bios->display.dp_table_ptr];
4456
4457 if (table[0] != 0x20 && table[0] != 0x21) {
4458 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
4459 table[0]);
4460 return NULL;
4461 } 4452 }
4462
4463 *length = table[4];
4464 return bios_output_config_match(dev, dcbent,
4465 bios->display.dp_table_ptr + table[1],
4466 table[2], table[3], table[0] >= 0x21);
4467} 4453}
4468 4454
4469int 4455int
4470nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, 4456nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4471 uint32_t sub, int pxclk) 4457 struct dcb_entry *dcbent, int crtc)
4472{ 4458{
4473 /* 4459 /*
4474 * The display script table is located by the BIT 'U' table. 4460 * The display script table is located by the BIT 'U' table.
@@ -4498,7 +4484,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4498 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 4484 uint8_t *table = &bios->data[bios->display.script_table_ptr];
4499 uint8_t *otable = NULL; 4485 uint8_t *otable = NULL;
4500 uint16_t script; 4486 uint16_t script;
4501 int i = 0; 4487 int i;
4502 4488
4503 if (!bios->display.script_table_ptr) { 4489 if (!bios->display.script_table_ptr) {
4504 NV_ERROR(dev, "No pointer to output script table\n"); 4490 NV_ERROR(dev, "No pointer to output script table\n");
@@ -4550,30 +4536,33 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4550 4536
4551 NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n", 4537 NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
4552 dcbent->type, dcbent->location, dcbent->or); 4538 dcbent->type, dcbent->location, dcbent->or);
4553 otable = bios_output_config_match(dev, dcbent, table[1] + 4539 for (i = 0; i < table[3]; i++) {
4554 bios->display.script_table_ptr, 4540 otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
4555 table[2], table[3], table[0] >= 0x21); 4541 if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
4542 break;
4543 }
4544
4556 if (!otable) { 4545 if (!otable) {
4557 NV_DEBUG_KMS(dev, "failed to match any output table\n"); 4546 NV_DEBUG_KMS(dev, "failed to match any output table\n");
4558 return 1; 4547 return 1;
4559 } 4548 }
4560 4549
4561 if (pxclk < -2 || pxclk > 0) { 4550 if (pclk < -2 || pclk > 0) {
4562 /* Try to find matching script table entry */ 4551 /* Try to find matching script table entry */
4563 for (i = 0; i < otable[5]; i++) { 4552 for (i = 0; i < otable[5]; i++) {
4564 if (ROM16(otable[table[4] + i*6]) == sub) 4553 if (ROM16(otable[table[4] + i*6]) == type)
4565 break; 4554 break;
4566 } 4555 }
4567 4556
4568 if (i == otable[5]) { 4557 if (i == otable[5]) {
4569 NV_ERROR(dev, "Table 0x%04x not found for %d/%d, " 4558 NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
4570 "using first\n", 4559 "using first\n",
4571 sub, dcbent->type, dcbent->or); 4560 type, dcbent->type, dcbent->or);
4572 i = 0; 4561 i = 0;
4573 } 4562 }
4574 } 4563 }
4575 4564
4576 if (pxclk == 0) { 4565 if (pclk == 0) {
4577 script = ROM16(otable[6]); 4566 script = ROM16(otable[6]);
4578 if (!script) { 4567 if (!script) {
4579 NV_DEBUG_KMS(dev, "output script 0 not found\n"); 4568 NV_DEBUG_KMS(dev, "output script 0 not found\n");
@@ -4581,9 +4570,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4581 } 4570 }
4582 4571
4583 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script); 4572 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
4584 nouveau_bios_run_init_table(dev, script, dcbent); 4573 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4585 } else 4574 } else
4586 if (pxclk == -1) { 4575 if (pclk == -1) {
4587 script = ROM16(otable[8]); 4576 script = ROM16(otable[8]);
4588 if (!script) { 4577 if (!script) {
4589 NV_DEBUG_KMS(dev, "output script 1 not found\n"); 4578 NV_DEBUG_KMS(dev, "output script 1 not found\n");
@@ -4591,9 +4580,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4591 } 4580 }
4592 4581
4593 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script); 4582 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
4594 nouveau_bios_run_init_table(dev, script, dcbent); 4583 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4595 } else 4584 } else
4596 if (pxclk == -2) { 4585 if (pclk == -2) {
4597 if (table[4] >= 12) 4586 if (table[4] >= 12)
4598 script = ROM16(otable[10]); 4587 script = ROM16(otable[10]);
4599 else 4588 else
@@ -4604,31 +4593,31 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4604 } 4593 }
4605 4594
4606 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script); 4595 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
4607 nouveau_bios_run_init_table(dev, script, dcbent); 4596 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4608 } else 4597 } else
4609 if (pxclk > 0) { 4598 if (pclk > 0) {
4610 script = ROM16(otable[table[4] + i*6 + 2]); 4599 script = ROM16(otable[table[4] + i*6 + 2]);
4611 if (script) 4600 if (script)
4612 script = clkcmptable(bios, script, pxclk); 4601 script = clkcmptable(bios, script, pclk);
4613 if (!script) { 4602 if (!script) {
4614 NV_DEBUG_KMS(dev, "clock script 0 not found\n"); 4603 NV_DEBUG_KMS(dev, "clock script 0 not found\n");
4615 return 1; 4604 return 1;
4616 } 4605 }
4617 4606
4618 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script); 4607 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
4619 nouveau_bios_run_init_table(dev, script, dcbent); 4608 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4620 } else 4609 } else
4621 if (pxclk < 0) { 4610 if (pclk < 0) {
4622 script = ROM16(otable[table[4] + i*6 + 4]); 4611 script = ROM16(otable[table[4] + i*6 + 4]);
4623 if (script) 4612 if (script)
4624 script = clkcmptable(bios, script, -pxclk); 4613 script = clkcmptable(bios, script, -pclk);
4625 if (!script) { 4614 if (!script) {
4626 NV_DEBUG_KMS(dev, "clock script 1 not found\n"); 4615 NV_DEBUG_KMS(dev, "clock script 1 not found\n");
4627 return 1; 4616 return 1;
4628 } 4617 }
4629 4618
4630 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script); 4619 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
4631 nouveau_bios_run_init_table(dev, script, dcbent); 4620 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4632 } 4621 }
4633 4622
4634 return 0; 4623 return 0;
@@ -5478,14 +5467,6 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5478 return 0; 5467 return 0;
5479} 5468}
5480 5469
5481static int
5482parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5483 struct bit_entry *bitentry)
5484{
5485 bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
5486 return 0;
5487}
5488
5489struct bit_table { 5470struct bit_table {
5490 const char id; 5471 const char id;
5491 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); 5472 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -5559,7 +5540,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
5559 parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); 5540 parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
5560 parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); 5541 parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
5561 parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U)); 5542 parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
5562 parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
5563 5543
5564 return 0; 5544 return 0;
5565} 5545}
@@ -5884,9 +5864,15 @@ parse_dcb_gpio_table(struct nvbios *bios)
5884 } 5864 }
5885 5865
5886 e->line = (e->entry & 0x0000001f) >> 0; 5866 e->line = (e->entry & 0x0000001f) >> 0;
5887 e->state_default = (e->entry & 0x01000000) >> 24; 5867 if (gpio[0] == 0x40) {
5888 e->state[0] = (e->entry & 0x18000000) >> 27; 5868 e->state_default = (e->entry & 0x01000000) >> 24;
5889 e->state[1] = (e->entry & 0x60000000) >> 29; 5869 e->state[0] = (e->entry & 0x18000000) >> 27;
5870 e->state[1] = (e->entry & 0x60000000) >> 29;
5871 } else {
5872 e->state_default = (e->entry & 0x00000080) >> 7;
5873 e->state[0] = (entry[4] >> 4) & 3;
5874 e->state[1] = (entry[4] >> 6) & 3;
5875 }
5890 } 5876 }
5891 } 5877 }
5892 5878
@@ -6156,7 +6142,14 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
6156 } 6142 }
6157 case OUTPUT_DP: 6143 case OUTPUT_DP:
6158 entry->dpconf.sor.link = (conf & 0x00000030) >> 4; 6144 entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
6159 entry->dpconf.link_bw = (conf & 0x00e00000) >> 21; 6145 switch ((conf & 0x00e00000) >> 21) {
6146 case 0:
6147 entry->dpconf.link_bw = 162000;
6148 break;
6149 default:
6150 entry->dpconf.link_bw = 270000;
6151 break;
6152 }
6160 switch ((conf & 0x0f000000) >> 24) { 6153 switch ((conf & 0x0f000000) >> 24) {
6161 case 0xf: 6154 case 0xf:
6162 entry->dpconf.link_nr = 4; 6155 entry->dpconf.link_nr = 4;
@@ -6769,7 +6762,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
6769 6762
6770void 6763void
6771nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, 6764nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
6772 struct dcb_entry *dcbent) 6765 struct dcb_entry *dcbent, int crtc)
6773{ 6766{
6774 struct drm_nouveau_private *dev_priv = dev->dev_private; 6767 struct drm_nouveau_private *dev_priv = dev->dev_private;
6775 struct nvbios *bios = &dev_priv->vbios; 6768 struct nvbios *bios = &dev_priv->vbios;
@@ -6777,11 +6770,22 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
6777 6770
6778 spin_lock_bh(&bios->lock); 6771 spin_lock_bh(&bios->lock);
6779 bios->display.output = dcbent; 6772 bios->display.output = dcbent;
6773 bios->display.crtc = crtc;
6780 parse_init_table(bios, table, &iexec); 6774 parse_init_table(bios, table, &iexec);
6781 bios->display.output = NULL; 6775 bios->display.output = NULL;
6782 spin_unlock_bh(&bios->lock); 6776 spin_unlock_bh(&bios->lock);
6783} 6777}
6784 6778
6779void
6780nouveau_bios_init_exec(struct drm_device *dev, uint16_t table)
6781{
6782 struct drm_nouveau_private *dev_priv = dev->dev_private;
6783 struct nvbios *bios = &dev_priv->vbios;
6784 struct init_exec iexec = { true, false };
6785
6786 parse_init_table(bios, table, &iexec);
6787}
6788
6785static bool NVInitVBIOS(struct drm_device *dev) 6789static bool NVInitVBIOS(struct drm_device *dev)
6786{ 6790{
6787 struct drm_nouveau_private *dev_priv = dev->dev_private; 6791 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -6863,9 +6867,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
6863 6867
6864 if (dev_priv->card_type >= NV_50) { 6868 if (dev_priv->card_type >= NV_50) {
6865 for (i = 0; i < bios->dcb.entries; i++) { 6869 for (i = 0; i < bios->dcb.entries; i++) {
6866 nouveau_bios_run_display_table(dev, 6870 nouveau_bios_run_display_table(dev, 0, 0,
6867 &bios->dcb.entry[i], 6871 &bios->dcb.entry[i], -1);
6868 0, 0);
6869 } 6872 }
6870 } 6873 }
6871 6874
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 050c314119df..8adb69e4a6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -289,8 +289,8 @@ struct nvbios {
289 289
290 struct { 290 struct {
291 struct dcb_entry *output; 291 struct dcb_entry *output;
292 int crtc;
292 uint16_t script_table_ptr; 293 uint16_t script_table_ptr;
293 uint16_t dp_table_ptr;
294 } display; 294 } display;
295 295
296 struct { 296 struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 890d50e4d682..7226f419e178 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -956,7 +956,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
956 break; 956 break;
957 } 957 }
958 958
959 if (dev_priv->card_type == NV_C0) 959 if (dev_priv->card_type >= NV_C0)
960 page_shift = node->page_shift; 960 page_shift = node->page_shift;
961 else 961 else
962 page_shift = 12; 962 page_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index b0d753f45bbd..a319d5646ea9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -411,13 +411,17 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
411 return ret; 411 return ret;
412 init->channel = chan->id; 412 init->channel = chan->id;
413 413
414 if (chan->dma.ib_max) 414 if (nouveau_vram_pushbuf == 0) {
415 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | 415 if (chan->dma.ib_max)
416 NOUVEAU_GEM_DOMAIN_GART; 416 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
417 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) 417 NOUVEAU_GEM_DOMAIN_GART;
418 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
419 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
420 else
421 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
422 } else {
418 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; 423 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
419 else 424 }
420 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
421 425
422 if (dev_priv->card_type < NV_C0) { 426 if (dev_priv->card_type < NV_C0) {
423 init->subchan[0].handle = NvM2MF; 427 init->subchan[0].handle = NvM2MF;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 939d4df07777..e0d275e1c96c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -39,7 +39,7 @@
39 39
40static void nouveau_connector_hotplug(void *, int); 40static void nouveau_connector_hotplug(void *, int);
41 41
42static struct nouveau_encoder * 42struct nouveau_encoder *
43find_encoder(struct drm_connector *connector, int type) 43find_encoder(struct drm_connector *connector, int type)
44{ 44{
45 struct drm_device *dev = connector->dev; 45 struct drm_device *dev = connector->dev;
@@ -116,10 +116,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
116 nouveau_connector_hotplug, connector); 116 nouveau_connector_hotplug, connector);
117 } 117 }
118 118
119 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
120 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
121 nouveau_backlight_exit(connector);
122
123 kfree(nv_connector->edid); 119 kfree(nv_connector->edid);
124 drm_sysfs_connector_remove(connector); 120 drm_sysfs_connector_remove(connector);
125 drm_connector_cleanup(connector); 121 drm_connector_cleanup(connector);
@@ -712,11 +708,8 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
712 case OUTPUT_TV: 708 case OUTPUT_TV:
713 return get_slave_funcs(encoder)->mode_valid(encoder, mode); 709 return get_slave_funcs(encoder)->mode_valid(encoder, mode);
714 case OUTPUT_DP: 710 case OUTPUT_DP:
715 if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7) 711 max_clock = nv_encoder->dp.link_nr;
716 max_clock = nv_encoder->dp.link_nr * 270000; 712 max_clock *= nv_encoder->dp.link_bw;
717 else
718 max_clock = nv_encoder->dp.link_nr * 162000;
719
720 clock = clock * nouveau_connector_bpp(connector) / 8; 713 clock = clock * nouveau_connector_bpp(connector) / 8;
721 break; 714 break;
722 default: 715 default:
@@ -871,7 +864,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
871 dev->mode_config.scaling_mode_property, 864 dev->mode_config.scaling_mode_property,
872 nv_connector->scaling_mode); 865 nv_connector->scaling_mode);
873 } 866 }
874 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
875 /* fall-through */ 867 /* fall-through */
876 case DCB_CONNECTOR_TV_0: 868 case DCB_CONNECTOR_TV_0:
877 case DCB_CONNECTOR_TV_1: 869 case DCB_CONNECTOR_TV_1:
@@ -888,27 +880,20 @@ nouveau_connector_create(struct drm_device *dev, int index)
888 dev->mode_config.dithering_mode_property, 880 dev->mode_config.dithering_mode_property,
889 nv_connector->use_dithering ? 881 nv_connector->use_dithering ?
890 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); 882 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
891
892 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) {
893 if (dev_priv->card_type >= NV_50)
894 connector->polled = DRM_CONNECTOR_POLL_HPD;
895 else
896 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
897 }
898 break; 883 break;
899 } 884 }
900 885
901 if (pgpio->irq_register) { 886 if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
902 pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, 887 pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
903 nouveau_connector_hotplug, connector); 888 nouveau_connector_hotplug, connector);
889
890 connector->polled = DRM_CONNECTOR_POLL_HPD;
891 } else {
892 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
904 } 893 }
905 894
906 drm_sysfs_connector_add(connector); 895 drm_sysfs_connector_add(connector);
907 896
908 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
909 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
910 nouveau_backlight_init(connector);
911
912 dcb->drm = connector; 897 dcb->drm = connector;
913 return dcb->drm; 898 return dcb->drm;
914 899
@@ -925,22 +910,13 @@ nouveau_connector_hotplug(void *data, int plugged)
925 struct drm_connector *connector = data; 910 struct drm_connector *connector = data;
926 struct drm_device *dev = connector->dev; 911 struct drm_device *dev = connector->dev;
927 912
928 NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", 913 NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un",
929 drm_get_connector_name(connector)); 914 drm_get_connector_name(connector));
930
931 if (connector->encoder && connector->encoder->crtc &&
932 connector->encoder->crtc->enabled) {
933 struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
934 struct drm_encoder_helper_funcs *helper =
935 connector->encoder->helper_private;
936 915
937 if (nv_encoder->dcb->type == OUTPUT_DP) { 916 if (plugged)
938 if (plugged) 917 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
939 helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); 918 else
940 else 919 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
941 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
942 }
943 }
944 920
945 drm_helper_hpd_irq_event(dev); 921 drm_helper_hpd_irq_event(dev);
946} 922}
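
The mode_valid() change above takes the per-lane rate directly from nv_encoder->dp.link_bw (now stored as 162000 or 270000 by the DCB parser) instead of re-deriving it from DP_LINK_BW_2_7. A back-of-the-envelope version of the comparison, using hypothetical mode numbers, is sketched below.

/* Worked example of the DP bandwidth check above, with hypothetical
 * numbers: a 148.5 MHz (1080p60) mode at 24bpp on a 2-lane 2.7 Gbps link.
 */
#include <stdio.h>

int main(void)
{
	unsigned int link_nr = 2;	/* lanes reported for the sink      */
	unsigned int link_bw = 270000;	/* per-lane rate, same units as     */
					/* nv_encoder->dp.link_bw           */
	unsigned int clock   = 148500;	/* mode->clock in kHz               */
	unsigned int bpp     = 24;

	unsigned int max_clock = link_nr * link_bw;	/* 540000 */
	unsigned int data_rate = clock * bpp / 8;	/* 445500 */

	printf("%s\n", data_rate <= max_clock ? "mode fits" : "mode too fast");
	return 0;
}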
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index cb1ce2a09162..bf8e1289953d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,14 +82,13 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
82} 82}
83 83
84int nv50_crtc_create(struct drm_device *dev, int index); 84int nv50_crtc_create(struct drm_device *dev, int index);
85int nv50_cursor_init(struct nouveau_crtc *);
86void nv50_cursor_fini(struct nouveau_crtc *);
87int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv, 85int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
88 uint32_t buffer_handle, uint32_t width, 86 uint32_t buffer_handle, uint32_t width,
89 uint32_t height); 87 uint32_t height);
90int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y); 88int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
91 89
92int nv04_cursor_init(struct nouveau_crtc *); 90int nv04_cursor_init(struct nouveau_crtc *);
91int nv50_cursor_init(struct nouveau_crtc *);
93 92
94struct nouveau_connector * 93struct nouveau_connector *
95nouveau_crtc_connector_get(struct nouveau_crtc *crtc); 94nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index eb514ea29377..ddbabefb4273 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -105,9 +105,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
105 if (dev_priv->chipset == 0x50) 105 if (dev_priv->chipset == 0x50)
106 nv_fb->r_format |= (tile_flags << 8); 106 nv_fb->r_format |= (tile_flags << 8);
107 107
108 if (!tile_flags) 108 if (!tile_flags) {
109 nv_fb->r_pitch = 0x00100000 | fb->pitch; 109 if (dev_priv->card_type < NV_D0)
110 else { 110 nv_fb->r_pitch = 0x00100000 | fb->pitch;
111 else
112 nv_fb->r_pitch = 0x01000000 | fb->pitch;
113 } else {
111 u32 mode = nvbo->tile_mode; 114 u32 mode = nvbo->tile_mode;
112 if (dev_priv->card_type >= NV_C0) 115 if (dev_priv->card_type >= NV_C0)
113 mode >>= 4; 116 mode >>= 4;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7beb82a0315d..de5efe71fefd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -28,418 +28,619 @@
28#include "nouveau_i2c.h" 28#include "nouveau_i2c.h"
29#include "nouveau_connector.h" 29#include "nouveau_connector.h"
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h"
32
33/******************************************************************************
34 * aux channel util functions
35 *****************************************************************************/
36#define AUX_DBG(fmt, args...) do { \
37 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) { \
38 NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args); \
39 } \
40} while (0)
41#define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args)
42
43static void
44auxch_fini(struct drm_device *dev, int ch)
45{
46 nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
47}
31 48
32static int 49static int
33auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size) 50auxch_init(struct drm_device *dev, int ch)
34{ 51{
35 struct drm_device *dev = encoder->dev; 52 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
36 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 53 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
37 struct nouveau_i2c_chan *auxch; 54 const u32 urep = unksel ? 0x01000000 : 0x02000000;
38 int ret; 55 u32 ctrl, timeout;
39 56
40 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 57 /* wait up to 1ms for any previous transaction to be done... */
41 if (!auxch) 58 timeout = 1000;
42 return -ENODEV; 59 do {
43 60 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
44 ret = nouveau_dp_auxch(auxch, 9, address, buf, size); 61 udelay(1);
45 if (ret) 62 if (!timeout--) {
46 return ret; 63 AUX_ERR("begin idle timeout 0x%08x", ctrl);
64 return -EBUSY;
65 }
66 } while (ctrl & 0x03010000);
67
68 /* set some magic, and wait up to 1ms for it to appear */
69 nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
70 timeout = 1000;
71 do {
72 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
73 udelay(1);
74 if (!timeout--) {
75 AUX_ERR("magic wait 0x%08x\n", ctrl);
76 auxch_fini(dev, ch);
77 return -EBUSY;
78 }
79 } while ((ctrl & 0x03000000) != urep);
47 80
48 return 0; 81 return 0;
49} 82}
50 83
51static int 84static int
52auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size) 85auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size)
53{ 86{
54 struct drm_device *dev = encoder->dev; 87 u32 ctrl, stat, timeout, retries;
55 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 88 u32 xbuf[4] = {};
56 struct nouveau_i2c_chan *auxch; 89 int ret, i;
57 int ret;
58 90
59 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 91 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
60 if (!auxch)
61 return -ENODEV;
62 92
63 ret = nouveau_dp_auxch(auxch, 8, address, buf, size); 93 ret = auxch_init(dev, ch);
64 return ret; 94 if (ret)
65} 95 goto out;
66 96
67static int 97 stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50));
68nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd) 98 if (!(stat & 0x10000000)) {
69{ 99 AUX_DBG("sink not detected\n");
70 struct drm_device *dev = encoder->dev; 100 ret = -ENXIO;
71 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 101 goto out;
72 uint32_t tmp; 102 }
73 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
74
75 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
76 tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
77 NV50_SOR_DP_CTRL_LANE_MASK);
78 tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
79 if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
80 tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
81 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
82
83 return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
84}
85 103
86static int 104 if (!(type & 1)) {
87nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd) 105 memcpy(xbuf, data, size);
88{ 106 for (i = 0; i < 16; i += 4) {
89 struct drm_device *dev = encoder->dev; 107 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
90 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 108 nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
91 uint32_t tmp; 109 }
92 int reg = 0x614300 + (nv_encoder->or * 0x800); 110 }
93 111
94 tmp = nv_rd32(dev, reg); 112 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
95 tmp &= 0xfff3ffff; 113 ctrl &= ~0x0001f0ff;
96 if (cmd == DP_LINK_BW_2_7) 114 ctrl |= type << 12;
97 tmp |= 0x00040000; 115 ctrl |= size - 1;
98 nv_wr32(dev, reg, tmp); 116 nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr);
117
118 /* retry transaction a number of times on failure... */
119 ret = -EREMOTEIO;
120 for (retries = 0; retries < 32; retries++) {
121 /* reset, and delay a while if this is a retry */
122 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
123 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
124 if (retries)
125 udelay(400);
126
127 /* transaction request, wait up to 1ms for it to complete */
128 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
129
130 timeout = 1000;
131 do {
132 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
133 udelay(1);
134 if (!timeout--) {
135 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
136 goto out;
137 }
138 } while (ctrl & 0x00010000);
99 139
100 return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1); 140 /* read status, and check if transaction completed ok */
101} 141 stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0);
142 if (!(stat & 0x000f0f00)) {
143 ret = 0;
144 break;
145 }
102 146
103static int 147 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
104nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern) 148 }
105{
106 struct drm_device *dev = encoder->dev;
107 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
108 uint32_t tmp;
109 uint8_t cmd;
110 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
111 int ret;
112 149
113 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); 150 if (type & 1) {
114 tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN; 151 for (i = 0; i < 16; i += 4) {
115 tmp |= (pattern << 24); 152 xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i);
116 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); 153 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
154 }
155 memcpy(data, xbuf, size);
156 }
117 157
118 ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1); 158out:
119 if (ret) 159 auxch_fini(dev, ch);
120 return ret; 160 return ret;
121 cmd &= ~DP_TRAINING_PATTERN_MASK;
122 cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
123 return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
124} 161}
125 162
126static int 163static u32
127nouveau_dp_max_voltage_swing(struct drm_encoder *encoder) 164dp_link_bw_get(struct drm_device *dev, int or, int link)
128{ 165{
129 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 166 u32 ctrl = nv_rd32(dev, 0x614300 + (or * 0x800));
130 struct drm_device *dev = encoder->dev; 167 if (!(ctrl & 0x000c0000))
131 struct bit_displayport_encoder_table_entry *dpse; 168 return 162000;
132 struct bit_displayport_encoder_table *dpe; 169 return 270000;
133 int i, dpe_headerlen, max_vs = 0; 170}
134
135 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
136 if (!dpe)
137 return false;
138 dpse = (void *)((char *)dpe + dpe_headerlen);
139 171
140 for (i = 0; i < dpe_headerlen; i++, dpse++) { 172static int
141 if (dpse->vs_level > max_vs) 173dp_lane_count_get(struct drm_device *dev, int or, int link)
142 max_vs = dpse->vs_level; 174{
175 u32 ctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
176 switch (ctrl & 0x000f0000) {
177 case 0x00010000: return 1;
178 case 0x00030000: return 2;
179 default:
180 return 4;
143 } 181 }
144
145 return max_vs;
146} 182}
147 183
148static int 184void
149nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs) 185nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
150{ 186{
151 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 187 const u32 symbol = 100000;
152 struct drm_device *dev = encoder->dev; 188 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
153 struct bit_displayport_encoder_table_entry *dpse; 189 int TU, VTUi, VTUf, VTUa;
154 struct bit_displayport_encoder_table *dpe; 190 u64 link_data_rate, link_ratio, unk;
155 int i, dpe_headerlen, max_pre = 0; 191 u32 best_diff = 64 * symbol;
192 u32 link_nr, link_bw, r;
193
194 /* calculate packed data rate for each lane */
195 link_nr = dp_lane_count_get(dev, or, link);
196 link_data_rate = (clk * bpp / 8) / link_nr;
197
198 /* calculate ratio of packed data rate to link symbol rate */
199 link_bw = dp_link_bw_get(dev, or, link);
200 link_ratio = link_data_rate * symbol;
201 r = do_div(link_ratio, link_bw);
202
203 for (TU = 64; TU >= 32; TU--) {
204 /* calculate average number of valid symbols in each TU */
205 u32 tu_valid = link_ratio * TU;
206 u32 calc, diff;
207
208 /* find a hw representation for the fraction.. */
209 VTUi = tu_valid / symbol;
210 calc = VTUi * symbol;
211 diff = tu_valid - calc;
212 if (diff) {
213 if (diff >= (symbol / 2)) {
214 VTUf = symbol / (symbol - diff);
215 if (symbol - (VTUf * diff))
216 VTUf++;
217
218 if (VTUf <= 15) {
219 VTUa = 1;
220 calc += symbol - (symbol / VTUf);
221 } else {
222 VTUa = 0;
223 VTUf = 1;
224 calc += symbol;
225 }
226 } else {
227 VTUa = 0;
228 VTUf = min((int)(symbol / diff), 15);
229 calc += symbol / VTUf;
230 }
156 231
157 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); 232 diff = calc - tu_valid;
158 if (!dpe) 233 } else {
159 return false; 234 /* no remainder, but the hw doesn't like the fractional
160 dpse = (void *)((char *)dpe + dpe_headerlen); 235 * part to be zero. decrement the integer part and
236 * have the fraction add a whole symbol back
237 */
238 VTUa = 0;
239 VTUf = 1;
240 VTUi--;
241 }
161 242
162 for (i = 0; i < dpe_headerlen; i++, dpse++) { 243 if (diff < best_diff) {
163 if (dpse->vs_level != vs) 244 best_diff = diff;
164 continue; 245 bestTU = TU;
246 bestVTUa = VTUa;
247 bestVTUf = VTUf;
248 bestVTUi = VTUi;
249 if (diff == 0)
250 break;
251 }
252 }
165 253
166 if (dpse->pre_level > max_pre) 254 if (!bestTU) {
167 max_pre = dpse->pre_level; 255 NV_ERROR(dev, "DP: unable to find suitable config\n");
256 return;
168 } 257 }
169 258
170 return max_pre; 259 /* XXX close to vbios numbers, but not right */
260 unk = (symbol - link_ratio) * bestTU;
261 unk *= link_ratio;
262 r = do_div(unk, symbol);
263 r = do_div(unk, symbol);
264 unk += 6;
265
266 nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
267 nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
268 bestVTUf << 16 |
269 bestVTUi << 8 |
270 unk);
171} 271}
172 272
173static bool 273u8 *
174nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config) 274nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
175{ 275{
176 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 276 struct drm_nouveau_private *dev_priv = dev->dev_private;
177 struct drm_device *dev = encoder->dev; 277 struct nvbios *bios = &dev_priv->vbios;
178 struct bit_displayport_encoder_table *dpe; 278 struct bit_entry d;
179 int ret, i, dpe_headerlen, vs = 0, pre = 0; 279 u8 *table;
180 uint8_t request[2]; 280 int i;
181 281
182 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); 282 if (bit_table(dev, 'd', &d)) {
183 if (!dpe) 283 NV_ERROR(dev, "BIT 'd' table not found\n");
184 return false; 284 return NULL;
185 285 }
186 ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
187 if (ret)
188 return false;
189
190 NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
191
192 /* Keep all lanes at the same level.. */
193 for (i = 0; i < nv_encoder->dp.link_nr; i++) {
194 int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
195 int lane_vs = lane_req & 3;
196 int lane_pre = (lane_req >> 2) & 3;
197 286
198 if (lane_vs > vs) 287 if (d.version != 1) {
199 vs = lane_vs; 288 NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version);
200 if (lane_pre > pre) 289 return NULL;
201 pre = lane_pre;
202 } 290 }
203 291
204 if (vs >= nouveau_dp_max_voltage_swing(encoder)) { 292 table = ROMPTR(bios, d.data[0]);
205 vs = nouveau_dp_max_voltage_swing(encoder); 293 if (!table) {
206 vs |= 4; 294 NV_ERROR(dev, "displayport table pointer invalid\n");
295 return NULL;
207 } 296 }
208 297
209 if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) { 298 switch (table[0]) {
210 pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3); 299 case 0x20:
211 pre |= 4; 300 case 0x21:
301 case 0x30:
302 break;
303 default:
304 NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]);
305 return NULL;
212 } 306 }
213 307
214 /* Update the configuration for all lanes.. */ 308 for (i = 0; i < table[3]; i++) {
215 for (i = 0; i < nv_encoder->dp.link_nr; i++) 309 *entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
216 config[i] = (pre << 3) | vs; 310 if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
311 return table;
312 }
217 313
218 return true; 314 NV_ERROR(dev, "displayport encoder table not found\n");
315 return NULL;
219} 316}
220 317
221static bool 318/******************************************************************************
222nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config) 319 * link training
223{ 320 *****************************************************************************/
224 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 321struct dp_state {
225 struct drm_device *dev = encoder->dev; 322 struct dcb_entry *dcb;
226 struct bit_displayport_encoder_table_entry *dpse; 323 u8 *table;
227 struct bit_displayport_encoder_table *dpe; 324 u8 *entry;
228 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); 325 int auxch;
229 int dpe_headerlen, ret, i; 326 int crtc;
327 int or;
328 int link;
329 u8 *dpcd;
330 int link_nr;
331 u32 link_bw;
332 u8 stat[6];
333 u8 conf[4];
334};
230 335
231 NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n", 336static void
232 config[0], config[1], config[2], config[3]); 337dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
338{
339 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 int or = dp->or, link = dp->link;
341 u8 *entry, sink[2];
342 u32 dp_ctrl;
343 u16 script;
344
345 NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
346
347 /* set selected link rate on source */
348 switch (dp->link_bw) {
349 case 270000:
350 nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00040000);
351 sink[0] = DP_LINK_BW_2_7;
352 break;
353 default:
354 nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00000000);
355 sink[0] = DP_LINK_BW_1_62;
356 break;
357 }
233 358
234 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); 359 /* offset +0x0a of each dp encoder table entry is a pointer to another
235 if (!dpe) 360 * table, that has (among other things) pointers to more scripts that
236 return false; 361 * need to be executed, this time depending on link speed.
237 dpse = (void *)((char *)dpe + dpe_headerlen); 362 */
363 entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
364 if (entry) {
365 if (dp->table[0] < 0x30) {
366 while (dp->link_bw < (ROM16(entry[0]) * 10))
367 entry += 4;
368 script = ROM16(entry[2]);
369 } else {
370 while (dp->link_bw < (entry[0] * 27000))
371 entry += 3;
372 script = ROM16(entry[1]);
373 }
238 374
239 for (i = 0; i < dpe->record_nr; i++, dpse++) { 375 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
240 if (dpse->vs_level == (config[0] & 3) &&
241 dpse->pre_level == ((config[0] >> 3) & 3))
242 break;
243 } 376 }
244 BUG_ON(i == dpe->record_nr); 377
245 378 /* configure lane count on the source */
246 for (i = 0; i < nv_encoder->dp.link_nr; i++) { 379 dp_ctrl = ((1 << dp->link_nr) - 1) << 16;
247 const int shift[4] = { 16, 8, 0, 24 }; 380 sink[1] = dp->link_nr;
248 uint32_t mask = 0xff << shift[i]; 381 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) {
249 uint32_t reg0, reg1, reg2; 382 dp_ctrl |= 0x00004000;
250 383 sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
251 reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
252 reg0 |= (dpse->reg0 << shift[i]);
253 reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
254 reg1 |= (dpse->reg1 << shift[i]);
255 reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
256 reg2 |= (dpse->reg2 << 8);
257 nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
258 nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
259 nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
260 } 384 }
261 385
262 ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4); 386 nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x001f4000, dp_ctrl);
263 if (ret)
264 return false;
265 387
266 return true; 388 /* inform the sink of the new configuration */
389 auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2);
267} 390}
268 391
269bool 392static void
270nouveau_dp_link_train(struct drm_encoder *encoder) 393dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 tp)
271{ 394{
272 struct drm_device *dev = encoder->dev; 395 u8 sink_tp;
273 struct drm_nouveau_private *dev_priv = dev->dev_private;
274 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
275 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
276 struct nouveau_connector *nv_connector;
277 struct bit_displayport_encoder_table *dpe;
278 int dpe_headerlen;
279 uint8_t config[4], status[3];
280 bool cr_done, cr_max_vs, eq_done, hpd_state;
281 int ret = 0, i, tries, voltage;
282 396
283 NV_DEBUG_KMS(dev, "link training!!\n"); 397 NV_DEBUG_KMS(dev, "training pattern %d\n", tp);
284 398
285 nv_connector = nouveau_encoder_connector_get(nv_encoder); 399 nv_mask(dev, NV50_SOR_DP_CTRL(dp->or, dp->link), 0x0f000000, tp << 24);
286 if (!nv_connector)
287 return false;
288 400
289 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); 401 auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
290 if (!dpe) { 402 sink_tp &= ~DP_TRAINING_PATTERN_MASK;
291 NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); 403 sink_tp |= tp;
292 return false; 404 auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
293 } 405}
294 406
295 /* disable hotplug detect, this flips around on some panels during 407static const u8 nv50_lane_map[] = { 16, 8, 0, 24 };
296 * link training. 408static const u8 nvaf_lane_map[] = { 24, 16, 8, 0 };
297 */ 409
298 hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); 410static int
411dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
412{
413 struct drm_nouveau_private *dev_priv = dev->dev_private;
414 u32 mask = 0, drv = 0, pre = 0, unk = 0;
415 const u8 *shifts;
416 int link = dp->link;
417 int or = dp->or;
418 int i;
419
420 if (dev_priv->chipset != 0xaf)
421 shifts = nv50_lane_map;
422 else
423 shifts = nvaf_lane_map;
424
425 for (i = 0; i < dp->link_nr; i++) {
426 u8 *conf = dp->entry + dp->table[4];
427 u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
428 u8 lpre = (lane & 0x0c) >> 2;
429 u8 lvsw = (lane & 0x03) >> 0;
430
431 mask |= 0xff << shifts[i];
432 unk |= 1 << (shifts[i] >> 3);
433
434 dp->conf[i] = (lpre << 3) | lvsw;
435 if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
436 dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
437 if (lpre == DP_TRAIN_PRE_EMPHASIS_9_5)
438 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
439
440 NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]);
441
442 if (dp->table[0] < 0x30) {
443 u8 *last = conf + (dp->entry[4] * dp->table[5]);
444 while (lvsw != conf[0] || lpre != conf[1]) {
445 conf += dp->table[5];
446 if (conf >= last)
447 return -EINVAL;
448 }
449
450 conf += 2;
451 } else {
452 /* no lookup table anymore, set entries for each
453 * combination of voltage swing and pre-emphasis
454 * level allowed by the DP spec.
455 */
456 switch (lvsw) {
457 case 0: lpre += 0; break;
458 case 1: lpre += 4; break;
459 case 2: lpre += 7; break;
460 case 3: lpre += 9; break;
461 }
462
463 conf = conf + (lpre * dp->table[5]);
464 conf++;
465 }
299 466
300 if (dpe->script0) { 467 drv |= conf[0] << shifts[i];
301 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); 468 pre |= conf[1] << shifts[i];
302 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), 469 unk = (unk & ~0x0000ff00) | (conf[2] << 8);
303 nv_encoder->dcb);
304 } 470 }
305 471
306train: 472 nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, drv);
307 cr_done = eq_done = false; 473 nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, pre);
474 nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff0f, unk);
308 475
309 /* set link configuration */ 476 return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4);
310 NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n", 477}
311 nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
312 478
313 ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw); 479static int
314 if (ret) 480dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
315 return false; 481{
482 int ret;
316 483
317 config[0] = nv_encoder->dp.link_nr; 484 udelay(delay);
318 if (nv_encoder->dp.dpcd_version >= 0x11 &&
319 nv_encoder->dp.enhanced_frame)
320 config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
321 485
322 ret = nouveau_dp_lane_count_set(encoder, config[0]); 486 ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6);
323 if (ret) 487 if (ret)
324 return false; 488 return ret;
325 489
326 /* clock recovery */ 490 NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n",
327 NV_DEBUG_KMS(dev, "\tbegin cr\n"); 491 dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3],
328 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1); 492 dp->stat[4], dp->stat[5]);
329 if (ret) 493 return 0;
330 goto stop; 494}
331 495
332 tries = 0; 496static int
333 voltage = -1; 497dp_link_train_cr(struct drm_device *dev, struct dp_state *dp)
334 memset(config, 0x00, sizeof(config)); 498{
335 for (;;) { 499 bool cr_done = false, abort = false;
336 if (!nouveau_dp_link_train_commit(encoder, config)) 500 int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
337 break; 501 int tries = 0, i;
338 502
339 udelay(100); 503 dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1);
340 504
341 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2); 505 do {
342 if (ret) 506 if (dp_link_train_commit(dev, dp) ||
507 dp_link_train_update(dev, dp, 100))
343 break; 508 break;
344 NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
345 status[0], status[1]);
346 509
347 cr_done = true; 510 cr_done = true;
348 cr_max_vs = false; 511 for (i = 0; i < dp->link_nr; i++) {
349 for (i = 0; i < nv_encoder->dp.link_nr; i++) { 512 u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
350 int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
351
352 if (!(lane & DP_LANE_CR_DONE)) { 513 if (!(lane & DP_LANE_CR_DONE)) {
353 cr_done = false; 514 cr_done = false;
354 if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED) 515 if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED)
355 cr_max_vs = true; 516 abort = true;
356 break; 517 break;
357 } 518 }
358 } 519 }
359 520
360 if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { 521 if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
361 voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 522 voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
362 tries = 0; 523 tries = 0;
363 } 524 }
525 } while (!cr_done && !abort && ++tries < 5);
364 526
365 if (cr_done || cr_max_vs || (++tries == 5)) 527 return cr_done ? 0 : -1;
366 break; 528}
367
368 if (!nouveau_dp_link_train_adjust(encoder, config))
369 break;
370 }
371
372 if (!cr_done)
373 goto stop;
374 529
375 /* channel equalisation */ 530static int
376 NV_DEBUG_KMS(dev, "\tbegin eq\n"); 531dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
377 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2); 532{
378 if (ret) 533 bool eq_done, cr_done = true;
379 goto stop; 534 int tries = 0, i;
380 535
381 for (tries = 0; tries <= 5; tries++) { 536 dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2);
382 udelay(400);
383 537
384 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3); 538 do {
385 if (ret) 539 if (dp_link_train_update(dev, dp, 400))
386 break; 540 break;
387 NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
388 status[0], status[1]);
389 541
390 eq_done = true; 542 eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE);
391 if (!(status[2] & DP_INTERLANE_ALIGN_DONE)) 543 for (i = 0; i < dp->link_nr && eq_done; i++) {
392 eq_done = false; 544 u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
393 545 if (!(lane & DP_LANE_CR_DONE))
394 for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
395 int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
396
397 if (!(lane & DP_LANE_CR_DONE)) {
398 cr_done = false; 546 cr_done = false;
399 break;
400 }
401
402 if (!(lane & DP_LANE_CHANNEL_EQ_DONE) || 547 if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
403 !(lane & DP_LANE_SYMBOL_LOCKED)) { 548 !(lane & DP_LANE_SYMBOL_LOCKED))
404 eq_done = false; 549 eq_done = false;
405 break;
406 }
407 } 550 }
408 551
409 if (eq_done || !cr_done) 552 if (dp_link_train_commit(dev, dp))
410 break; 553 break;
554 } while (!eq_done && cr_done && ++tries <= 5);
411 555
412 if (!nouveau_dp_link_train_adjust(encoder, config) || 556 return eq_done ? 0 : -1;
413 !nouveau_dp_link_train_commit(encoder, config)) 557}
414 break;
415 }
416 558
417stop: 559bool
418 /* end link training */ 560nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
419 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE); 561{
420 if (ret) 562 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
563 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
564 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
565 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
566 struct nouveau_connector *nv_connector =
567 nouveau_encoder_connector_get(nv_encoder);
568 struct drm_device *dev = encoder->dev;
569 struct nouveau_i2c_chan *auxch;
570 const u32 bw_list[] = { 270000, 162000, 0 };
571 const u32 *link_bw = bw_list;
572 struct dp_state dp;
573
574 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
575 if (!auxch)
421 return false; 576 return false;
422 577
423 /* retry at a lower setting, if possible */ 578 dp.table = nouveau_dp_bios_data(dev, nv_encoder->dcb, &dp.entry);
424 if (!ret && !(eq_done && cr_done)) { 579 if (!dp.table)
425 NV_DEBUG_KMS(dev, "\twe failed\n"); 580 return -EINVAL;
426 if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) { 581
427 NV_DEBUG_KMS(dev, "retry link training at low rate\n"); 582 dp.dcb = nv_encoder->dcb;
428 nv_encoder->dp.link_bw = DP_LINK_BW_1_62; 583 dp.crtc = nv_crtc->index;
429 goto train; 584 dp.auxch = auxch->rd;
430 } 585 dp.or = nv_encoder->or;
586 dp.link = !(nv_encoder->dcb->sorconf.link & 1);
587 dp.dpcd = nv_encoder->dp.dpcd;
588
589 /* some sinks toggle hotplug in response to some of the actions
590 * we take during link training (DP_SET_POWER is one), we need
591 * to ignore them for the moment to avoid races.
592 */
593 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
594
595 /* enable down-spreading, if possible */
596 if (dp.table[1] >= 16) {
597 u16 script = ROM16(dp.entry[14]);
598 if (nv_encoder->dp.dpcd[3] & 1)
599 script = ROM16(dp.entry[12]);
600
601 nouveau_bios_run_init_table(dev, script, dp.dcb, dp.crtc);
431 } 602 }
432 603
433 if (dpe->script1) { 604 /* execute pre-train script from vbios */
434 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); 605 nouveau_bios_run_init_table(dev, ROM16(dp.entry[6]), dp.dcb, dp.crtc);
435 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), 606
436 nv_encoder->dcb); 607 /* start off at highest link rate supported by encoder and display */
608 while (*link_bw > nv_encoder->dp.link_bw)
609 link_bw++;
610
611 while (link_bw[0]) {
612 /* find minimum required lane count at this link rate */
613 dp.link_nr = nv_encoder->dp.link_nr;
614 while ((dp.link_nr >> 1) * link_bw[0] > datarate)
615 dp.link_nr >>= 1;
616
617 /* drop link rate to minimum with this lane count */
618 while ((link_bw[1] * dp.link_nr) > datarate)
619 link_bw++;
620 dp.link_bw = link_bw[0];
621
622 /* program selected link configuration */
623 dp_set_link_config(dev, &dp);
624
625 /* attempt to train the link at this configuration */
626 memset(dp.stat, 0x00, sizeof(dp.stat));
627 if (!dp_link_train_cr(dev, &dp) &&
628 !dp_link_train_eq(dev, &dp))
629 break;
630
631 /* retry at lower rate */
632 link_bw++;
437 } 633 }
438 634
439 /* re-enable hotplug detect */ 635 /* finish link training */
440 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state); 636 dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
441 637
442 return eq_done; 638 /* execute post-train script from vbios */
639 nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
640
641 /* re-enable hotplug detect */
642 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
643 return true;
443} 644}
444 645
445bool 646bool
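Annotation: the rewritten nouveau_dp_link_train() above walks a bandwidth list from the highest rate both the encoder and the sink support, trims the lane count to the minimum that still carries the requested payload, and retries at the next lower rate when training fails. Below is a minimal standalone sketch of that selection logic only; train_once() is a purely hypothetical stand-in for the clock-recovery plus channel-equalisation attempt, and the data rate is assumed to be in the same (per-lane rate times lane count) units the driver's comparisons use.

#include <stdio.h>

/* hypothetical stand-in for one CR + EQ training attempt; always fails here */
static int train_once(int lanes, int bw) { (void)lanes; (void)bw; return -1; }

static int pick_link_config(int max_lanes, int max_bw, int datarate)
{
	static const int bw_list[] = { 270000, 162000, 0 };
	const int *bw = bw_list;

	/* start at the highest rate both ends support */
	while (*bw > max_bw)
		bw++;

	while (bw[0]) {
		/* minimum lane count that still carries the payload */
		int lanes = max_lanes;
		while ((lanes >> 1) * bw[0] > datarate)
			lanes >>= 1;

		/* drop the rate as far as this lane count allows */
		while ((bw[1] * lanes) > datarate)
			bw++;

		if (train_once(lanes, bw[0]) == 0) {
			printf("trained %d lanes at %d\n", lanes, bw[0]);
			return 0;
		}

		bw++;	/* training failed, retry at the next lower rate */
	}
	return -1;
}

int main(void)
{
	/* illustrative numbers only, not taken from real hardware */
	if (pick_link_config(4, 270000, 500000))
		printf("no configuration trained\n");
	return 0;
}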
@@ -447,31 +648,34 @@ nouveau_dp_detect(struct drm_encoder *encoder)
447{ 648{
448 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 649 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
449 struct drm_device *dev = encoder->dev; 650 struct drm_device *dev = encoder->dev;
450 uint8_t dpcd[4]; 651 struct nouveau_i2c_chan *auxch;
652 u8 *dpcd = nv_encoder->dp.dpcd;
451 int ret; 653 int ret;
452 654
453 ret = auxch_rd(encoder, 0x0000, dpcd, 4); 655 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
454 if (ret) 656 if (!auxch)
455 return false; 657 return false;
456 658
457 NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n" 659 ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
458 "display: link_bw %d, link_nr %d version 0x%02x\n", 660 if (ret)
459 nv_encoder->dcb->dpconf.link_bw, 661 return false;
460 nv_encoder->dcb->dpconf.link_nr,
461 dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
462 662
463 nv_encoder->dp.dpcd_version = dpcd[0]; 663 nv_encoder->dp.link_bw = 27000 * dpcd[1];
664 nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
464 665
465 nv_encoder->dp.link_bw = dpcd[1]; 666 NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n",
466 if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 && 667 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
467 !nv_encoder->dcb->dpconf.link_bw) 668 NV_DEBUG_KMS(dev, "encoder: %dx%d\n",
468 nv_encoder->dp.link_bw = DP_LINK_BW_1_62; 669 nv_encoder->dcb->dpconf.link_nr,
670 nv_encoder->dcb->dpconf.link_bw);
469 671
470 nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; 672 if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
471 if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
472 nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; 673 nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
674 if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
675 nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
473 676
474 nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP); 677 NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
678 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
475 679
476 return true; 680 return true;
477} 681}
@@ -480,105 +684,13 @@ int
480nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, 684nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
481 uint8_t *data, int data_nr) 685 uint8_t *data, int data_nr)
482{ 686{
483 struct drm_device *dev = auxch->dev; 687 return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
484 uint32_t tmp, ctrl, stat = 0, data32[4] = {};
485 int ret = 0, i, index = auxch->rd;
486
487 NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
488
489 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
490 nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
491 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
492 if (!(tmp & 0x01000000)) {
493 NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
494 ret = -EIO;
495 goto out;
496 }
497
498 for (i = 0; i < 3; i++) {
499 tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
500 if (tmp & NV50_AUXCH_STAT_STATE_READY)
501 break;
502 udelay(100);
503 }
504
505 if (i == 3) {
506 ret = -EBUSY;
507 goto out;
508 }
509
510 if (!(cmd & 1)) {
511 memcpy(data32, data, data_nr);
512 for (i = 0; i < 4; i++) {
513 NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
514 nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
515 }
516 }
517
518 nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
519 ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
520 ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
521 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
522 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
523
524 for (i = 0; i < 16; i++) {
525 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
526 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
527 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
528 if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
529 0x00010000, 0x00000000)) {
530 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
531 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
532 ret = -EBUSY;
533 goto out;
534 }
535
536 udelay(400);
537
538 stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
539 if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
540 NV50_AUXCH_STAT_REPLY_AUX_DEFER)
541 break;
542 }
543
544 if (i == 16) {
545 NV_ERROR(dev, "auxch DEFER too many times, bailing\n");
546 ret = -EREMOTEIO;
547 goto out;
548 }
549
550 if (cmd & 1) {
551 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
552 ret = -EREMOTEIO;
553 goto out;
554 }
555
556 for (i = 0; i < 4; i++) {
557 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
558 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
559 }
560 memcpy(data, data32, data_nr);
561 }
562
563out:
564 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
565 nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
566 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
567 if (tmp & 0x01000000) {
568 NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
569 ret = -EIO;
570 }
571
572 udelay(400);
573
574 return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
575} 688}
576 689
577static int 690static int
578nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 691nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
579{ 692{
580 struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap; 693 struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
581 struct drm_device *dev = auxch->dev;
582 struct i2c_msg *msg = msgs; 694 struct i2c_msg *msg = msgs;
583 int ret, mcnt = num; 695 int ret, mcnt = num;
584 696
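Annotation: the removed nouveau_dp_auxch() body above drove the raw AUX transaction itself, including a bounded retry whenever the sink answered AUX_DEFER; the replacement simply delegates to auxch_tx() in nouveau_i2c.c, which is outside this hunk. A minimal sketch of the defer-retry pattern is shown below; aux_xfer_once() is a purely hypothetical stand-in for one hardware transaction, and the reply codes are simplified.

#include <errno.h>
#include <stdint.h>

#define REPLY_ACK   0x0
#define REPLY_DEFER 0x2

/* hypothetical: performs exactly one AUX transaction; always ACKs here */
static int aux_xfer_once(int ch, uint8_t cmd, uint32_t addr, uint8_t *buf, int len)
{
	(void)ch; (void)cmd; (void)addr; (void)buf; (void)len;
	return REPLY_ACK;
}

int aux_xfer(int ch, uint8_t cmd, uint32_t addr, uint8_t *buf, int len)
{
	int i, ret;

	/* a busy sink may legitimately answer DEFER; retry a bounded number
	 * of times before giving up, much like the removed code's 16 tries */
	for (i = 0; i < 16; i++) {
		ret = aux_xfer_once(ch, cmd, addr, buf, len);
		if (ret < 0)
			return ret;
		if (ret != REPLY_DEFER)
			return ret == REPLY_ACK ? 0 : -EREMOTEIO;
	}

	return -EBUSY;
}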
@@ -602,19 +714,6 @@ nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
602 if (ret < 0) 714 if (ret < 0)
603 return ret; 715 return ret;
604 716
605 switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
606 case NV50_AUXCH_STAT_REPLY_I2C_ACK:
607 break;
608 case NV50_AUXCH_STAT_REPLY_I2C_NACK:
609 return -EREMOTEIO;
610 case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
611 udelay(100);
612 continue;
613 default:
614 NV_ERROR(dev, "bad auxch reply: 0x%08x\n", ret);
615 return -EREMOTEIO;
616 }
617
618 ptr += cnt; 717 ptr += cnt;
619 remaining -= cnt; 718 remaining -= cnt;
620 } 719 }
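Annotation: dp_link_train_cr() and dp_link_train_commit() earlier in this file read the per-lane status and adjust-request nibbles (two lanes per byte, four bits each) and turn the requested voltage swing and pre-emphasis into DP_TRAINING_LANEx_SET values, setting the MAX_*_REACHED flags when the top level is requested. A standalone sketch of that bit slicing; the flag values are the DisplayPort-spec constants and the adjust byte below is example data.

#include <stdint.h>
#include <stdio.h>

#define DP_TRAIN_MAX_SWING_REACHED        0x04
#define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED 0x20

/* two lanes share one status/adjust byte: lane 0 in bits 3:0, lane 1 in 7:4 */
static uint8_t lane_nibble(const uint8_t *bytes, int lane)
{
	return (bytes[lane >> 1] >> ((lane & 1) * 4)) & 0xf;
}

/* build a DP_TRAINING_LANEx_SET byte from a requested swing/pre-emphasis */
static uint8_t train_set(uint8_t vswing, uint8_t preemph)
{
	uint8_t conf = (preemph << 3) | vswing;

	if (vswing == 3)	/* level 3 (1.2V) is the top swing level */
		conf |= DP_TRAIN_MAX_SWING_REACHED;
	if (preemph == 3)	/* level 3 (9.5dB) is the top pre-emphasis level */
		conf |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	return conf;
}

int main(void)
{
	/* example adjust-request byte: lane0 wants vs=2/pe=1, lane1 vs=3/pe=0 */
	uint8_t adjust[1] = { 0x36 };
	int i;

	for (i = 0; i < 2; i++) {
		uint8_t req = lane_nibble(adjust, i);
		printf("lane %d -> 0x%02x\n", i, train_set(req & 3, (req >> 2) & 3));
	}
	return 0;
}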
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index b30ddd8d2e2a..c1e01f37b9d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -41,7 +41,7 @@ int nouveau_agpmode = -1;
41module_param_named(agpmode, nouveau_agpmode, int, 0400); 41module_param_named(agpmode, nouveau_agpmode, int, 0400);
42 42
43MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); 43MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
44static int nouveau_modeset = -1; /* kms */ 44int nouveau_modeset = -1;
45module_param_named(modeset, nouveau_modeset, int, 0400); 45module_param_named(modeset, nouveau_modeset, int, 0400);
46 46
47MODULE_PARM_DESC(vbios, "Override default VBIOS location"); 47MODULE_PARM_DESC(vbios, "Override default VBIOS location");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d7d51deb34b6..29837da1098b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -414,12 +414,13 @@ struct nouveau_gpio_engine {
414}; 414};
415 415
416struct nouveau_pm_voltage_level { 416struct nouveau_pm_voltage_level {
417 u8 voltage; 417 u32 voltage; /* microvolts */
418 u8 vid; 418 u8 vid;
419}; 419};
420 420
421struct nouveau_pm_voltage { 421struct nouveau_pm_voltage {
422 bool supported; 422 bool supported;
423 u8 version;
423 u8 vid_mask; 424 u8 vid_mask;
424 425
425 struct nouveau_pm_voltage_level *level; 426 struct nouveau_pm_voltage_level *level;
@@ -428,17 +429,48 @@ struct nouveau_pm_voltage {
428 429
429struct nouveau_pm_memtiming { 430struct nouveau_pm_memtiming {
430 int id; 431 int id;
431 u32 reg_100220; 432 u32 reg_0; /* 0x10f290 on Fermi, 0x100220 for older */
432 u32 reg_100224; 433 u32 reg_1;
433 u32 reg_100228; 434 u32 reg_2;
434 u32 reg_10022c; 435 u32 reg_3;
435 u32 reg_100230; 436 u32 reg_4;
436 u32 reg_100234; 437 u32 reg_5;
437 u32 reg_100238; 438 u32 reg_6;
438 u32 reg_10023c; 439 u32 reg_7;
439 u32 reg_100240; 440 u32 reg_8;
441 /* To be written to 0x1002c0 */
442 u8 CL;
443 u8 WR;
440}; 444};
441 445
446struct nouveau_pm_tbl_header{
447 u8 version;
448 u8 header_len;
449 u8 entry_cnt;
450 u8 entry_len;
451};
452
453struct nouveau_pm_tbl_entry{
454 u8 tWR;
455 u8 tUNK_1;
456 u8 tCL;
457 u8 tRP; /* Byte 3 */
458 u8 empty_4;
459 u8 tRAS; /* Byte 5 */
460 u8 empty_6;
461 u8 tRFC; /* Byte 7 */
462 u8 empty_8;
463 u8 tRC; /* Byte 9 */
464 u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
465 u8 empty_15,empty_16,empty_17;
466 u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
467};
468
469/* nouveau_mem.c */
470void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
471 struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
472 struct nouveau_pm_memtiming *timing);
473
442#define NOUVEAU_PM_MAX_LEVEL 8 474#define NOUVEAU_PM_MAX_LEVEL 8
443struct nouveau_pm_level { 475struct nouveau_pm_level {
444 struct device_attribute dev_attr; 476 struct device_attribute dev_attr;
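Annotation: the new struct nouveau_pm_tbl_entry is clearly meant to overlay the raw memory-timing table bytes directly, so the field positions its comments claim (tRP at byte 3, tRAS at byte 5, and so on) matter. A small standalone check of that layout assumption using offsetof; the structure is copied from the hunk above and the asserted offsets are simply the ones its comments state.

#include <stddef.h>
#include <stdint.h>

struct nouveau_pm_tbl_entry {
	uint8_t tWR;
	uint8_t tUNK_1;
	uint8_t tCL;
	uint8_t tRP;		/* Byte 3 */
	uint8_t empty_4;
	uint8_t tRAS;		/* Byte 5 */
	uint8_t empty_6;
	uint8_t tRFC;		/* Byte 7 */
	uint8_t empty_8;
	uint8_t tRC;		/* Byte 9 */
	uint8_t tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
	uint8_t empty_15, empty_16, empty_17;
	uint8_t tUNK_18, tUNK_19, tUNK_20, tUNK_21;
};

/* the struct is only useful if it mirrors the table bytes one-to-one */
_Static_assert(offsetof(struct nouveau_pm_tbl_entry, tRP) == 3, "tRP");
_Static_assert(offsetof(struct nouveau_pm_tbl_entry, tRAS) == 5, "tRAS");
_Static_assert(offsetof(struct nouveau_pm_tbl_entry, tRFC) == 7, "tRFC");
_Static_assert(offsetof(struct nouveau_pm_tbl_entry, tRC) == 9, "tRC");
_Static_assert(offsetof(struct nouveau_pm_tbl_entry, tUNK_21) == 21, "tUNK_21");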
@@ -448,11 +480,19 @@ struct nouveau_pm_level {
448 u32 core; 480 u32 core;
449 u32 memory; 481 u32 memory;
450 u32 shader; 482 u32 shader;
451 u32 unk05; 483 u32 rop;
452 u32 unk0a; 484 u32 copy;
453 485 u32 daemon;
454 u8 voltage; 486 u32 vdec;
455 u8 fanspeed; 487 u32 unk05; /* nv50:nva3, roughly.. */
488 u32 unka0; /* nva3:nvc0 */
489 u32 hub01; /* nvc0- */
490 u32 hub06; /* nvc0- */
491 u32 hub07; /* nvc0- */
492
493 u32 volt_min; /* microvolts */
494 u32 volt_max;
495 u8 fanspeed;
456 496
457 u16 memscript; 497 u16 memscript;
458 struct nouveau_pm_memtiming *timing; 498 struct nouveau_pm_memtiming *timing;
@@ -496,6 +536,11 @@ struct nouveau_pm_engine {
496 void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, 536 void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
497 u32 id, int khz); 537 u32 id, int khz);
498 void (*clock_set)(struct drm_device *, void *); 538 void (*clock_set)(struct drm_device *, void *);
539
540 int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
541 void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
542 void (*clocks_set)(struct drm_device *, void *);
543
499 int (*voltage_get)(struct drm_device *); 544 int (*voltage_get)(struct drm_device *);
500 int (*voltage_set)(struct drm_device *, int voltage); 545 int (*voltage_set)(struct drm_device *, int voltage);
501 int (*fanspeed_get)(struct drm_device *); 546 int (*fanspeed_get)(struct drm_device *);
@@ -504,7 +549,7 @@ struct nouveau_pm_engine {
504}; 549};
505 550
506struct nouveau_vram_engine { 551struct nouveau_vram_engine {
507 struct nouveau_mm *mm; 552 struct nouveau_mm mm;
508 553
509 int (*init)(struct drm_device *); 554 int (*init)(struct drm_device *);
510 void (*takedown)(struct drm_device *dev); 555 void (*takedown)(struct drm_device *dev);
@@ -623,6 +668,7 @@ enum nouveau_card_type {
623 NV_40 = 0x40, 668 NV_40 = 0x40,
624 NV_50 = 0x50, 669 NV_50 = 0x50,
625 NV_C0 = 0xc0, 670 NV_C0 = 0xc0,
671 NV_D0 = 0xd0
626}; 672};
627 673
628struct drm_nouveau_private { 674struct drm_nouveau_private {
@@ -633,8 +679,8 @@ struct drm_nouveau_private {
633 enum nouveau_card_type card_type; 679 enum nouveau_card_type card_type;
634 /* exact chipset, derived from NV_PMC_BOOT_0 */ 680 /* exact chipset, derived from NV_PMC_BOOT_0 */
635 int chipset; 681 int chipset;
636 int stepping;
637 int flags; 682 int flags;
683 u32 crystal;
638 684
639 void __iomem *mmio; 685 void __iomem *mmio;
640 686
@@ -721,7 +767,6 @@ struct drm_nouveau_private {
721 uint64_t vram_size; 767 uint64_t vram_size;
722 uint64_t vram_sys_base; 768 uint64_t vram_sys_base;
723 769
724 uint64_t fb_phys;
725 uint64_t fb_available_size; 770 uint64_t fb_available_size;
726 uint64_t fb_mappable_pages; 771 uint64_t fb_mappable_pages;
727 uint64_t fb_aper_free; 772 uint64_t fb_aper_free;
@@ -784,6 +829,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
784} 829}
785 830
786/* nouveau_drv.c */ 831/* nouveau_drv.c */
832extern int nouveau_modeset;
787extern int nouveau_agpmode; 833extern int nouveau_agpmode;
788extern int nouveau_duallink; 834extern int nouveau_duallink;
789extern int nouveau_uscript_lvds; 835extern int nouveau_uscript_lvds;
@@ -824,6 +870,8 @@ extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
824 uint32_t reg, uint32_t mask, uint32_t val); 870 uint32_t reg, uint32_t mask, uint32_t val);
825extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, 871extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
826 uint32_t reg, uint32_t mask, uint32_t val); 872 uint32_t reg, uint32_t mask, uint32_t val);
873extern bool nouveau_wait_cb(struct drm_device *, u64 timeout,
874 bool (*cond)(void *), void *);
827extern bool nouveau_wait_for_idle(struct drm_device *); 875extern bool nouveau_wait_for_idle(struct drm_device *);
828extern int nouveau_card_init(struct drm_device *); 876extern int nouveau_card_init(struct drm_device *);
829 877
@@ -1006,15 +1054,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
1006 1054
1007/* nouveau_backlight.c */ 1055/* nouveau_backlight.c */
1008#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 1056#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1009extern int nouveau_backlight_init(struct drm_connector *); 1057extern int nouveau_backlight_init(struct drm_device *);
1010extern void nouveau_backlight_exit(struct drm_connector *); 1058extern void nouveau_backlight_exit(struct drm_device *);
1011#else 1059#else
1012static inline int nouveau_backlight_init(struct drm_connector *dev) 1060static inline int nouveau_backlight_init(struct drm_device *dev)
1013{ 1061{
1014 return 0; 1062 return 0;
1015} 1063}
1016 1064
1017static inline void nouveau_backlight_exit(struct drm_connector *dev) { } 1065static inline void nouveau_backlight_exit(struct drm_device *dev) { }
1018#endif 1066#endif
1019 1067
1020/* nouveau_bios.c */ 1068/* nouveau_bios.c */
@@ -1022,7 +1070,8 @@ extern int nouveau_bios_init(struct drm_device *);
1022extern void nouveau_bios_takedown(struct drm_device *dev); 1070extern void nouveau_bios_takedown(struct drm_device *dev);
1023extern int nouveau_run_vbios_init(struct drm_device *); 1071extern int nouveau_run_vbios_init(struct drm_device *);
1024extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table, 1072extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
1025 struct dcb_entry *); 1073 struct dcb_entry *, int crtc);
1074extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
1026extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *, 1075extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
1027 enum dcb_gpio_tag); 1076 enum dcb_gpio_tag);
1028extern struct dcb_connector_table_entry * 1077extern struct dcb_connector_table_entry *
@@ -1030,11 +1079,8 @@ nouveau_bios_connector_entry(struct drm_device *, int index);
1030extern u32 get_pll_register(struct drm_device *, enum pll_types); 1079extern u32 get_pll_register(struct drm_device *, enum pll_types);
1031extern int get_pll_limits(struct drm_device *, uint32_t limit_match, 1080extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
1032 struct pll_lims *); 1081 struct pll_lims *);
1033extern int nouveau_bios_run_display_table(struct drm_device *, 1082extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
1034 struct dcb_entry *, 1083 struct dcb_entry *, int crtc);
1035 uint32_t script, int pxclk);
1036extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
1037 int *length);
1038extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); 1084extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
1039extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *); 1085extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
1040extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, 1086extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -1043,6 +1089,7 @@ extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
1043 int head, int pxclk); 1089 int head, int pxclk);
1044extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head, 1090extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
1045 enum LVDS_script, int pxclk); 1091 enum LVDS_script, int pxclk);
1092bool bios_encoder_match(struct dcb_entry *, u32 hash);
1046 1093
1047/* nouveau_ttm.c */ 1094/* nouveau_ttm.c */
1048int nouveau_ttm_global_init(struct drm_nouveau_private *); 1095int nouveau_ttm_global_init(struct drm_nouveau_private *);
@@ -1053,7 +1100,9 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
1053int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, 1100int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
1054 uint8_t *data, int data_nr); 1101 uint8_t *data, int data_nr);
1055bool nouveau_dp_detect(struct drm_encoder *); 1102bool nouveau_dp_detect(struct drm_encoder *);
1056bool nouveau_dp_link_train(struct drm_encoder *); 1103bool nouveau_dp_link_train(struct drm_encoder *, u32 datarate);
1104void nouveau_dp_tu_update(struct drm_device *, int, int, u32, u32);
1105u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
1057 1106
1058/* nv04_fb.c */ 1107/* nv04_fb.c */
1059extern int nv04_fb_init(struct drm_device *); 1108extern int nv04_fb_init(struct drm_device *);
@@ -1179,8 +1228,8 @@ extern int nva3_copy_create(struct drm_device *dev);
1179/* nvc0_copy.c */ 1228/* nvc0_copy.c */
1180extern int nvc0_copy_create(struct drm_device *dev, int engine); 1229extern int nvc0_copy_create(struct drm_device *dev, int engine);
1181 1230
1182/* nv40_mpeg.c */ 1231/* nv31_mpeg.c */
1183extern int nv40_mpeg_create(struct drm_device *dev); 1232extern int nv31_mpeg_create(struct drm_device *dev);
1184 1233
1185/* nv50_mpeg.c */ 1234/* nv50_mpeg.c */
1186extern int nv50_mpeg_create(struct drm_device *dev); 1235extern int nv50_mpeg_create(struct drm_device *dev);
@@ -1265,6 +1314,11 @@ extern int nv04_display_create(struct drm_device *);
1265extern int nv04_display_init(struct drm_device *); 1314extern int nv04_display_init(struct drm_device *);
1266extern void nv04_display_destroy(struct drm_device *); 1315extern void nv04_display_destroy(struct drm_device *);
1267 1316
1317/* nvd0_display.c */
1318extern int nvd0_display_create(struct drm_device *);
1319extern int nvd0_display_init(struct drm_device *);
1320extern void nvd0_display_destroy(struct drm_device *);
1321
1268/* nv04_crtc.c */ 1322/* nv04_crtc.c */
1269extern int nv04_crtc_create(struct drm_device *, int index); 1323extern int nv04_crtc_create(struct drm_device *, int index);
1270 1324
@@ -1374,6 +1428,8 @@ int nv50_gpio_init(struct drm_device *dev);
1374void nv50_gpio_fini(struct drm_device *dev); 1428void nv50_gpio_fini(struct drm_device *dev);
1375int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1429int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1376int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1430int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1431int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1432int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1377int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, 1433int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
1378 void (*)(void *, int), void *); 1434 void (*)(void *, int), void *);
1379void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, 1435void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
@@ -1448,6 +1504,8 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1448 nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val)) 1504 nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
1449#define nv_wait_ne(dev, reg, mask, val) \ 1505#define nv_wait_ne(dev, reg, mask, val) \
1450 nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val)) 1506 nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
1507#define nv_wait_cb(dev, func, data) \
1508 nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
1451 1509
1452/* PRAMIN access */ 1510/* PRAMIN access */
1453static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) 1511static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
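Annotation: the new nv_wait_cb()/nouveau_wait_cb() pair generalises the register-polling helpers: instead of a (reg, mask, val) triple the caller passes a predicate plus opaque data, and the helper spins until the predicate holds or the fixed 2-second budget expires. A minimal userspace sketch of that shape, using clock_gettime() in place of the kernel's timekeeping; the names here are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* spin until cond(data) is true or "timeout" nanoseconds have elapsed */
bool wait_cb(uint64_t timeout, bool (*cond)(void *), void *data)
{
	uint64_t start = now_ns();

	do {
		if (cond(data))
			return true;
	} while (now_ns() - start < timeout);

	return false;
}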
@@ -1514,6 +1572,7 @@ enum {
1514 NOUVEAU_REG_DEBUG_RMVIO = 0x80, 1572 NOUVEAU_REG_DEBUG_RMVIO = 0x80,
1515 NOUVEAU_REG_DEBUG_VGAATTR = 0x100, 1573 NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
1516 NOUVEAU_REG_DEBUG_EVO = 0x200, 1574 NOUVEAU_REG_DEBUG_EVO = 0x200,
1575 NOUVEAU_REG_DEBUG_AUXCH = 0x400
1517}; 1576};
1518 1577
1519#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \ 1578#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index ae69b61d93db..e5d6e3faff3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -49,17 +49,17 @@ struct nouveau_encoder {
49 49
50 union { 50 union {
51 struct { 51 struct {
52 int mc_unknown; 52 u8 dpcd[8];
53 uint32_t unk0;
54 uint32_t unk1;
55 int dpcd_version;
56 int link_nr; 53 int link_nr;
57 int link_bw; 54 int link_bw;
58 bool enhanced_frame; 55 u32 datarate;
59 } dp; 56 } dp;
60 }; 57 };
61}; 58};
62 59
60struct nouveau_encoder *
61find_encoder(struct drm_connector *connector, int type);
62
63static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc) 63static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
64{ 64{
65 struct drm_encoder_slave *slave = to_encoder_slave(enc); 65 struct drm_encoder_slave *slave = to_encoder_slave(enc);
@@ -83,21 +83,4 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
83int nv50_sor_create(struct drm_connector *, struct dcb_entry *); 83int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
84int nv50_dac_create(struct drm_connector *, struct dcb_entry *); 84int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
85 85
86struct bit_displayport_encoder_table {
87 uint32_t match;
88 uint8_t record_nr;
89 uint8_t unknown;
90 uint16_t script0;
91 uint16_t script1;
92 uint16_t unknown_table;
93} __attribute__ ((packed));
94
95struct bit_displayport_encoder_table_entry {
96 uint8_t vs_level;
97 uint8_t pre_level;
98 uint8_t reg0;
99 uint8_t reg1;
100 uint8_t reg2;
101} __attribute__ ((packed));
102
103#endif /* __NOUVEAU_ENCODER_H__ */ 86#endif /* __NOUVEAU_ENCODER_H__ */
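Annotation: with the packed bit_displayport_encoder_table structs removed here, nouveau_dp_bios_data() earlier in this patch walks the table through its generic header instead: byte 0 is the version, byte 1 the header length, byte 2 the entry stride, byte 3 the entry count, and each entry slot holds a 16-bit offset into the ROM image. A standalone sketch of that walk over a fabricated in-memory table; rom16() is a local helper and the example bytes are invented.

#include <stdint.h>
#include <stdio.h>

/* little-endian 16-bit read from a byte table */
static uint16_t rom16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

/* walk a table whose header is: [0] version, [1] header length,
 * [2] entry stride, [3] entry count; each entry is a 16-bit ROM offset */
static void dump_entries(const uint8_t *table)
{
	int i;

	printf("version 0x%02x, %d entries\n", table[0], table[3]);
	for (i = 0; i < table[3]; i++) {
		const uint8_t *entry = table + table[1] + i * table[2];
		printf("entry %d -> offset 0x%04x\n", i, rom16(entry));
	}
}

int main(void)
{
	/* fabricated example: version 0x30, 5-byte header, 2-byte stride,
	 * two entries pointing at ROM offsets 0x0123 and 0x0456 */
	const uint8_t table[] = { 0x30, 0x05, 0x02, 0x02, 0x00,
				  0x23, 0x01, 0x56, 0x04 };
	dump_entries(table);
	return 0;
}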
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c919cfc8f2fd..81116cfea275 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -519,7 +519,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
519 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) { 519 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
520 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; 520 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
521 521
522 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 522 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
523 mem->start << PAGE_SHIFT, 523 mem->start << PAGE_SHIFT,
524 mem->size, NV_MEM_ACCESS_RW, 524 mem->size, NV_MEM_ACCESS_RW,
525 NV_MEM_TARGET_VRAM, &obj); 525 NV_MEM_TARGET_VRAM, &obj);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index cb389d014326..f6a27fabcfe0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -107,6 +107,13 @@ nv4e_i2c_getsda(void *data)
107 return !!((nv_rd32(dev, i2c->rd) >> 16) & 8); 107 return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
108} 108}
109 109
110static const uint32_t nv50_i2c_port[] = {
111 0x00e138, 0x00e150, 0x00e168, 0x00e180,
112 0x00e254, 0x00e274, 0x00e764, 0x00e780,
113 0x00e79c, 0x00e7b8
114};
115#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
116
110static int 117static int
111nv50_i2c_getscl(void *data) 118nv50_i2c_getscl(void *data)
112{ 119{
@@ -130,28 +137,32 @@ static void
130nv50_i2c_setscl(void *data, int state) 137nv50_i2c_setscl(void *data, int state)
131{ 138{
132 struct nouveau_i2c_chan *i2c = data; 139 struct nouveau_i2c_chan *i2c = data;
133 struct drm_device *dev = i2c->dev;
134 140
135 nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0)); 141 nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
136} 142}
137 143
138static void 144static void
139nv50_i2c_setsda(void *data, int state) 145nv50_i2c_setsda(void *data, int state)
140{ 146{
141 struct nouveau_i2c_chan *i2c = data; 147 struct nouveau_i2c_chan *i2c = data;
142 struct drm_device *dev = i2c->dev;
143 148
144 nv_wr32(dev, i2c->wr, 149 nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
145 (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
146 i2c->data = state; 150 i2c->data = state;
147} 151}
148 152
149static const uint32_t nv50_i2c_port[] = { 153static int
150 0x00e138, 0x00e150, 0x00e168, 0x00e180, 154nvd0_i2c_getscl(void *data)
151 0x00e254, 0x00e274, 0x00e764, 0x00e780, 155{
152 0x00e79c, 0x00e7b8 156 struct nouveau_i2c_chan *i2c = data;
153}; 157 return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
154#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port) 158}
159
160static int
161nvd0_i2c_getsda(void *data)
162{
163 struct nouveau_i2c_chan *i2c = data;
164 return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
165}
155 166
156int 167int
157nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) 168nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
@@ -163,7 +174,8 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
163 if (entry->chan) 174 if (entry->chan)
164 return -EEXIST; 175 return -EEXIST;
165 176
166 if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) { 177 if (dev_priv->card_type >= NV_50 &&
178 dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
167 NV_ERROR(dev, "unknown i2c port %d\n", entry->read); 179 NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
168 return -EINVAL; 180 return -EINVAL;
169 } 181 }
@@ -192,10 +204,17 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
192 case 5: 204 case 5:
193 i2c->bit.setsda = nv50_i2c_setsda; 205 i2c->bit.setsda = nv50_i2c_setsda;
194 i2c->bit.setscl = nv50_i2c_setscl; 206 i2c->bit.setscl = nv50_i2c_setscl;
195 i2c->bit.getsda = nv50_i2c_getsda; 207 if (dev_priv->card_type < NV_D0) {
196 i2c->bit.getscl = nv50_i2c_getscl; 208 i2c->bit.getsda = nv50_i2c_getsda;
197 i2c->rd = nv50_i2c_port[entry->read]; 209 i2c->bit.getscl = nv50_i2c_getscl;
198 i2c->wr = i2c->rd; 210 i2c->rd = nv50_i2c_port[entry->read];
211 i2c->wr = i2c->rd;
212 } else {
213 i2c->bit.getsda = nvd0_i2c_getsda;
214 i2c->bit.getscl = nvd0_i2c_getscl;
215 i2c->rd = 0x00d014 + (entry->read * 0x20);
216 i2c->wr = i2c->rd;
217 }
199 break; 218 break;
200 case 6: 219 case 6:
201 i2c->rd = entry->read; 220 i2c->rd = entry->read;
@@ -267,7 +286,10 @@ nouveau_i2c_find(struct drm_device *dev, int index)
267 val = 0xe001; 286 val = 0xe001;
268 } 287 }
269 288
270 nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); 289 /* nfi, but neither auxch or i2c work if it's 1 */
290 nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
291 /* nfi, but switches auxch vs normal i2c */
292 nv_mask(dev, reg + 0x00, 0x0000f003, val);
271 } 293 }
272 294
273 if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) 295 if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
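Annotation: on NVD0 the bit-banged i2c lines move to a flat register block, one 32-bit register per port at 0x00d014 + port * 0x20, with SCL readable in bit 4 and SDA in bit 5, per nvd0_i2c_getscl()/getsda() above. A small sketch of that address and bit decode; read_reg() is a hypothetical stand-in for the driver's nv_rd32() MMIO access.

#include <stdbool.h>
#include <stdint.h>

/* hypothetical stand-in for the MMIO read (nv_rd32 in the driver) */
static uint32_t read_reg(uint32_t addr)
{
	(void)addr;
	return 0x30;	/* example: both lines high */
}

uint32_t nvd0_i2c_port(int port)
{
	/* one register per port, 0x20 bytes apart */
	return 0x00d014 + port * 0x20;
}

bool nvd0_i2c_scl(int port)
{
	return read_reg(nvd0_i2c_port(port)) & 0x10;	/* bit 4 = SCL */
}

bool nvd0_i2c_sda(int port)
{
	return read_reg(nvd0_i2c_port(port)) & 0x20;	/* bit 5 = SDA */
}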
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index f9ae2fc3d6f1..36bec4807701 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -408,8 +408,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
408 if (ret) 408 if (ret)
409 return ret; 409 return ret;
410 410
411 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
412
413 ret = nouveau_ttm_global_init(dev_priv); 411 ret = nouveau_ttm_global_init(dev_priv);
414 if (ret) 412 if (ret)
415 return ret; 413 return ret;
@@ -504,35 +502,146 @@ nouveau_mem_gart_init(struct drm_device *dev)
504 return 0; 502 return 0;
505} 503}
506 504
505/* XXX: For now a dummy. More samples required, possibly even a card
506 * Called from nouveau_perf.c */
507void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
508 struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
509 struct nouveau_pm_memtiming *timing) {
510
511 NV_DEBUG(dev,"Timing entry format unknown, please contact nouveau developers");
512}
513
514void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
515 struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
516 struct nouveau_pm_memtiming *timing) {
517
518 timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
519
520 /* XXX: I don't trust the -1's and +1's... they must come
521 * from somewhere! */
522 timing->reg_1 = (e->tWR + 2 + magic_number) << 24 |
523 1 << 16 |
524 (e->tUNK_1 + 2 + magic_number) << 8 |
525 (e->tCL + 2 - magic_number);
526 timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
527 timing->reg_2 |= 0x20200000;
528
529 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id,
530 timing->reg_0, timing->reg_1,timing->reg_2);
531}
532
533void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr,
534 struct nouveau_pm_tbl_entry *e, uint8_t magic_number,struct nouveau_pm_memtiming *timing) {
535 struct drm_nouveau_private *dev_priv = dev->dev_private;
536
537 uint8_t unk18 = 1,
538 unk19 = 1,
539 unk20 = 0,
540 unk21 = 0;
541
542 switch (min(hdr->entry_len, (u8) 22)) {
543 case 22:
544 unk21 = e->tUNK_21;
545 case 21:
546 unk20 = e->tUNK_20;
547 case 20:
548 unk19 = e->tUNK_19;
549 case 19:
550 unk18 = e->tUNK_18;
551 break;
552 }
553
554 timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
555
556 /* XXX: I don't trust the -1's and +1's... they must come
557 * from somewhere! */
558 timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 |
559 max(unk18, (u8) 1) << 16 |
560 (e->tUNK_1 + unk19 + 1 + magic_number) << 8;
561 if (dev_priv->chipset == 0xa8) {
562 timing->reg_1 |= (e->tCL - 1);
563 } else {
564 timing->reg_1 |= (e->tCL + 2 - magic_number);
565 }
566 timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
567
568 timing->reg_5 = (e->tRAS << 24 | e->tRC);
569 timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16;
570
571 if (P->version == 1) {
572 timing->reg_2 |= magic_number << 24;
573 timing->reg_3 = (0x14 + e->tCL) << 24 |
574 0x16 << 16 |
575 (e->tCL - 1) << 8 |
576 (e->tCL - 1);
577 timing->reg_4 = (nv_rd32(dev,0x10022c) & 0xffff0000) | e->tUNK_13 << 8 | e->tUNK_13;
578 timing->reg_5 |= (e->tCL + 2) << 8;
579 timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16;
580 } else {
581 timing->reg_2 |= (unk19 - 1) << 24;
582 /* XXX: reg_10022c for recentish cards pretty much unknown*/
583 timing->reg_3 = e->tCL - 1;
584 timing->reg_4 = (unk20 << 24 | unk21 << 16 |
585 e->tUNK_13 << 8 | e->tUNK_13);
586 /* XXX: +6? */
587 timing->reg_5 |= (unk19 + 6) << 8;
588
589 /* XXX: reg_10023c currently unknown
590 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
591 timing->reg_7 = 0x202;
592 }
593
594 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id,
595 timing->reg_0, timing->reg_1,
596 timing->reg_2, timing->reg_3);
597 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
598 timing->reg_4, timing->reg_5,
599 timing->reg_6, timing->reg_7);
600 NV_DEBUG(dev, " 240: %08x\n", timing->reg_8);
601}
602
603void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
604 struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) {
605 timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP);
606 timing->reg_1 = (nv_rd32(dev,0x10f294) & 0xff000000) | (e->tUNK_11&0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f);
607 timing->reg_2 = (nv_rd32(dev,0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8;
608 timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13;
609 timing->reg_4 = (nv_rd32(dev,0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15;
610 NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id,
611 timing->reg_0, timing->reg_1,
612 timing->reg_2, timing->reg_3);
613 NV_DEBUG(dev, " 2a0: %08x %08x %08x %08x\n",
614 timing->reg_4, timing->reg_5,
615 timing->reg_6, timing->reg_7);
616}
617
618/**
619 * Processes the Memory Timing BIOS table, stores generated
620 * register values
621 * @pre init scripts were run, memtiming regs are initialized
622 */
507void 623void
508nouveau_mem_timing_init(struct drm_device *dev) 624nouveau_mem_timing_init(struct drm_device *dev)
509{ 625{
510 /* cards < NVC0 only */
511 struct drm_nouveau_private *dev_priv = dev->dev_private; 626 struct drm_nouveau_private *dev_priv = dev->dev_private;
512 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 627 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
513 struct nouveau_pm_memtimings *memtimings = &pm->memtimings; 628 struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
514 struct nvbios *bios = &dev_priv->vbios; 629 struct nvbios *bios = &dev_priv->vbios;
515 struct bit_entry P; 630 struct bit_entry P;
516 u8 tUNK_0, tUNK_1, tUNK_2; 631 struct nouveau_pm_tbl_header *hdr = NULL;
517 u8 tRP; /* Byte 3 */ 632 uint8_t magic_number;
518 u8 tRAS; /* Byte 5 */ 633 u8 *entry;
519 u8 tRFC; /* Byte 7 */ 634 int i;
520 u8 tRC; /* Byte 9 */
521 u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
522 u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
523 u8 magic_number = 0; /* Yeah... sorry*/
524 u8 *mem = NULL, *entry;
525 int i, recordlen, entries;
526 635
527 if (bios->type == NVBIOS_BIT) { 636 if (bios->type == NVBIOS_BIT) {
528 if (bit_table(dev, 'P', &P)) 637 if (bit_table(dev, 'P', &P))
529 return; 638 return;
530 639
531 if (P.version == 1) 640 if (P.version == 1)
532 mem = ROMPTR(bios, P.data[4]); 641 hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
533 else 642 else
534 if (P.version == 2) 643 if (P.version == 2)
535 mem = ROMPTR(bios, P.data[8]); 644 hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
536 else { 645 else {
537 NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); 646 NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
538 } 647 }
@@ -541,150 +650,56 @@ nouveau_mem_timing_init(struct drm_device *dev)
541 return; 650 return;
542 } 651 }
543 652
544 if (!mem) { 653 if (!hdr) {
545 NV_DEBUG(dev, "memory timing table pointer invalid\n"); 654 NV_DEBUG(dev, "memory timing table pointer invalid\n");
546 return; 655 return;
547 } 656 }
548 657
549 if (mem[0] != 0x10) { 658 if (hdr->version != 0x10) {
550 NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]); 659 NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version);
551 return; 660 return;
552 } 661 }
553 662
554 /* validate record length */ 663 /* validate record length */
555 entries = mem[2]; 664 if (hdr->entry_len < 15) {
556 recordlen = mem[3]; 665 NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len);
557 if (recordlen < 15) {
558 NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
559 return; 666 return;
560 } 667 }
561 668
562 /* parse vbios entries into common format */ 669 /* parse vbios entries into common format */
563 memtimings->timing = 670 memtimings->timing =
564 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); 671 kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL);
565 if (!memtimings->timing) 672 if (!memtimings->timing)
566 return; 673 return;
567 674
568 /* Get "some number" from the timing reg for NV_40 and NV_50 675 /* Get "some number" from the timing reg for NV_40 and NV_50
569 * Used in calculations later */ 676 * Used in calculations later... source unknown */
570 if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) { 677 magic_number = 0;
678 if (P.version == 1) {
571 magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24; 679 magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
572 } 680 }
573 681
574 entry = mem + mem[1]; 682 entry = (u8*) hdr + hdr->header_len;
575 for (i = 0; i < entries; i++, entry += recordlen) { 683 for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
576 struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; 684 struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
577 if (entry[0] == 0) 685 if (entry[0] == 0)
578 continue; 686 continue;
579 687
580 tUNK_18 = 1;
581 tUNK_19 = 1;
582 tUNK_20 = 0;
583 tUNK_21 = 0;
584 switch (min(recordlen, 22)) {
585 case 22:
586 tUNK_21 = entry[21];
587 case 21:
588 tUNK_20 = entry[20];
589 case 20:
590 tUNK_19 = entry[19];
591 case 19:
592 tUNK_18 = entry[18];
593 default:
594 tUNK_0 = entry[0];
595 tUNK_1 = entry[1];
596 tUNK_2 = entry[2];
597 tRP = entry[3];
598 tRAS = entry[5];
599 tRFC = entry[7];
600 tRC = entry[9];
601 tUNK_10 = entry[10];
602 tUNK_11 = entry[11];
603 tUNK_12 = entry[12];
604 tUNK_13 = entry[13];
605 tUNK_14 = entry[14];
606 break;
607 }
608
609 timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
610
611 /* XXX: I don't trust the -1's and +1's... they must come
612 * from somewhere! */
613 timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
614 max(tUNK_18, (u8) 1) << 16 |
615 (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
616 if (dev_priv->chipset == 0xa8) {
617 timing->reg_100224 |= (tUNK_2 - 1);
618 } else {
619 timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
620 }
621
622 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
623 if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa)
624 timing->reg_100228 |= (tUNK_19 - 1) << 24;
625 else
626 timing->reg_100228 |= magic_number << 24;
627
628 if (dev_priv->card_type == NV_40) {
629 /* NV40: don't know what the rest of the regs are..
630 * And don't need to know either */
631 timing->reg_100228 |= 0x20200000;
632 } else if (dev_priv->card_type >= NV_50) {
633 if (dev_priv->chipset < 0x98 ||
634 (dev_priv->chipset == 0x98 &&
635 dev_priv->stepping <= 0xa1)) {
636 timing->reg_10022c = (0x14 + tUNK_2) << 24 |
637 0x16 << 16 |
638 (tUNK_2 - 1) << 8 |
639 (tUNK_2 - 1);
640 } else {
641 /* XXX: reg_10022c for recentish cards */
642 timing->reg_10022c = tUNK_2 - 1;
643 }
644
645 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
646 tUNK_13 << 8 | tUNK_13);
647
648 timing->reg_100234 = (tRAS << 24 | tRC);
649 timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
650
651 if (dev_priv->chipset < 0x98 ||
652 (dev_priv->chipset == 0x98 &&
653 dev_priv->stepping <= 0xa1)) {
654 timing->reg_100234 |= (tUNK_2 + 2) << 8;
655 } else {
656 /* XXX: +6? */
657 timing->reg_100234 |= (tUNK_19 + 6) << 8;
658 }
659
660 /* XXX; reg_100238
661 * reg_100238: 0x00?????? */
662 timing->reg_10023c = 0x202;
663 if (dev_priv->chipset < 0x98 ||
664 (dev_priv->chipset == 0x98 &&
665 dev_priv->stepping <= 0xa1)) {
666 timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
667 } else {
668 /* XXX: reg_10023c
669 * currently unknown
670 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
671 }
672
673 /* XXX: reg_100240? */
674 }
675 timing->id = i; 688 timing->id = i;
676 689 timing->WR = entry[0];
677 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, 690 timing->CL = entry[2];
678 timing->reg_100220, timing->reg_100224, 691
679 timing->reg_100228, timing->reg_10022c); 692 if(dev_priv->card_type <= NV_40) {
680 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", 693 nv40_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
681 timing->reg_100230, timing->reg_100234, 694 } else if(dev_priv->card_type == NV_50){
682 timing->reg_100238, timing->reg_10023c); 695 nv50_mem_timing_entry(dev,&P,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
683 NV_DEBUG(dev, " 240: %08x\n", timing->reg_100240); 696 } else if(dev_priv->card_type == NV_C0) {
697 nvc0_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,&pm->memtimings.timing[i]);
698 }
684 } 699 }
685 700
686 memtimings->nr_timing = entries; 701 memtimings->nr_timing = hdr->entry_cnt;
687 memtimings->supported = (dev_priv->chipset <= 0x98); 702 memtimings->supported = P.version == 1;
688} 703}
689 704
690void 705void
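Annotation: nouveau_mem_timing_init() above now treats the first four bytes of the BIT 'P' memory-timing table as a generic header (version, header length, entry count, entry length) and hands each raw entry to a per-generation parser. A standalone sketch of that walk; parse_entry() is a placeholder for the nv40/nv50/nvc0 helpers in the hunk above, and the sanity checks mirror the ones the driver performs.

#include <stdint.h>
#include <stdio.h>

struct tbl_header {
	uint8_t version;
	uint8_t header_len;
	uint8_t entry_cnt;
	uint8_t entry_len;
};

/* placeholder for nv40/nv50/nvc0_mem_timing_entry() */
static void parse_entry(int id, const uint8_t *e)
{
	printf("entry %d: tWR=%d tCL=%d\n", id, e[0], e[2]);
}

void walk_timing_table(const uint8_t *table)
{
	const struct tbl_header *hdr = (const void *)table;
	const uint8_t *entry = table + hdr->header_len;
	int i;

	if (hdr->version != 0x10 || hdr->entry_len < 15)
		return;		/* unknown layout, bail like the driver does */

	for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
		if (entry[0] == 0)	/* unused slot */
			continue;
		parse_entry(i, entry);
	}
}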
@@ -693,7 +708,10 @@ nouveau_mem_timing_fini(struct drm_device *dev)
693 struct drm_nouveau_private *dev_priv = dev->dev_private; 708 struct drm_nouveau_private *dev_priv = dev->dev_private;
694 struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; 709 struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
695 710
696 kfree(mem->timing); 711 if(mem->timing) {
712 kfree(mem->timing);
713 mem->timing = NULL;
714 }
697} 715}
698 716
699static int 717static int
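Annotation: both the nv40 and nv50 parsers above start by packing the four basic DRAM timings into a single 32-bit word, one byte per field. A tiny worked example of that packing; the timing values are illustrative, not taken from a real VBIOS.

#include <stdint.h>
#include <stdio.h>

/* pack tRC/tRFC/tRAS/tRP (one byte each) into the first timing register,
 * as nv40/nv50_mem_timing_entry() do above */
uint32_t pack_reg0(uint8_t tRC, uint8_t tRFC, uint8_t tRAS, uint8_t tRP)
{
	return (uint32_t)tRC << 24 | tRFC << 16 | tRAS << 8 | tRP;
}

int main(void)
{
	/* example cycle counts: tRC=38, tRFC=83, tRAS=26, tRP=11 */
	printf("reg_0 = 0x%08x\n", pack_reg0(38, 83, 26, 11)); /* 0x26531a0b */
	return 0;
}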
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 1640dec3b823..b29ffb3d1408 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -27,7 +27,7 @@
27#include "nouveau_mm.h" 27#include "nouveau_mm.h"
28 28
29static inline void 29static inline void
30region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a) 30region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
31{ 31{
32 list_del(&a->nl_entry); 32 list_del(&a->nl_entry);
33 list_del(&a->fl_entry); 33 list_del(&a->fl_entry);
@@ -35,7 +35,7 @@ region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
35} 35}
36 36
37static struct nouveau_mm_node * 37static struct nouveau_mm_node *
38region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) 38region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
39{ 39{
40 struct nouveau_mm_node *b; 40 struct nouveau_mm_node *b;
41 41
@@ -57,33 +57,33 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
57 return b; 57 return b;
58} 58}
59 59
60#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \ 60#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
61 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry) 61 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
62 62
63void 63void
64nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 64nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
65{ 65{
66 struct nouveau_mm_node *prev = node(this, prev); 66 struct nouveau_mm_node *prev = node(this, prev);
67 struct nouveau_mm_node *next = node(this, next); 67 struct nouveau_mm_node *next = node(this, next);
68 68
69 list_add(&this->fl_entry, &rmm->free); 69 list_add(&this->fl_entry, &mm->free);
70 this->type = 0; 70 this->type = 0;
71 71
72 if (prev && prev->type == 0) { 72 if (prev && prev->type == 0) {
73 prev->length += this->length; 73 prev->length += this->length;
74 region_put(rmm, this); 74 region_put(mm, this);
75 this = prev; 75 this = prev;
76 } 76 }
77 77
78 if (next && next->type == 0) { 78 if (next && next->type == 0) {
79 next->offset = this->offset; 79 next->offset = this->offset;
80 next->length += this->length; 80 next->length += this->length;
81 region_put(rmm, this); 81 region_put(mm, this);
82 } 82 }
83} 83}
84 84
85int 85int
86nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, 86nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
87 u32 align, struct nouveau_mm_node **pnode) 87 u32 align, struct nouveau_mm_node **pnode)
88{ 88{
89 struct nouveau_mm_node *prev, *this, *next; 89 struct nouveau_mm_node *prev, *this, *next;
@@ -92,17 +92,17 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
92 u32 splitoff; 92 u32 splitoff;
93 u32 s, e; 93 u32 s, e;
94 94
95 list_for_each_entry(this, &rmm->free, fl_entry) { 95 list_for_each_entry(this, &mm->free, fl_entry) {
96 e = this->offset + this->length; 96 e = this->offset + this->length;
97 s = this->offset; 97 s = this->offset;
98 98
99 prev = node(this, prev); 99 prev = node(this, prev);
100 if (prev && prev->type != type) 100 if (prev && prev->type != type)
101 s = roundup(s, rmm->block_size); 101 s = roundup(s, mm->block_size);
102 102
103 next = node(this, next); 103 next = node(this, next);
104 if (next && next->type != type) 104 if (next && next->type != type)
105 e = rounddown(e, rmm->block_size); 105 e = rounddown(e, mm->block_size);
106 106
107 s = (s + align_mask) & ~align_mask; 107 s = (s + align_mask) & ~align_mask;
108 e &= ~align_mask; 108 e &= ~align_mask;
@@ -110,10 +110,10 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
110 continue; 110 continue;
111 111
112 splitoff = s - this->offset; 112 splitoff = s - this->offset;
113 if (splitoff && !region_split(rmm, this, splitoff)) 113 if (splitoff && !region_split(mm, this, splitoff))
114 return -ENOMEM; 114 return -ENOMEM;
115 115
116 this = region_split(rmm, this, min(size, e - s)); 116 this = region_split(mm, this, min(size, e - s));
117 if (!this) 117 if (!this)
118 return -ENOMEM; 118 return -ENOMEM;
119 119
@@ -127,52 +127,49 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
127} 127}
128 128
129int 129int
130nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block) 130nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
131{ 131{
132 struct nouveau_mm *rmm; 132 struct nouveau_mm_node *node;
133 struct nouveau_mm_node *heap; 133
134 if (block) {
135 mutex_init(&mm->mutex);
136 INIT_LIST_HEAD(&mm->nodes);
137 INIT_LIST_HEAD(&mm->free);
138 mm->block_size = block;
139 mm->heap_nodes = 0;
140 }
134 141
135 heap = kzalloc(sizeof(*heap), GFP_KERNEL); 142 node = kzalloc(sizeof(*node), GFP_KERNEL);
136 if (!heap) 143 if (!node)
137 return -ENOMEM; 144 return -ENOMEM;
138 heap->offset = roundup(offset, block); 145 node->offset = roundup(offset, mm->block_size);
139 heap->length = rounddown(offset + length, block) - heap->offset; 146 node->length = rounddown(offset + length, mm->block_size) - node->offset;
140 147
141 rmm = kzalloc(sizeof(*rmm), GFP_KERNEL); 148 list_add_tail(&node->nl_entry, &mm->nodes);
142 if (!rmm) { 149 list_add_tail(&node->fl_entry, &mm->free);
143 kfree(heap); 150 mm->heap_nodes++;
144 return -ENOMEM;
145 }
146 rmm->block_size = block;
147 mutex_init(&rmm->mutex);
148 INIT_LIST_HEAD(&rmm->nodes);
149 INIT_LIST_HEAD(&rmm->free);
150 list_add(&heap->nl_entry, &rmm->nodes);
151 list_add(&heap->fl_entry, &rmm->free);
152
153 *prmm = rmm;
154 return 0; 151 return 0;
155} 152}
156 153
157int 154int
158nouveau_mm_fini(struct nouveau_mm **prmm) 155nouveau_mm_fini(struct nouveau_mm *mm)
159{ 156{
160 struct nouveau_mm *rmm = *prmm;
161 struct nouveau_mm_node *node, *heap = 157 struct nouveau_mm_node *node, *heap =
162 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); 158 list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
163 159 int nodes = 0;
164 if (!list_is_singular(&rmm->nodes)) { 160
165 printk(KERN_ERR "nouveau_mm not empty at destroy time!\n"); 161 list_for_each_entry(node, &mm->nodes, nl_entry) {
166 list_for_each_entry(node, &rmm->nodes, nl_entry) { 162 if (nodes++ == mm->heap_nodes) {
167 printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n", 163 printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
168 node->type, node->offset, node->length); 164 list_for_each_entry(node, &mm->nodes, nl_entry) {
165 printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
166 node->type, node->offset, node->length);
167 }
168 WARN_ON(1);
169 return -EBUSY;
169 } 170 }
170 WARN_ON(1);
171 return -EBUSY;
172 } 171 }
173 172
174 kfree(heap); 173 kfree(heap);
175 kfree(rmm);
176 *prmm = NULL;
177 return 0; 174 return 0;
178} 175}
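nouveau_mm_init()/nouveau_mm_fini() now operate on a caller-embedded struct nouveau_mm instead of allocating one behind a double pointer; nouveau_vm switches to such an embedded "mm" member later in this diff. A minimal usage sketch, with the containing structure and the offsets purely illustrative:

	struct my_heap {			/* hypothetical container */
		struct nouveau_mm mm;		/* embedded, no separate kzalloc */
	};

	/* a non-zero block size initialises the mutex/lists and sets block_size;
	 * each call then appends one free range and bumps heap_nodes */
	ret = nouveau_mm_init(&heap->mm, 0x1000, 0x100000, 0x1000);
	...
	ret = nouveau_mm_fini(&heap->mm);	/* -EBUSY if non-heap nodes remain */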
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index b9c016d21553..57a600c35c95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -42,10 +42,11 @@ struct nouveau_mm {
42 struct mutex mutex; 42 struct mutex mutex;
43 43
44 u32 block_size; 44 u32 block_size;
45 int heap_nodes;
45}; 46};
46 47
47int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block); 48int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
48int nouveau_mm_fini(struct nouveau_mm **); 49int nouveau_mm_fini(struct nouveau_mm *);
49int nouveau_mm_pre(struct nouveau_mm *); 50int nouveau_mm_pre(struct nouveau_mm *);
50int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, 51int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
51 u32 align, struct nouveau_mm_node **); 52 u32 align, struct nouveau_mm_node **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 159b7c437d3f..02222c540aee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -693,6 +693,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
693static int 693static int
694nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) 694nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
695{ 695{
696 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
696 struct drm_device *dev = chan->dev; 697 struct drm_device *dev = chan->dev;
697 struct nouveau_gpuobj *pgd = NULL; 698 struct nouveau_gpuobj *pgd = NULL;
698 struct nouveau_vm_pgd *vpgd; 699 struct nouveau_vm_pgd *vpgd;
@@ -722,6 +723,9 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
722 nv_wo32(chan->ramin, 0x020c, 0x000000ff); 723 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
723 724
724 /* map display semaphore buffers into channel's vm */ 725 /* map display semaphore buffers into channel's vm */
726 if (dev_priv->card_type >= NV_D0)
727 return 0;
728
725 for (i = 0; i < 2; i++) { 729 for (i = 0; i < 2; i++) {
726 struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; 730 struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
727 731
@@ -746,7 +750,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
746 int ret, i; 750 int ret, i;
747 751
748 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 752 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
749 if (dev_priv->card_type == NV_C0) 753 if (dev_priv->card_type >= NV_C0)
750 return nvc0_gpuobj_channel_init(chan, vm); 754 return nvc0_gpuobj_channel_init(chan, vm);
751 755
752 /* Allocate a chunk of memory for per-channel object storage */ 756 /* Allocate a chunk of memory for per-channel object storage */
@@ -793,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
793 return ret; 797 return ret;
794 798
795 /* dma objects for display sync channel semaphore blocks */ 799 /* dma objects for display sync channel semaphore blocks */
796 for (i = 0; i < 2; i++) { 800 for (i = 0; i < dev->mode_config.num_crtc; i++) {
797 struct nouveau_gpuobj *sem = NULL; 801 struct nouveau_gpuobj *sem = NULL;
798 struct nv50_display_crtc *dispc = 802 struct nv50_display_crtc *dispc =
799 &nv50_display(dev)->crtc[i]; 803 &nv50_display(dev)->crtc[i];
@@ -875,18 +879,18 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
875 879
876 NV_DEBUG(dev, "ch%d\n", chan->id); 880 NV_DEBUG(dev, "ch%d\n", chan->id);
877 881
878 if (dev_priv->card_type >= NV_50) { 882 if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
879 struct nv50_display *disp = nv50_display(dev); 883 struct nv50_display *disp = nv50_display(dev);
880 884
881 for (i = 0; i < 2; i++) { 885 for (i = 0; i < dev->mode_config.num_crtc; i++) {
882 struct nv50_display_crtc *dispc = &disp->crtc[i]; 886 struct nv50_display_crtc *dispc = &disp->crtc[i];
883 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]); 887 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
884 } 888 }
885
886 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
887 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
888 } 889 }
889 890
891 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
892 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
893
890 if (drm_mm_initialized(&chan->ramin_heap)) 894 if (drm_mm_initialized(&chan->ramin_heap))
891 drm_mm_takedown(&chan->ramin_heap); 895 drm_mm_takedown(&chan->ramin_heap);
892 nouveau_gpuobj_ref(NULL, &chan->ramin); 896 nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index ef9dec0e6f8b..9f178aa94162 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -127,13 +127,57 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
127 127
128 entry += ramcfg * recordlen; 128 entry += ramcfg * recordlen;
129 if (entry[1] >= pm->memtimings.nr_timing) { 129 if (entry[1] >= pm->memtimings.nr_timing) {
130 NV_WARN(dev, "timingset %d does not exist\n", entry[1]); 130 if (entry[1] != 0xff)
131 NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
131 return NULL; 132 return NULL;
132 } 133 }
133 134
134 return &pm->memtimings.timing[entry[1]]; 135 return &pm->memtimings.timing[entry[1]];
135} 136}
136 137
138static void
139nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
140 struct nouveau_pm_level *perflvl)
141{
142 struct drm_nouveau_private *dev_priv = dev->dev_private;
143 struct nvbios *bios = &dev_priv->vbios;
144 u8 *vmap;
145 int id;
146
147 id = perflvl->volt_min;
148 perflvl->volt_min = 0;
149
150 /* boards using voltage table version <0x40 store the voltage
151 * level directly in the perflvl entry as a multiple of 10mV
152 */
153 if (dev_priv->engine.pm.voltage.version < 0x40) {
154 perflvl->volt_min = id * 10000;
155 perflvl->volt_max = perflvl->volt_min;
156 return;
157 }
158
159 /* on newer ones, the perflvl stores an index into yet another
160 * vbios table containing a min/max voltage value for the perflvl
161 */
162 if (P->version != 2 || P->length < 34) {
163 NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n",
164 P->version, P->length);
165 return;
166 }
167
168 vmap = ROMPTR(bios, P->data[32]);
169 if (!vmap) {
170 NV_DEBUG(dev, "volt map table pointer invalid\n");
171 return;
172 }
173
174 if (id < vmap[3]) {
175 vmap += vmap[1] + (vmap[2] * id);
176 perflvl->volt_min = ROM32(vmap[0]);
177 perflvl->volt_max = ROM32(vmap[4]);
178 }
179}
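For voltage table versions >= 0x40 the perf entry stores an index into a separate "volt map" table rather than a raw voltage. Reading the hunk above: vmap[1] is the header length, vmap[2] the record stride, vmap[3] the entry count, and each record holds two 32-bit values (min, max) in microvolts. A standalone sketch of that lookup, mirroring the logic above rather than any documented layout:

	static void volt_map_lookup(u8 *vmap, int id, u32 *min_uv, u32 *max_uv)
	{
		if (id >= vmap[3])
			return;				/* index past the table, leave 0 */
		vmap += vmap[1] + vmap[2] * id;		/* skip header, seek record */
		*min_uv = ROM32(vmap[0]);		/* microvolts */
		*max_uv = ROM32(vmap[4]);
	}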
180
137void 181void
138nouveau_perf_init(struct drm_device *dev) 182nouveau_perf_init(struct drm_device *dev)
139{ 183{
@@ -141,6 +185,8 @@ nouveau_perf_init(struct drm_device *dev)
141 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 185 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
142 struct nvbios *bios = &dev_priv->vbios; 186 struct nvbios *bios = &dev_priv->vbios;
143 struct bit_entry P; 187 struct bit_entry P;
188 struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
189 struct nouveau_pm_tbl_header mt_hdr;
144 u8 version, headerlen, recordlen, entries; 190 u8 version, headerlen, recordlen, entries;
145 u8 *perf, *entry; 191 u8 *perf, *entry;
146 int vid, i; 192 int vid, i;
@@ -188,6 +234,22 @@ nouveau_perf_init(struct drm_device *dev)
188 } 234 }
189 235
190 entry = perf + headerlen; 236 entry = perf + headerlen;
237
238 /* For version 0x15, initialize memtiming table */
239 if(version == 0x15) {
240 memtimings->timing =
241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
242 if (!memtimings->timing) {
243 NV_WARN(dev,"Could not allocate memtiming table\n");
244 return;
245 }
246
247 mt_hdr.entry_cnt = entries;
248 mt_hdr.entry_len = 14;
249 mt_hdr.version = version;
250 mt_hdr.header_len = 4;
251 }
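Version 0x15 perf tables carry no separate memory-timing table; each perf entry embeds a 14-byte timing record at offset 41 instead. The block above therefore fabricates a header (entry_cnt = number of perf entries, entry_len = 14, header_len = 4) so the same nv30_mem_timing_entry() parser can be pointed at &entry[41] further down. The offset and record length are taken from this patch itself, not from separate documentation.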
252
191 for (i = 0; i < entries; i++) { 253 for (i = 0; i < entries; i++) {
192 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; 254 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
193 255
@@ -203,7 +265,8 @@ nouveau_perf_init(struct drm_device *dev)
203 case 0x13: 265 case 0x13:
204 case 0x15: 266 case 0x15:
205 perflvl->fanspeed = entry[55]; 267 perflvl->fanspeed = entry[55];
206 perflvl->voltage = (recordlen > 56) ? entry[56] : 0; 268 if (recordlen > 56)
269 perflvl->volt_min = entry[56];
207 perflvl->core = ROM32(entry[1]) * 10; 270 perflvl->core = ROM32(entry[1]) * 10;
208 perflvl->memory = ROM32(entry[5]) * 20; 271 perflvl->memory = ROM32(entry[5]) * 20;
209 break; 272 break;
@@ -211,9 +274,10 @@ nouveau_perf_init(struct drm_device *dev)
211 case 0x23: 274 case 0x23:
212 case 0x24: 275 case 0x24:
213 perflvl->fanspeed = entry[4]; 276 perflvl->fanspeed = entry[4];
214 perflvl->voltage = entry[5]; 277 perflvl->volt_min = entry[5];
215 perflvl->core = ROM16(entry[6]) * 1000; 278 perflvl->shader = ROM16(entry[6]) * 1000;
216 279 perflvl->core = perflvl->shader;
280 perflvl->core += (signed char)entry[8] * 1000;
217 if (dev_priv->chipset == 0x49 || 281 if (dev_priv->chipset == 0x49 ||
218 dev_priv->chipset == 0x4b) 282 dev_priv->chipset == 0x4b)
219 perflvl->memory = ROM16(entry[11]) * 1000; 283 perflvl->memory = ROM16(entry[11]) * 1000;
@@ -223,7 +287,7 @@ nouveau_perf_init(struct drm_device *dev)
223 break; 287 break;
224 case 0x25: 288 case 0x25:
225 perflvl->fanspeed = entry[4]; 289 perflvl->fanspeed = entry[4];
226 perflvl->voltage = entry[5]; 290 perflvl->volt_min = entry[5];
227 perflvl->core = ROM16(entry[6]) * 1000; 291 perflvl->core = ROM16(entry[6]) * 1000;
228 perflvl->shader = ROM16(entry[10]) * 1000; 292 perflvl->shader = ROM16(entry[10]) * 1000;
229 perflvl->memory = ROM16(entry[12]) * 1000; 293 perflvl->memory = ROM16(entry[12]) * 1000;
@@ -232,7 +296,7 @@ nouveau_perf_init(struct drm_device *dev)
232 perflvl->memscript = ROM16(entry[2]); 296 perflvl->memscript = ROM16(entry[2]);
233 case 0x35: 297 case 0x35:
234 perflvl->fanspeed = entry[6]; 298 perflvl->fanspeed = entry[6];
235 perflvl->voltage = entry[7]; 299 perflvl->volt_min = entry[7];
236 perflvl->core = ROM16(entry[8]) * 1000; 300 perflvl->core = ROM16(entry[8]) * 1000;
237 perflvl->shader = ROM16(entry[10]) * 1000; 301 perflvl->shader = ROM16(entry[10]) * 1000;
238 perflvl->memory = ROM16(entry[12]) * 1000; 302 perflvl->memory = ROM16(entry[12]) * 1000;
@@ -240,30 +304,34 @@ nouveau_perf_init(struct drm_device *dev)
240 perflvl->unk05 = ROM16(entry[16]) * 1000; 304 perflvl->unk05 = ROM16(entry[16]) * 1000;
241 break; 305 break;
242 case 0x40: 306 case 0x40:
243#define subent(n) entry[perf[2] + ((n) * perf[3])] 307#define subent(n) ((ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000)
244 perflvl->fanspeed = 0; /*XXX*/ 308 perflvl->fanspeed = 0; /*XXX*/
245 perflvl->voltage = entry[2]; 309 perflvl->volt_min = entry[2];
246 if (dev_priv->card_type == NV_50) { 310 if (dev_priv->card_type == NV_50) {
247 perflvl->core = ROM16(subent(0)) & 0xfff; 311 perflvl->core = subent(0);
248 perflvl->shader = ROM16(subent(1)) & 0xfff; 312 perflvl->shader = subent(1);
249 perflvl->memory = ROM16(subent(2)) & 0xfff; 313 perflvl->memory = subent(2);
314 perflvl->vdec = subent(3);
315 perflvl->unka0 = subent(4);
250 } else { 316 } else {
251 perflvl->shader = ROM16(subent(3)) & 0xfff; 317 perflvl->hub06 = subent(0);
318 perflvl->hub01 = subent(1);
319 perflvl->copy = subent(2);
320 perflvl->shader = subent(3);
321 perflvl->rop = subent(4);
322 perflvl->memory = subent(5);
323 perflvl->vdec = subent(6);
324 perflvl->daemon = subent(10);
325 perflvl->hub07 = subent(11);
252 perflvl->core = perflvl->shader / 2; 326 perflvl->core = perflvl->shader / 2;
253 perflvl->unk0a = ROM16(subent(4)) & 0xfff;
254 perflvl->memory = ROM16(subent(5)) & 0xfff;
255 } 327 }
256
257 perflvl->core *= 1000;
258 perflvl->shader *= 1000;
259 perflvl->memory *= 1000;
260 perflvl->unk0a *= 1000;
261 break; 328 break;
262 } 329 }
263 330
264 /* make sure vid is valid */ 331 /* make sure vid is valid */
265 if (pm->voltage.supported && perflvl->voltage) { 332 nouveau_perf_voltage(dev, &P, perflvl);
266 vid = nouveau_volt_vid_lookup(dev, perflvl->voltage); 333 if (pm->voltage.supported && perflvl->volt_min) {
334 vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
267 if (vid < 0) { 335 if (vid < 0) {
268 NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); 336 NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
269 entry += recordlen; 337 entry += recordlen;
@@ -272,7 +340,11 @@ nouveau_perf_init(struct drm_device *dev)
272 } 340 }
273 341
274 /* get the corresponding memory timings */ 342 /* get the corresponding memory timings */
275 if (version > 0x15) { 343 if (version == 0x15) {
344 memtimings->timing[i].id = i;
345 nv30_mem_timing_entry(dev,&mt_hdr,(struct nouveau_pm_tbl_entry*) &entry[41],0,&memtimings->timing[i]);
346 perflvl->timing = &memtimings->timing[i];
347 } else if (version > 0x15) {
276 /* last 3 args are for < 0x40, ignored for >= 0x40 */ 348 /* last 3 args are for < 0x40, ignored for >= 0x40 */
277 perflvl->timing = 349 perflvl->timing =
278 nouveau_perf_timing(dev, &P, 350 nouveau_perf_timing(dev, &P,
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index da8d994d5e8a..a539fd257921 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -64,18 +64,26 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
64 if (perflvl == pm->cur) 64 if (perflvl == pm->cur)
65 return 0; 65 return 0;
66 66
67 if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) { 67 if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
68 ret = pm->voltage_set(dev, perflvl->voltage); 68 ret = pm->voltage_set(dev, perflvl->volt_min);
69 if (ret) { 69 if (ret) {
70 NV_ERROR(dev, "voltage_set %d failed: %d\n", 70 NV_ERROR(dev, "voltage_set %d failed: %d\n",
71 perflvl->voltage, ret); 71 perflvl->volt_min, ret);
72 } 72 }
73 } 73 }
74 74
75 nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core); 75 if (pm->clocks_pre) {
76 nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader); 76 void *state = pm->clocks_pre(dev, perflvl);
77 nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory); 77 if (IS_ERR(state))
78 nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05); 78 return PTR_ERR(state);
79 pm->clocks_set(dev, state);
80 } else
81 if (pm->clock_set) {
82 nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
83 nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
84 nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
85 nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
86 }
79 87
80 pm->cur = perflvl; 88 pm->cur = perflvl;
81 return 0; 89 return 0;
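The clock interface is now two-phase: clocks_pre() does all the computation and validation up front and returns an opaque state object (or an ERR_PTR on failure), and clocks_set() commits that state to the hardware; chips that have not been converted keep the old per-PLL clock_set() path above. A rough sketch of the shape a per-chipset pair takes, with the state contents purely illustrative (the real implementations are in nv40_pm.c and nva3_pm.c from this series):

	struct my_clk_state {			/* hypothetical precomputed state */
		u32 npll_coef;
	};

	void *
	my_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
	{
		struct my_clk_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return ERR_PTR(-ENOMEM);
		/* compute PLL coefficients for perflvl->core etc. here */
		return state;
	}

	void
	my_pm_clocks_set(struct drm_device *dev, void *pre_state)
	{
		struct my_clk_state *state = pre_state;
		/* program the PLLs from the precomputed values, then free */
		kfree(state);
	}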
@@ -92,9 +100,6 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
92 if (nouveau_perflvl_wr != 7777) 100 if (nouveau_perflvl_wr != 7777)
93 return -EPERM; 101 return -EPERM;
94 102
95 if (!pm->clock_set)
96 return -EINVAL;
97
98 if (!strncmp(profile, "boot", 4)) 103 if (!strncmp(profile, "boot", 4))
99 perflvl = &pm->boot; 104 perflvl = &pm->boot;
100 else { 105 else {
@@ -123,31 +128,37 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
123 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 128 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
124 int ret; 129 int ret;
125 130
126 if (!pm->clock_get)
127 return -EINVAL;
128
129 memset(perflvl, 0, sizeof(*perflvl)); 131 memset(perflvl, 0, sizeof(*perflvl));
130 132
131 ret = pm->clock_get(dev, PLL_CORE); 133 if (pm->clocks_get) {
132 if (ret > 0) 134 ret = pm->clocks_get(dev, perflvl);
133 perflvl->core = ret; 135 if (ret)
136 return ret;
137 } else
138 if (pm->clock_get) {
139 ret = pm->clock_get(dev, PLL_CORE);
140 if (ret > 0)
141 perflvl->core = ret;
134 142
135 ret = pm->clock_get(dev, PLL_MEMORY); 143 ret = pm->clock_get(dev, PLL_MEMORY);
136 if (ret > 0) 144 if (ret > 0)
137 perflvl->memory = ret; 145 perflvl->memory = ret;
138 146
139 ret = pm->clock_get(dev, PLL_SHADER); 147 ret = pm->clock_get(dev, PLL_SHADER);
140 if (ret > 0) 148 if (ret > 0)
141 perflvl->shader = ret; 149 perflvl->shader = ret;
142 150
143 ret = pm->clock_get(dev, PLL_UNK05); 151 ret = pm->clock_get(dev, PLL_UNK05);
144 if (ret > 0) 152 if (ret > 0)
145 perflvl->unk05 = ret; 153 perflvl->unk05 = ret;
154 }
146 155
147 if (pm->voltage.supported && pm->voltage_get) { 156 if (pm->voltage.supported && pm->voltage_get) {
148 ret = pm->voltage_get(dev); 157 ret = pm->voltage_get(dev);
149 if (ret > 0) 158 if (ret > 0) {
150 perflvl->voltage = ret; 159 perflvl->volt_min = ret;
160 perflvl->volt_max = ret;
161 }
151 } 162 }
152 163
153 return 0; 164 return 0;
@@ -156,7 +167,7 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
156static void 167static void
157nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) 168nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
158{ 169{
159 char c[16], s[16], v[16], f[16], t[16]; 170 char c[16], s[16], v[32], f[16], t[16], m[16];
160 171
161 c[0] = '\0'; 172 c[0] = '\0';
162 if (perflvl->core) 173 if (perflvl->core)
@@ -166,9 +177,19 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
166 if (perflvl->shader) 177 if (perflvl->shader)
167 snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); 178 snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
168 179
180 m[0] = '\0';
181 if (perflvl->memory)
182 snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
183
169 v[0] = '\0'; 184 v[0] = '\0';
170 if (perflvl->voltage) 185 if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
171 snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10); 186 snprintf(v, sizeof(v), " voltage %dmV-%dmV",
187 perflvl->volt_min / 1000, perflvl->volt_max / 1000);
188 } else
189 if (perflvl->volt_min) {
190 snprintf(v, sizeof(v), " voltage %dmV",
191 perflvl->volt_min / 1000);
192 }
172 193
173 f[0] = '\0'; 194 f[0] = '\0';
174 if (perflvl->fanspeed) 195 if (perflvl->fanspeed)
@@ -178,8 +199,7 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
178 if (perflvl->timing) 199 if (perflvl->timing)
179 snprintf(t, sizeof(t), " timing %d", perflvl->timing->id); 200 snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
180 201
181 snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000, 202 snprintf(ptr, len, "%s%s%s%s%s%s\n", c, s, m, t, v, f);
182 c, s, v, f, t);
183} 203}
184 204
185static ssize_t 205static ssize_t
@@ -190,7 +210,7 @@ nouveau_pm_get_perflvl_info(struct device *d,
190 char *ptr = buf; 210 char *ptr = buf;
191 int len = PAGE_SIZE; 211 int len = PAGE_SIZE;
192 212
193 snprintf(ptr, len, "%d: ", perflvl->id); 213 snprintf(ptr, len, "%d:", perflvl->id);
194 ptr += strlen(buf); 214 ptr += strlen(buf);
195 len -= strlen(buf); 215 len -= strlen(buf);
196 216
@@ -211,9 +231,9 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
211 if (!pm->cur) 231 if (!pm->cur)
212 snprintf(ptr, len, "setting: boot\n"); 232 snprintf(ptr, len, "setting: boot\n");
213 else if (pm->cur == &pm->boot) 233 else if (pm->cur == &pm->boot)
214 snprintf(ptr, len, "setting: boot\nc: "); 234 snprintf(ptr, len, "setting: boot\nc:");
215 else 235 else
216 snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id); 236 snprintf(ptr, len, "setting: static %d\nc:", pm->cur->id);
217 ptr += strlen(buf); 237 ptr += strlen(buf);
218 len -= strlen(buf); 238 len -= strlen(buf);
219 239
@@ -292,7 +312,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
292 } 312 }
293} 313}
294 314
295#ifdef CONFIG_HWMON 315#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
296static ssize_t 316static ssize_t
297nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 317nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
298{ 318{
@@ -409,7 +429,7 @@ static const struct attribute_group hwmon_attrgroup = {
409static int 429static int
410nouveau_hwmon_init(struct drm_device *dev) 430nouveau_hwmon_init(struct drm_device *dev)
411{ 431{
412#ifdef CONFIG_HWMON 432#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
413 struct drm_nouveau_private *dev_priv = dev->dev_private; 433 struct drm_nouveau_private *dev_priv = dev->dev_private;
414 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 434 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
415 struct device *hwmon_dev; 435 struct device *hwmon_dev;
@@ -442,7 +462,7 @@ nouveau_hwmon_init(struct drm_device *dev)
442static void 462static void
443nouveau_hwmon_fini(struct drm_device *dev) 463nouveau_hwmon_fini(struct drm_device *dev)
444{ 464{
445#ifdef CONFIG_HWMON 465#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
446 struct drm_nouveau_private *dev_priv = dev->dev_private; 466 struct drm_nouveau_private *dev_priv = dev->dev_private;
447 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 467 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
448 468
@@ -488,7 +508,7 @@ nouveau_pm_init(struct drm_device *dev)
488 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); 508 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
489 for (i = 0; i < pm->nr_perflvl; i++) { 509 for (i = 0; i < pm->nr_perflvl; i++) {
490 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); 510 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
491 NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info); 511 NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
492 } 512 }
493 513
494 /* determine current ("boot") performance level */ 514 /* determine current ("boot") performance level */
@@ -498,7 +518,7 @@ nouveau_pm_init(struct drm_device *dev)
498 pm->cur = &pm->boot; 518 pm->cur = &pm->boot;
499 519
500 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); 520 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
501 NV_INFO(dev, "c: %s", info); 521 NV_INFO(dev, "c:%s", info);
502 } 522 }
503 523
504 /* switch performance levels now if requested */ 524 /* switch performance levels now if requested */
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 4a9838ddacec..8ac02cdd03a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -52,6 +52,11 @@ void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
52 u32 id, int khz); 52 u32 id, int khz);
53void nv04_pm_clock_set(struct drm_device *, void *); 53void nv04_pm_clock_set(struct drm_device *, void *);
54 54
55/* nv40_pm.c */
56int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
57void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
58void nv40_pm_clocks_set(struct drm_device *, void *);
59
55/* nv50_pm.c */ 60/* nv50_pm.c */
56int nv50_pm_clock_get(struct drm_device *, u32 id); 61int nv50_pm_clock_get(struct drm_device *, u32 id);
57void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, 62void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
@@ -59,10 +64,12 @@ void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
59void nv50_pm_clock_set(struct drm_device *, void *); 64void nv50_pm_clock_set(struct drm_device *, void *);
60 65
61/* nva3_pm.c */ 66/* nva3_pm.c */
62int nva3_pm_clock_get(struct drm_device *, u32 id); 67int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
63void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, 68void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
64 u32 id, int khz); 69void nva3_pm_clocks_set(struct drm_device *, void *);
65void nva3_pm_clock_set(struct drm_device *, void *); 70
71/* nvc0_pm.c */
72int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
66 73
67/* nouveau_temp.c */ 74/* nouveau_temp.c */
68void nouveau_temp_init(struct drm_device *dev); 75void nouveau_temp_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index f18cdfc3400f..43a96b99e180 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,9 +826,12 @@
826#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000 826#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
827#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000 827#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
828#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000 828#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
829#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084 829#define NV50_PDISP_SOR_PWM_DIV(i) (0x0061c080 + (i) * 0x800)
830#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 830#define NV50_PDISP_SOR_PWM_CTL(i) (0x0061c084 + (i) * 0x800)
831#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff 831#define NV50_PDISP_SOR_PWM_CTL_NEW 0x80000000
832#define NVA3_PDISP_SOR_PWM_CTL_UNK 0x40000000
833#define NV50_PDISP_SOR_PWM_CTL_VAL 0x000007ff
834#define NVA3_PDISP_SOR_PWM_CTL_VAL 0x00ffffff
832#define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) 835#define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
833#define NV50_SOR_DP_CTRL_ENABLED 0x00000001 836#define NV50_SOR_DP_CTRL_ENABLED 0x00000001
834#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 837#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
@@ -843,7 +846,7 @@
843#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 846#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
844#define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) 847#define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
845#define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) 848#define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
846#define NV50_SOR_DP_UNK128(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) 849#define NV50_SOR_DP_SCFG(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
847#define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) 850#define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
848 851
849#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) 852#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 2706cb3d871a..b75258a9fe44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,8 +12,8 @@ struct nouveau_sgdma_be {
12 struct drm_device *dev; 12 struct drm_device *dev;
13 13
14 dma_addr_t *pages; 14 dma_addr_t *pages;
15 bool *ttm_alloced;
16 unsigned nr_pages; 15 unsigned nr_pages;
16 bool unmap_pages;
17 17
18 u64 offset; 18 u64 offset;
19 bool bound; 19 bool bound;
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
26{ 26{
27 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 27 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
28 struct drm_device *dev = nvbe->dev; 28 struct drm_device *dev = nvbe->dev;
29 int i;
29 30
30 NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages); 31 NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
31 32
32 if (nvbe->pages) 33 nvbe->pages = dma_addrs;
33 return -EINVAL; 34 nvbe->nr_pages = num_pages;
34 35 nvbe->unmap_pages = true;
35 nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
36 if (!nvbe->pages)
37 return -ENOMEM;
38 36
39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); 37 /* this code path isn't called and is incorrect anyways */
40 if (!nvbe->ttm_alloced) { 38 if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
41 kfree(nvbe->pages); 39 nvbe->unmap_pages = false;
42 nvbe->pages = NULL; 40 return 0;
43 return -ENOMEM;
44 } 41 }
45 42
46 nvbe->nr_pages = 0; 43 for (i = 0; i < num_pages; i++) {
47 while (num_pages--) { 44 nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
48 /* this code path isn't called and is incorrect anyways */ 45 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
49 if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ 46 if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
50 nvbe->pages[nvbe->nr_pages] = 47 nvbe->nr_pages = --i;
51 dma_addrs[nvbe->nr_pages]; 48 be->func->clear(be);
52 nvbe->ttm_alloced[nvbe->nr_pages] = true; 49 return -EFAULT;
53 } else {
54 nvbe->pages[nvbe->nr_pages] =
55 pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
56 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
57 if (pci_dma_mapping_error(dev->pdev,
58 nvbe->pages[nvbe->nr_pages])) {
59 be->func->clear(be);
60 return -EFAULT;
61 }
62 nvbe->ttm_alloced[nvbe->nr_pages] = false;
63 } 50 }
64
65 nvbe->nr_pages++;
66 } 51 }
67 52
68 return 0; 53 return 0;
@@ -72,25 +57,16 @@ static void
72nouveau_sgdma_clear(struct ttm_backend *be) 57nouveau_sgdma_clear(struct ttm_backend *be)
73{ 58{
74 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 59 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
75 struct drm_device *dev; 60 struct drm_device *dev = nvbe->dev;
76
77 if (nvbe && nvbe->pages) {
78 dev = nvbe->dev;
79 NV_DEBUG(dev, "\n");
80 61
81 if (nvbe->bound) 62 if (nvbe->bound)
82 be->func->unbind(be); 63 be->func->unbind(be);
83 64
65 if (nvbe->unmap_pages) {
84 while (nvbe->nr_pages--) { 66 while (nvbe->nr_pages--) {
85 if (!nvbe->ttm_alloced[nvbe->nr_pages]) 67 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
86 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
87 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 68 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
88 } 69 }
89 kfree(nvbe->pages);
90 kfree(nvbe->ttm_alloced);
91 nvbe->pages = NULL;
92 nvbe->ttm_alloced = NULL;
93 nvbe->nr_pages = 0;
94 } 70 }
95} 71}
96 72
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 10656e430b44..82478e0998e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -286,9 +286,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
286 engine->gpio.get = nv10_gpio_get; 286 engine->gpio.get = nv10_gpio_get;
287 engine->gpio.set = nv10_gpio_set; 287 engine->gpio.set = nv10_gpio_set;
288 engine->gpio.irq_enable = NULL; 288 engine->gpio.irq_enable = NULL;
289 engine->pm.clock_get = nv04_pm_clock_get; 289 engine->pm.clocks_get = nv40_pm_clocks_get;
290 engine->pm.clock_pre = nv04_pm_clock_pre; 290 engine->pm.clocks_pre = nv40_pm_clocks_pre;
291 engine->pm.clock_set = nv04_pm_clock_set; 291 engine->pm.clocks_set = nv40_pm_clocks_set;
292 engine->pm.voltage_get = nouveau_voltage_gpio_get; 292 engine->pm.voltage_get = nouveau_voltage_gpio_get;
293 engine->pm.voltage_set = nouveau_voltage_gpio_set; 293 engine->pm.voltage_set = nouveau_voltage_gpio_set;
294 engine->pm.temp_get = nv40_temp_get; 294 engine->pm.temp_get = nv40_temp_get;
@@ -299,7 +299,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
299 case 0x50: 299 case 0x50:
300 case 0x80: /* gotta love NVIDIA's consistency.. */ 300 case 0x80: /* gotta love NVIDIA's consistency.. */
301 case 0x90: 301 case 0x90:
302 case 0xA0: 302 case 0xa0:
303 engine->instmem.init = nv50_instmem_init; 303 engine->instmem.init = nv50_instmem_init;
304 engine->instmem.takedown = nv50_instmem_takedown; 304 engine->instmem.takedown = nv50_instmem_takedown;
305 engine->instmem.suspend = nv50_instmem_suspend; 305 engine->instmem.suspend = nv50_instmem_suspend;
@@ -359,9 +359,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
359 engine->pm.clock_set = nv50_pm_clock_set; 359 engine->pm.clock_set = nv50_pm_clock_set;
360 break; 360 break;
361 default: 361 default:
362 engine->pm.clock_get = nva3_pm_clock_get; 362 engine->pm.clocks_get = nva3_pm_clocks_get;
363 engine->pm.clock_pre = nva3_pm_clock_pre; 363 engine->pm.clocks_pre = nva3_pm_clocks_pre;
364 engine->pm.clock_set = nva3_pm_clock_set; 364 engine->pm.clocks_set = nva3_pm_clocks_set;
365 break; 365 break;
366 } 366 }
367 engine->pm.voltage_get = nouveau_voltage_gpio_get; 367 engine->pm.voltage_get = nouveau_voltage_gpio_get;
@@ -376,7 +376,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
376 engine->vram.put = nv50_vram_del; 376 engine->vram.put = nv50_vram_del;
377 engine->vram.flags_valid = nv50_vram_flags_valid; 377 engine->vram.flags_valid = nv50_vram_flags_valid;
378 break; 378 break;
379 case 0xC0: 379 case 0xc0:
380 engine->instmem.init = nvc0_instmem_init; 380 engine->instmem.init = nvc0_instmem_init;
381 engine->instmem.takedown = nvc0_instmem_takedown; 381 engine->instmem.takedown = nvc0_instmem_takedown;
382 engine->instmem.suspend = nvc0_instmem_suspend; 382 engine->instmem.suspend = nvc0_instmem_suspend;
@@ -422,12 +422,73 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
422 engine->vram.put = nv50_vram_del; 422 engine->vram.put = nv50_vram_del;
423 engine->vram.flags_valid = nvc0_vram_flags_valid; 423 engine->vram.flags_valid = nvc0_vram_flags_valid;
424 engine->pm.temp_get = nv84_temp_get; 424 engine->pm.temp_get = nv84_temp_get;
425 engine->pm.clocks_get = nvc0_pm_clocks_get;
426 engine->pm.voltage_get = nouveau_voltage_gpio_get;
427 engine->pm.voltage_set = nouveau_voltage_gpio_set;
428 break;
429 case 0xd0:
430 engine->instmem.init = nvc0_instmem_init;
431 engine->instmem.takedown = nvc0_instmem_takedown;
432 engine->instmem.suspend = nvc0_instmem_suspend;
433 engine->instmem.resume = nvc0_instmem_resume;
434 engine->instmem.get = nv50_instmem_get;
435 engine->instmem.put = nv50_instmem_put;
436 engine->instmem.map = nv50_instmem_map;
437 engine->instmem.unmap = nv50_instmem_unmap;
438 engine->instmem.flush = nv84_instmem_flush;
439 engine->mc.init = nv50_mc_init;
440 engine->mc.takedown = nv50_mc_takedown;
441 engine->timer.init = nv04_timer_init;
442 engine->timer.read = nv04_timer_read;
443 engine->timer.takedown = nv04_timer_takedown;
444 engine->fb.init = nvc0_fb_init;
445 engine->fb.takedown = nvc0_fb_takedown;
446 engine->fifo.channels = 128;
447 engine->fifo.init = nvc0_fifo_init;
448 engine->fifo.takedown = nvc0_fifo_takedown;
449 engine->fifo.disable = nvc0_fifo_disable;
450 engine->fifo.enable = nvc0_fifo_enable;
451 engine->fifo.reassign = nvc0_fifo_reassign;
452 engine->fifo.channel_id = nvc0_fifo_channel_id;
453 engine->fifo.create_context = nvc0_fifo_create_context;
454 engine->fifo.destroy_context = nvc0_fifo_destroy_context;
455 engine->fifo.load_context = nvc0_fifo_load_context;
456 engine->fifo.unload_context = nvc0_fifo_unload_context;
457 engine->display.early_init = nouveau_stub_init;
458 engine->display.late_takedown = nouveau_stub_takedown;
459 engine->display.create = nvd0_display_create;
460 engine->display.init = nvd0_display_init;
461 engine->display.destroy = nvd0_display_destroy;
462 engine->gpio.init = nv50_gpio_init;
463 engine->gpio.takedown = nouveau_stub_takedown;
464 engine->gpio.get = nvd0_gpio_get;
465 engine->gpio.set = nvd0_gpio_set;
466 engine->gpio.irq_register = nv50_gpio_irq_register;
467 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
468 engine->gpio.irq_enable = nv50_gpio_irq_enable;
469 engine->vram.init = nvc0_vram_init;
470 engine->vram.takedown = nv50_vram_fini;
471 engine->vram.get = nvc0_vram_new;
472 engine->vram.put = nv50_vram_del;
473 engine->vram.flags_valid = nvc0_vram_flags_valid;
474 engine->pm.clocks_get = nvc0_pm_clocks_get;
475 engine->pm.voltage_get = nouveau_voltage_gpio_get;
476 engine->pm.voltage_set = nouveau_voltage_gpio_set;
425 break; 477 break;
426 default: 478 default:
427 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); 479 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
428 return 1; 480 return 1;
429 } 481 }
430 482
483 /* headless mode */
484 if (nouveau_modeset == 2) {
485 engine->display.early_init = nouveau_stub_init;
486 engine->display.late_takedown = nouveau_stub_takedown;
487 engine->display.create = nouveau_stub_init;
488 engine->display.init = nouveau_stub_init;
489 engine->display.destroy = nouveau_stub_takedown;
490 }
491
431 return 0; 492 return 0;
432} 493}
433 494
@@ -449,21 +510,6 @@ nouveau_vga_set_decode(void *priv, bool state)
449 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 510 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
450} 511}
451 512
452static int
453nouveau_card_init_channel(struct drm_device *dev)
454{
455 struct drm_nouveau_private *dev_priv = dev->dev_private;
456 int ret;
457
458 ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
459 NvDmaFB, NvDmaTT);
460 if (ret)
461 return ret;
462
463 mutex_unlock(&dev_priv->channel->mutex);
464 return 0;
465}
466
467static void nouveau_switcheroo_set_state(struct pci_dev *pdev, 513static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
468 enum vga_switcheroo_state state) 514 enum vga_switcheroo_state state)
469{ 515{
@@ -630,8 +676,11 @@ nouveau_card_init(struct drm_device *dev)
630 break; 676 break;
631 } 677 }
632 678
633 if (dev_priv->card_type == NV_40) 679 if (dev_priv->card_type == NV_40 ||
634 nv40_mpeg_create(dev); 680 dev_priv->chipset == 0x31 ||
681 dev_priv->chipset == 0x34 ||
682 dev_priv->chipset == 0x36)
683 nv31_mpeg_create(dev);
635 else 684 else
636 if (dev_priv->card_type == NV_50 && 685 if (dev_priv->card_type == NV_50 &&
637 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) 686 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
@@ -651,41 +700,69 @@ nouveau_card_init(struct drm_device *dev)
651 goto out_engine; 700 goto out_engine;
652 } 701 }
653 702
654 ret = engine->display.create(dev); 703 ret = nouveau_irq_init(dev);
655 if (ret) 704 if (ret)
656 goto out_fifo; 705 goto out_fifo;
657 706
658 ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1); 707 /* initialise general modesetting */
659 if (ret) 708 drm_mode_config_init(dev);
660 goto out_vblank; 709 drm_mode_create_scaling_mode_property(dev);
710 drm_mode_create_dithering_property(dev);
711 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
712 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
713 dev->mode_config.min_width = 0;
714 dev->mode_config.min_height = 0;
715 if (dev_priv->card_type < NV_10) {
716 dev->mode_config.max_width = 2048;
717 dev->mode_config.max_height = 2048;
718 } else
719 if (dev_priv->card_type < NV_50) {
720 dev->mode_config.max_width = 4096;
721 dev->mode_config.max_height = 4096;
722 } else {
723 dev->mode_config.max_width = 8192;
724 dev->mode_config.max_height = 8192;
725 }
661 726
662 ret = nouveau_irq_init(dev); 727 ret = engine->display.create(dev);
663 if (ret) 728 if (ret)
664 goto out_vblank; 729 goto out_irq;
665 730
666 /* what about PVIDEO/PCRTC/PRAMDAC etc? */ 731 nouveau_backlight_init(dev);
667 732
668 if (dev_priv->eng[NVOBJ_ENGINE_GR]) { 733 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
669 ret = nouveau_fence_init(dev); 734 ret = nouveau_fence_init(dev);
670 if (ret) 735 if (ret)
671 goto out_irq; 736 goto out_disp;
672 737
673 ret = nouveau_card_init_channel(dev); 738 ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
739 NvDmaFB, NvDmaTT);
674 if (ret) 740 if (ret)
675 goto out_fence; 741 goto out_fence;
742
743 mutex_unlock(&dev_priv->channel->mutex);
744 }
745
746 if (dev->mode_config.num_crtc) {
747 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
748 if (ret)
749 goto out_chan;
750
751 nouveau_fbcon_init(dev);
752 drm_kms_helper_poll_init(dev);
676 } 753 }
677 754
678 nouveau_fbcon_init(dev);
679 drm_kms_helper_poll_init(dev);
680 return 0; 755 return 0;
681 756
757out_chan:
758 nouveau_channel_put_unlocked(&dev_priv->channel);
682out_fence: 759out_fence:
683 nouveau_fence_fini(dev); 760 nouveau_fence_fini(dev);
761out_disp:
762 nouveau_backlight_exit(dev);
763 engine->display.destroy(dev);
684out_irq: 764out_irq:
685 nouveau_irq_fini(dev); 765 nouveau_irq_fini(dev);
686out_vblank:
687 drm_vblank_cleanup(dev);
688 engine->display.destroy(dev);
689out_fifo: 766out_fifo:
690 if (!dev_priv->noaccel) 767 if (!dev_priv->noaccel)
691 engine->fifo.takedown(dev); 768 engine->fifo.takedown(dev);
@@ -732,15 +809,20 @@ static void nouveau_card_takedown(struct drm_device *dev)
732 struct nouveau_engine *engine = &dev_priv->engine; 809 struct nouveau_engine *engine = &dev_priv->engine;
733 int e; 810 int e;
734 811
735 drm_kms_helper_poll_fini(dev); 812 if (dev->mode_config.num_crtc) {
736 nouveau_fbcon_fini(dev); 813 drm_kms_helper_poll_fini(dev);
814 nouveau_fbcon_fini(dev);
815 drm_vblank_cleanup(dev);
816 }
737 817
738 if (dev_priv->channel) { 818 if (dev_priv->channel) {
739 nouveau_channel_put_unlocked(&dev_priv->channel); 819 nouveau_channel_put_unlocked(&dev_priv->channel);
740 nouveau_fence_fini(dev); 820 nouveau_fence_fini(dev);
741 } 821 }
742 822
823 nouveau_backlight_exit(dev);
743 engine->display.destroy(dev); 824 engine->display.destroy(dev);
825 drm_mode_config_cleanup(dev);
744 826
745 if (!dev_priv->noaccel) { 827 if (!dev_priv->noaccel) {
746 engine->fifo.takedown(dev); 828 engine->fifo.takedown(dev);
@@ -774,7 +856,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
774 engine->vram.takedown(dev); 856 engine->vram.takedown(dev);
775 857
776 nouveau_irq_fini(dev); 858 nouveau_irq_fini(dev);
777 drm_vblank_cleanup(dev);
778 859
779 nouveau_pm_fini(dev); 860 nouveau_pm_fini(dev);
780 nouveau_bios_takedown(dev); 861 nouveau_bios_takedown(dev);
@@ -907,7 +988,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
907int nouveau_load(struct drm_device *dev, unsigned long flags) 988int nouveau_load(struct drm_device *dev, unsigned long flags)
908{ 989{
909 struct drm_nouveau_private *dev_priv; 990 struct drm_nouveau_private *dev_priv;
910 uint32_t reg0; 991 uint32_t reg0, strap;
911 resource_size_t mmio_start_offs; 992 resource_size_t mmio_start_offs;
912 int ret; 993 int ret;
913 994
@@ -951,13 +1032,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
951 1032
952 /* Time to determine the card architecture */ 1033 /* Time to determine the card architecture */
953 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); 1034 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
954 dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */
955 1035
956 /* We're dealing with >=NV10 */ 1036 /* We're dealing with >=NV10 */
957 if ((reg0 & 0x0f000000) > 0) { 1037 if ((reg0 & 0x0f000000) > 0) {
958 /* Bit 27-20 contain the architecture in hex */ 1038 /* Bit 27-20 contain the architecture in hex */
959 dev_priv->chipset = (reg0 & 0xff00000) >> 20; 1039 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
960 dev_priv->stepping = (reg0 & 0xff);
961 /* NV04 or NV05 */ 1040 /* NV04 or NV05 */
962 } else if ((reg0 & 0xff00fff0) == 0x20004000) { 1041 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
963 if (reg0 & 0x00f00000) 1042 if (reg0 & 0x00f00000)
@@ -987,6 +1066,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
987 case 0xc0: 1066 case 0xc0:
988 dev_priv->card_type = NV_C0; 1067 dev_priv->card_type = NV_C0;
989 break; 1068 break;
1069 case 0xd0:
1070 dev_priv->card_type = NV_D0;
1071 break;
990 default: 1072 default:
991 NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0); 1073 NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
992 ret = -EINVAL; 1074 ret = -EINVAL;
@@ -996,6 +1078,23 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
996 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", 1078 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
997 dev_priv->card_type, reg0); 1079 dev_priv->card_type, reg0);
998 1080
1081 /* determine frequency of timing crystal */
1082 strap = nv_rd32(dev, 0x101000);
1083 if ( dev_priv->chipset < 0x17 ||
1084 (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25))
1085 strap &= 0x00000040;
1086 else
1087 strap &= 0x00400040;
1088
1089 switch (strap) {
1090 case 0x00000000: dev_priv->crystal = 13500; break;
1091 case 0x00000040: dev_priv->crystal = 14318; break;
1092 case 0x00400000: dev_priv->crystal = 27000; break;
1093 case 0x00400040: dev_priv->crystal = 25000; break;
1094 }
1095
1096 NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
1097
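The strap register at 0x101000 encodes the crystal type in bits 6 and 22; on chips before NV17 and in the NV20-NV25 range only bit 6 is consulted, which is why the mask differs. Worked through the switch above: a strap of 0x00400040 on a newer chip selects a 25000 kHz reference, while the same register value on an NV11 is masked down to 0x00000040 and reports 14318 kHz.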
999 /* Determine whether we'll attempt acceleration or not, some 1098 /* Determine whether we'll attempt acceleration or not, some
1000 * cards are disabled by default here due to them being known 1099 * cards are disabled by default here due to them being known
1001 * non-functional, or never been tested due to lack of hw. 1100 * non-functional, or never been tested due to lack of hw.
@@ -1030,7 +1129,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1030 ioremap(pci_resource_start(dev->pdev, ramin_bar), 1129 ioremap(pci_resource_start(dev->pdev, ramin_bar),
1031 dev_priv->ramin_size); 1130 dev_priv->ramin_size);
1032 if (!dev_priv->ramin) { 1131 if (!dev_priv->ramin) {
1033 NV_ERROR(dev, "Failed to PRAMIN BAR"); 1132 NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
1034 ret = -ENOMEM; 1133 ret = -ENOMEM;
1035 goto err_mmio; 1134 goto err_mmio;
1036 } 1135 }
@@ -1130,7 +1229,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1130 getparam->value = 1; 1229 getparam->value = 1;
1131 break; 1230 break;
1132 case NOUVEAU_GETPARAM_HAS_PAGEFLIP: 1231 case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
1133 getparam->value = 1; 1232 getparam->value = dev_priv->card_type < NV_D0;
1134 break; 1233 break;
1135 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1234 case NOUVEAU_GETPARAM_GRAPH_UNITS:
1136 /* NV40 and NV50 versions are quite different, but register 1235 /* NV40 and NV50 versions are quite different, but register
@@ -1198,6 +1297,23 @@ nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
1198 return false; 1297 return false;
1199} 1298}
1200 1299
1300/* Wait until cond(data) == true, up until timeout has hit */
1301bool
1302nouveau_wait_cb(struct drm_device *dev, u64 timeout,
1303 bool (*cond)(void *), void *data)
1304{
1305 struct drm_nouveau_private *dev_priv = dev->dev_private;
1306 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1307 u64 start = ptimer->read(dev);
1308
1309 do {
1310 if (cond(data) == true)
1311 return true;
1312 } while (ptimer->read(dev) - start < timeout);
1313
1314 return false;
1315}
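nouveau_wait_cb() rounds out the existing nouveau_wait_eq()/nouveau_wait_ne() helpers by polling an arbitrary predicate against the PTIMER clock until it returns true or the timeout expires. A usage sketch with a made-up condition callback and register, shown only to illustrate the calling convention (timeout in PTIMER ticks, i.e. nanoseconds on these chips):

	static bool
	engine_idle(void *data)
	{
		struct drm_device *dev = data;
		return !(nv_rd32(dev, 0x400700) & 1);	/* illustrative idle check */
	}

	if (!nouveau_wait_cb(dev, 2000000000ULL, engine_idle, dev))
		NV_ERROR(dev, "timeout waiting for engine to idle\n");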
1316
1201/* Waits for PGRAPH to go completely idle */ 1317/* Waits for PGRAPH to go completely idle */
1202bool nouveau_wait_for_idle(struct drm_device *dev) 1318bool nouveau_wait_for_idle(struct drm_device *dev)
1203{ 1319{
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 244fd38fdb84..ef0832b29ad2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -172,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
172 vm->map_pgt(vpgd->obj, pde, vpgt->obj); 172 vm->map_pgt(vpgd->obj, pde, vpgt->obj);
173 } 173 }
174 174
175 mutex_unlock(&vm->mm->mutex); 175 mutex_unlock(&vm->mm.mutex);
176 nouveau_gpuobj_ref(NULL, &pgt); 176 nouveau_gpuobj_ref(NULL, &pgt);
177 mutex_lock(&vm->mm->mutex); 177 mutex_lock(&vm->mm.mutex);
178 } 178 }
179} 179}
180 180
@@ -191,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
191 pgt_size = (1 << (vm->pgt_bits + 12)) >> type; 191 pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
192 pgt_size *= 8; 192 pgt_size *= 8;
193 193
194 mutex_unlock(&vm->mm->mutex); 194 mutex_unlock(&vm->mm.mutex);
195 ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, 195 ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
196 NVOBJ_FLAG_ZERO_ALLOC, &pgt); 196 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
197 mutex_lock(&vm->mm->mutex); 197 mutex_lock(&vm->mm.mutex);
198 if (unlikely(ret)) 198 if (unlikely(ret))
199 return ret; 199 return ret;
200 200
201 /* someone beat us to filling the PDE while we didn't have the lock */ 201 /* someone beat us to filling the PDE while we didn't have the lock */
202 if (unlikely(vpgt->refcount[big]++)) { 202 if (unlikely(vpgt->refcount[big]++)) {
203 mutex_unlock(&vm->mm->mutex); 203 mutex_unlock(&vm->mm.mutex);
204 nouveau_gpuobj_ref(NULL, &pgt); 204 nouveau_gpuobj_ref(NULL, &pgt);
205 mutex_lock(&vm->mm->mutex); 205 mutex_lock(&vm->mm.mutex);
206 return 0; 206 return 0;
207 } 207 }
208 208
@@ -223,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
223 u32 fpde, lpde, pde; 223 u32 fpde, lpde, pde;
224 int ret; 224 int ret;
225 225
226 mutex_lock(&vm->mm->mutex); 226 mutex_lock(&vm->mm.mutex);
227 ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node); 227 ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
228 if (unlikely(ret != 0)) { 228 if (unlikely(ret != 0)) {
229 mutex_unlock(&vm->mm->mutex); 229 mutex_unlock(&vm->mm.mutex);
230 return ret; 230 return ret;
231 } 231 }
232 232
@@ -245,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
245 if (ret) { 245 if (ret) {
246 if (pde != fpde) 246 if (pde != fpde)
247 nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); 247 nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
248 nouveau_mm_put(vm->mm, vma->node); 248 nouveau_mm_put(&vm->mm, vma->node);
249 mutex_unlock(&vm->mm->mutex); 249 mutex_unlock(&vm->mm.mutex);
250 vma->node = NULL; 250 vma->node = NULL;
251 return ret; 251 return ret;
252 } 252 }
253 } 253 }
254 mutex_unlock(&vm->mm->mutex); 254 mutex_unlock(&vm->mm.mutex);
255 255
256 vma->vm = vm; 256 vma->vm = vm;
257 vma->offset = (u64)vma->node->offset << 12; 257 vma->offset = (u64)vma->node->offset << 12;
@@ -270,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma)
270 fpde = (vma->node->offset >> vm->pgt_bits); 270 fpde = (vma->node->offset >> vm->pgt_bits);
271 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; 271 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
272 272
273 mutex_lock(&vm->mm->mutex); 273 mutex_lock(&vm->mm.mutex);
274 nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde); 274 nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
275 nouveau_mm_put(vm->mm, vma->node); 275 nouveau_mm_put(&vm->mm, vma->node);
276 vma->node = NULL; 276 vma->node = NULL;
277 mutex_unlock(&vm->mm->mutex); 277 mutex_unlock(&vm->mm.mutex);
278} 278}
279 279
280int 280int
@@ -306,7 +306,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
306 block = length; 306 block = length;
307 307
308 } else 308 } else
309 if (dev_priv->card_type == NV_C0) { 309 if (dev_priv->card_type >= NV_C0) {
310 vm->map_pgt = nvc0_vm_map_pgt; 310 vm->map_pgt = nvc0_vm_map_pgt;
311 vm->map = nvc0_vm_map; 311 vm->map = nvc0_vm_map;
312 vm->map_sg = nvc0_vm_map_sg; 312 vm->map_sg = nvc0_vm_map_sg;
@@ -360,11 +360,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
360 360
361 nouveau_gpuobj_ref(pgd, &vpgd->obj); 361 nouveau_gpuobj_ref(pgd, &vpgd->obj);
362 362
363 mutex_lock(&vm->mm->mutex); 363 mutex_lock(&vm->mm.mutex);
364 for (i = vm->fpde; i <= vm->lpde; i++) 364 for (i = vm->fpde; i <= vm->lpde; i++)
365 vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); 365 vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
366 list_add(&vpgd->head, &vm->pgd_list); 366 list_add(&vpgd->head, &vm->pgd_list);
367 mutex_unlock(&vm->mm->mutex); 367 mutex_unlock(&vm->mm.mutex);
368 return 0; 368 return 0;
369} 369}
370 370
@@ -377,7 +377,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
377 if (!mpgd) 377 if (!mpgd)
378 return; 378 return;
379 379
380 mutex_lock(&vm->mm->mutex); 380 mutex_lock(&vm->mm.mutex);
381 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 381 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
382 if (vpgd->obj == mpgd) { 382 if (vpgd->obj == mpgd) {
383 pgd = vpgd->obj; 383 pgd = vpgd->obj;
@@ -386,7 +386,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
386 break; 386 break;
387 } 387 }
388 } 388 }
389 mutex_unlock(&vm->mm->mutex); 389 mutex_unlock(&vm->mm.mutex);
390 390
391 nouveau_gpuobj_ref(NULL, &pgd); 391 nouveau_gpuobj_ref(NULL, &pgd);
392} 392}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 579ca8cc223c..6ce995f7797e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -51,7 +51,7 @@ struct nouveau_vma {
51 51
52struct nouveau_vm { 52struct nouveau_vm {
53 struct drm_device *dev; 53 struct drm_device *dev;
54 struct nouveau_mm *mm; 54 struct nouveau_mm mm;
55 int refcount; 55 int refcount;
56 56
57 struct list_head pgd_list; 57 struct list_head pgd_list;
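
Editor's note: the nouveau_vm change above swaps a separately allocated struct nouveau_mm pointer for an embedded member, which is why every call site in the nouveau_vm.c hunks now locks vm->mm.mutex and passes &vm->mm. A minimal userspace sketch of that pointer-to-embedded pattern, with the structs trimmed down and pthread standing in for the kernel mutex (struct names mirror the patch, the fields shown are illustrative only):

#include <pthread.h>
#include <stdio.h>

struct nouveau_mm {
	pthread_mutex_t mutex;            /* stands in for the kernel mutex */
};

struct nouveau_vm {
	struct nouveau_mm mm;             /* embedded by value after the patch */
};

static void vm_do_locked(struct nouveau_vm *vm)
{
	pthread_mutex_lock(&vm->mm.mutex);    /* was &vm->mm->mutex before */
	/* ... address-space bookkeeping would happen here ... */
	pthread_mutex_unlock(&vm->mm.mutex);
}

int main(void)
{
	struct nouveau_vm vm;

	pthread_mutex_init(&vm.mm.mutex, NULL);
	vm_do_locked(&vm);
	pthread_mutex_destroy(&vm.mm.mutex);
	printf("ok\n");
	return 0;
}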
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 75e872741d92..86d03e15735d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -27,7 +27,7 @@
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a }; 30static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
31static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); 31static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
32 32
33int 33int
@@ -170,6 +170,13 @@ nouveau_volt_init(struct drm_device *dev)
170 */ 170 */
171 vidshift = 2; 171 vidshift = 2;
172 break; 172 break;
173 case 0x40:
174 headerlen = volt[1];
175 recordlen = volt[2];
176 entries = volt[3]; /* not a clue what the entries are for.. */
177 vidmask = volt[11]; /* guess.. */
178 vidshift = 0;
179 break;
173 default: 180 default:
174 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); 181 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
175 return; 182 return;
@@ -197,16 +204,37 @@ nouveau_volt_init(struct drm_device *dev)
197 } 204 }
198 205
199 /* parse vbios entries into common format */ 206 /* parse vbios entries into common format */
200 voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL); 207 voltage->version = volt[0];
201 if (!voltage->level) 208 if (voltage->version < 0x40) {
202 return; 209 voltage->nr_level = entries;
210 voltage->level =
211 kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
212 if (!voltage->level)
213 return;
203 214
204 entry = volt + headerlen; 215 entry = volt + headerlen;
205 for (i = 0; i < entries; i++, entry += recordlen) { 216 for (i = 0; i < entries; i++, entry += recordlen) {
206 voltage->level[i].voltage = entry[0]; 217 voltage->level[i].voltage = entry[0] * 10000;
207 voltage->level[i].vid = entry[1] >> vidshift; 218 voltage->level[i].vid = entry[1] >> vidshift;
219 }
220 } else {
221 u32 volt_uv = ROM32(volt[4]);
222 s16 step_uv = ROM16(volt[8]);
223 u8 vid;
224
225 voltage->nr_level = voltage->vid_mask + 1;
226 voltage->level = kcalloc(voltage->nr_level,
227 sizeof(*voltage->level), GFP_KERNEL);
228 if (!voltage->level)
229 return;
230
231 for (vid = 0; vid <= voltage->vid_mask; vid++) {
232 voltage->level[vid].voltage = volt_uv;
233 voltage->level[vid].vid = vid;
234 volt_uv += step_uv;
235 }
208 } 236 }
209 voltage->nr_level = entries; 237
210 voltage->supported = true; 238 voltage->supported = true;
211} 239}
212 240
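
Editor's note: the new version-0x40 voltage table case above no longer stores one record per VID; it carries a base voltage in microvolts (ROM32(volt[4])) and a signed per-step delta (ROM16(volt[8])), and the driver synthesizes one level for every VID permitted by the mask. A standalone sketch of that expansion, with the base/step/mask values invented purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t volt_uv  = 825000;   /* base voltage in uV, ROM32(volt[4]) in the patch */
	int16_t  step_uv  = 12500;    /* signed per-VID step, ROM16(volt[8]) */
	unsigned vid_mask = 0x0f;     /* assumed 4-bit VID mask for the example */

	/* one level per possible VID, voltage accumulating by step_uv */
	for (unsigned vid = 0; vid <= vid_mask; vid++) {
		printf("vid %2u -> %u uV\n", vid, (unsigned)volt_uv);
		volt_uv += step_uv;
	}
	return 0;
}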
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 1715e1464b7d..6bd8518d7b2e 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -126,27 +126,6 @@ nv04_display_create(struct drm_device *dev)
126 126
127 nouveau_hw_save_vga_fonts(dev, 1); 127 nouveau_hw_save_vga_fonts(dev, 1);
128 128
129 drm_mode_config_init(dev);
130 drm_mode_create_scaling_mode_property(dev);
131 drm_mode_create_dithering_property(dev);
132
133 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
134
135 dev->mode_config.min_width = 0;
136 dev->mode_config.min_height = 0;
137 switch (dev_priv->card_type) {
138 case NV_04:
139 dev->mode_config.max_width = 2048;
140 dev->mode_config.max_height = 2048;
141 break;
142 default:
143 dev->mode_config.max_width = 4096;
144 dev->mode_config.max_height = 4096;
145 break;
146 }
147
148 dev->mode_config.fb_base = dev_priv->fb_phys;
149
150 nv04_crtc_create(dev, 0); 129 nv04_crtc_create(dev, 0);
151 if (nv_two_heads(dev)) 130 if (nv_two_heads(dev))
152 nv04_crtc_create(dev, 1); 131 nv04_crtc_create(dev, 1);
@@ -235,8 +214,6 @@ nv04_display_destroy(struct drm_device *dev)
235 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 214 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
236 crtc->funcs->restore(crtc); 215 crtc->funcs->restore(crtc);
237 216
238 drm_mode_config_cleanup(dev);
239
240 nouveau_hw_save_vga_fonts(dev, 0); 217 nouveau_hw_save_vga_fonts(dev, 0);
241} 218}
242 219
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index eb1c70dd82ed..9ae92a87b8cc 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -68,6 +68,7 @@ void
68nv04_pm_clock_set(struct drm_device *dev, void *pre_state) 68nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
69{ 69{
70 struct drm_nouveau_private *dev_priv = dev->dev_private; 70 struct drm_nouveau_private *dev_priv = dev->dev_private;
71 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
71 struct nv04_pm_state *state = pre_state; 72 struct nv04_pm_state *state = pre_state;
72 u32 reg = state->pll.reg; 73 u32 reg = state->pll.reg;
73 74
@@ -85,6 +86,9 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
85 nv_mask(dev, 0x1002c0, 0, 1 << 8); 86 nv_mask(dev, 0x1002c0, 0, 1 << 8);
86 } 87 }
87 88
89 if (reg == NV_PRAMDAC_NVPLL_COEFF)
90 ptimer->init(dev);
91
88 kfree(state); 92 kfree(state);
89} 93}
90 94
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
index 1d09ddd57399..263301b809dd 100644
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -6,43 +6,75 @@
6int 6int
7nv04_timer_init(struct drm_device *dev) 7nv04_timer_init(struct drm_device *dev)
8{ 8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 u32 m, n, d;
11
9 nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000); 12 nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
10 nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF); 13 nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
11 14
12 /* Just use the pre-existing values when possible for now; these regs 15 /* aim for 31.25MHz, which gives us nanosecond timestamps */
13 * are not written in nv (driver writer missed a /4 on the address), and 16 d = 1000000 / 32;
14 * writing 8 and 3 to the correct regs breaks the timings on the LVDS 17
15 * hardware sequencing microcode. 18 /* determine base clock for timer source */
16 * A correct solution (involving calculations with the GPU PLL) can 19 if (dev_priv->chipset < 0x40) {
17 * be done when kernel modesetting lands 20 n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
18 */ 21 } else
19 if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) || 22 if (dev_priv->chipset == 0x40) {
20 !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) { 23 /*XXX: figure this out */
21 nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008); 24 n = 0;
22 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003); 25 } else {
26 n = dev_priv->crystal;
27 m = 1;
28 while (n < (d * 2)) {
29 n += (n / m);
30 m++;
31 }
32
33 nv_wr32(dev, 0x009220, m - 1);
34 }
35
36 if (!n) {
37 NV_WARN(dev, "PTIMER: unknown input clock freq\n");
38 if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
39 !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
40 nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
41 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
42 }
43 return 0;
44 }
45
46 /* reduce ratio to acceptable values */
47 while (((n % 5) == 0) && ((d % 5) == 0)) {
48 n /= 5;
49 d /= 5;
23 } 50 }
24 51
52 while (((n % 2) == 0) && ((d % 2) == 0)) {
53 n /= 2;
54 d /= 2;
55 }
56
57 while (n > 0xffff || d > 0xffff) {
58 n >>= 1;
59 d >>= 1;
60 }
61
62 nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
63 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
25 return 0; 64 return 0;
26} 65}
27 66
28uint64_t 67u64
29nv04_timer_read(struct drm_device *dev) 68nv04_timer_read(struct drm_device *dev)
30{ 69{
31 uint32_t low; 70 u32 hi, lo;
32 /* From kmmio dumps on nv28 this looks like how the blob does this. 71
33 * It reads the high dword twice, before and after.
34 * The only explanation seems to be that the 64-bit timer counter
35 * advances between high and low dword reads and may corrupt the
36 * result. Not confirmed.
37 */
38 uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
39 uint32_t high1;
40 do { 72 do {
41 high1 = high2; 73 hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
42 low = nv_rd32(dev, NV04_PTIMER_TIME_0); 74 lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
43 high2 = nv_rd32(dev, NV04_PTIMER_TIME_1); 75 } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
44 } while (high1 != high2); 76
45 return (((uint64_t)high2) << 32) | (uint64_t)low; 77 return ((u64)hi << 32 | lo);
46} 78}
47 79
48void 80void
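
Editor's note: the rewritten nv04_timer_init() above targets a 31.25MHz timer rate (1000000/32) so PTIMER reads come out in nanoseconds: it takes the source clock in kHz as the numerator, strips common factors of 5 and 2 from the n/d ratio, then shifts both values down until they fit the 16-bit NUMERATOR/DENOMINATOR registers. A standalone sketch of that reduction for the common 27000kHz crystal (the input value is just an example):

#include <stdio.h>

int main(void)
{
	unsigned n = 27000;          /* source clock in kHz (crystal here) */
	unsigned d = 1000000 / 32;   /* 31250kHz target for ns timestamps */

	/* strip common factors of 5 and 2, as in the patch */
	while ((n % 5) == 0 && (d % 5) == 0) { n /= 5; d /= 5; }
	while ((n % 2) == 0 && (d % 2) == 0) { n /= 2; d /= 2; }

	/* both values must fit the 16-bit hardware registers */
	while (n > 0xffff || d > 0xffff) { n >>= 1; d >>= 1; }

	printf("NUMERATOR=%u DENOMINATOR=%u\n", n, d);   /* 108 / 125 */
	return 0;
}

The read side is unrelated to the ratio: nv04_timer_read() simply re-reads the high word until it matches the value seen before the low-word read, so a carry between the two 32-bit halves cannot corrupt the 64-bit result.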
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
index ad03a0e1fc7d..6f06a0713f00 100644
--- a/drivers/gpu/drm/nouveau/nv40_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c
@@ -26,10 +26,32 @@
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_ramht.h" 27#include "nouveau_ramht.h"
28 28
29struct nv40_mpeg_engine { 29struct nv31_mpeg_engine {
30 struct nouveau_exec_engine base; 30 struct nouveau_exec_engine base;
31 atomic_t refcount;
31}; 32};
32 33
34
35static int
36nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
37{
38 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
39
40 if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
41 return -EBUSY;
42
43 chan->engctx[engine] = (void *)0xdeadcafe;
44 return 0;
45}
46
47static void
48nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
49{
50 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
51 atomic_dec(&pmpeg->refcount);
52 chan->engctx[engine] = NULL;
53}
54
33static int 55static int
34nv40_mpeg_context_new(struct nouveau_channel *chan, int engine) 56nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
35{ 57{
@@ -81,7 +103,7 @@ nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
81} 103}
82 104
83static int 105static int
84nv40_mpeg_object_new(struct nouveau_channel *chan, int engine, 106nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
85 u32 handle, u16 class) 107 u32 handle, u16 class)
86{ 108{
87 struct drm_device *dev = chan->dev; 109 struct drm_device *dev = chan->dev;
@@ -103,10 +125,10 @@ nv40_mpeg_object_new(struct nouveau_channel *chan, int engine,
103} 125}
104 126
105static int 127static int
106nv40_mpeg_init(struct drm_device *dev, int engine) 128nv31_mpeg_init(struct drm_device *dev, int engine)
107{ 129{
108 struct drm_nouveau_private *dev_priv = dev->dev_private; 130 struct drm_nouveau_private *dev_priv = dev->dev_private;
109 struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine); 131 struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
110 int i; 132 int i;
111 133
112 /* VPE init */ 134 /* VPE init */
@@ -121,7 +143,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
121 /* PMPEG init */ 143 /* PMPEG init */
122 nv_wr32(dev, 0x00b32c, 0x00000000); 144 nv_wr32(dev, 0x00b32c, 0x00000000);
123 nv_wr32(dev, 0x00b314, 0x00000100); 145 nv_wr32(dev, 0x00b314, 0x00000100);
124 nv_wr32(dev, 0x00b220, 0x00000044); 146 nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
125 nv_wr32(dev, 0x00b300, 0x02001ec1); 147 nv_wr32(dev, 0x00b300, 0x02001ec1);
126 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001); 148 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
127 149
@@ -137,7 +159,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
137} 159}
138 160
139static int 161static int
140nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend) 162nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
141{ 163{
142 /*XXX: context save? */ 164 /*XXX: context save? */
143 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 165 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
@@ -146,7 +168,7 @@ nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
146} 168}
147 169
148static int 170static int
149nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) 171nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
150{ 172{
151 struct drm_device *dev = chan->dev; 173 struct drm_device *dev = chan->dev;
152 u32 inst = data << 4; 174 u32 inst = data << 4;
@@ -184,13 +206,17 @@ nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
184} 206}
185 207
186static int 208static int
187nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst) 209nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
188{ 210{
189 struct drm_nouveau_private *dev_priv = dev->dev_private; 211 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nouveau_gpuobj *ctx; 212 struct nouveau_gpuobj *ctx;
191 unsigned long flags; 213 unsigned long flags;
192 int i; 214 int i;
193 215
216 /* hardcode drm channel id on nv3x, so swmthd lookup works */
217 if (dev_priv->card_type < NV_40)
218 return 0;
219
194 spin_lock_irqsave(&dev_priv->channels.lock, flags); 220 spin_lock_irqsave(&dev_priv->channels.lock, flags);
195 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 221 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
196 if (!dev_priv->channels.ptr[i]) 222 if (!dev_priv->channels.ptr[i])
@@ -205,7 +231,7 @@ nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst)
205} 231}
206 232
207static void 233static void
208nv40_vpe_set_tile_region(struct drm_device *dev, int i) 234nv31_vpe_set_tile_region(struct drm_device *dev, int i)
209{ 235{
210 struct drm_nouveau_private *dev_priv = dev->dev_private; 236 struct drm_nouveau_private *dev_priv = dev->dev_private;
211 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; 237 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
@@ -216,10 +242,10 @@ nv40_vpe_set_tile_region(struct drm_device *dev, int i)
216} 242}
217 243
218static void 244static void
219nv40_mpeg_isr(struct drm_device *dev) 245nv31_mpeg_isr(struct drm_device *dev)
220{ 246{
221 u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4; 247 u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
222 u32 chid = nv40_mpeg_isr_chid(dev, inst); 248 u32 chid = nv31_mpeg_isr_chid(dev, inst);
223 u32 stat = nv_rd32(dev, 0x00b100); 249 u32 stat = nv_rd32(dev, 0x00b100);
224 u32 type = nv_rd32(dev, 0x00b230); 250 u32 type = nv_rd32(dev, 0x00b230);
225 u32 mthd = nv_rd32(dev, 0x00b234); 251 u32 mthd = nv_rd32(dev, 0x00b234);
@@ -249,10 +275,10 @@ nv40_mpeg_isr(struct drm_device *dev)
249} 275}
250 276
251static void 277static void
252nv40_vpe_isr(struct drm_device *dev) 278nv31_vpe_isr(struct drm_device *dev)
253{ 279{
254 if (nv_rd32(dev, 0x00b100)) 280 if (nv_rd32(dev, 0x00b100))
255 nv40_mpeg_isr(dev); 281 nv31_mpeg_isr(dev);
256 282
257 if (nv_rd32(dev, 0x00b800)) { 283 if (nv_rd32(dev, 0x00b800)) {
258 u32 stat = nv_rd32(dev, 0x00b800); 284 u32 stat = nv_rd32(dev, 0x00b800);
@@ -262,9 +288,9 @@ nv40_vpe_isr(struct drm_device *dev)
262} 288}
263 289
264static void 290static void
265nv40_mpeg_destroy(struct drm_device *dev, int engine) 291nv31_mpeg_destroy(struct drm_device *dev, int engine)
266{ 292{
267 struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine); 293 struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
268 294
269 nouveau_irq_unregister(dev, 0); 295 nouveau_irq_unregister(dev, 0);
270 296
@@ -273,34 +299,41 @@ nv40_mpeg_destroy(struct drm_device *dev, int engine)
273} 299}
274 300
275int 301int
276nv40_mpeg_create(struct drm_device *dev) 302nv31_mpeg_create(struct drm_device *dev)
277{ 303{
278 struct nv40_mpeg_engine *pmpeg; 304 struct drm_nouveau_private *dev_priv = dev->dev_private;
305 struct nv31_mpeg_engine *pmpeg;
279 306
280 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL); 307 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
281 if (!pmpeg) 308 if (!pmpeg)
282 return -ENOMEM; 309 return -ENOMEM;
283 310 atomic_set(&pmpeg->refcount, 0);
284 pmpeg->base.destroy = nv40_mpeg_destroy; 311
285 pmpeg->base.init = nv40_mpeg_init; 312 pmpeg->base.destroy = nv31_mpeg_destroy;
286 pmpeg->base.fini = nv40_mpeg_fini; 313 pmpeg->base.init = nv31_mpeg_init;
287 pmpeg->base.context_new = nv40_mpeg_context_new; 314 pmpeg->base.fini = nv31_mpeg_fini;
288 pmpeg->base.context_del = nv40_mpeg_context_del; 315 if (dev_priv->card_type < NV_40) {
289 pmpeg->base.object_new = nv40_mpeg_object_new; 316 pmpeg->base.context_new = nv31_mpeg_context_new;
317 pmpeg->base.context_del = nv31_mpeg_context_del;
318 } else {
319 pmpeg->base.context_new = nv40_mpeg_context_new;
320 pmpeg->base.context_del = nv40_mpeg_context_del;
321 }
322 pmpeg->base.object_new = nv31_mpeg_object_new;
290 323
291 /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between 324 /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
292 * all VPE engines, for this driver's purposes the PMPEG engine 325 * all VPE engines, for this driver's purposes the PMPEG engine
293 * will be treated as the "master" and handle the global VPE 326 * will be treated as the "master" and handle the global VPE
294 * bits too 327 * bits too
295 */ 328 */
296 pmpeg->base.set_tile_region = nv40_vpe_set_tile_region; 329 pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
297 nouveau_irq_register(dev, 0, nv40_vpe_isr); 330 nouveau_irq_register(dev, 0, nv31_vpe_isr);
298 331
299 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base); 332 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
300 NVOBJ_CLASS(dev, 0x3174, MPEG); 333 NVOBJ_CLASS(dev, 0x3174, MPEG);
301 NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma); 334 NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
302 NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma); 335 NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
303 NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma); 336 NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
304 337
305#if 0 338#if 0
306 NVOBJ_ENGINE_ADD(dev, ME, &pme->base); 339 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
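
Editor's note: on nv3x parts PMPEG has no per-channel context, so the new nv31_mpeg_context_new()/_del() pair above lets only one channel claim the engine at a time via an atomic refcount capped at 1. A userspace sketch of that claim/release pattern, using C11 atomics in place of the kernel's atomic_add_unless() (the error value and function names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 0;

static int engine_claim(void)
{
	int expected = 0;
	/* succeed only while the count is still 0, i.e. add_unless(.., 1, 1) */
	if (!atomic_compare_exchange_strong(&refcount, &expected, 1))
		return -16;   /* -EBUSY */
	return 0;
}

static void engine_release(void)
{
	atomic_fetch_sub(&refcount, 1);
}

int main(void)
{
	printf("first claim:   %d\n", engine_claim());   /* 0 */
	printf("second claim:  %d\n", engine_claim());   /* -16, engine busy */
	engine_release();
	printf("after release: %d\n", engine_claim());   /* 0 again */
	return 0;
}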
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
new file mode 100644
index 000000000000..bbc0b9c7e1f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -0,0 +1,338 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29#include "nouveau_hw.h"
30
31#define min2(a,b) ((a) < (b) ? (a) : (b))
32
33static u32
34read_pll_1(struct drm_device *dev, u32 reg)
35{
36 u32 ctrl = nv_rd32(dev, reg + 0x00);
37 int P = (ctrl & 0x00070000) >> 16;
38 int N = (ctrl & 0x0000ff00) >> 8;
39 int M = (ctrl & 0x000000ff) >> 0;
40 u32 ref = 27000, clk = 0;
41
42 if (ctrl & 0x80000000)
43 clk = ref * N / M;
44
45 return clk >> P;
46}
47
48static u32
49read_pll_2(struct drm_device *dev, u32 reg)
50{
51 u32 ctrl = nv_rd32(dev, reg + 0x00);
52 u32 coef = nv_rd32(dev, reg + 0x04);
53 int N2 = (coef & 0xff000000) >> 24;
54 int M2 = (coef & 0x00ff0000) >> 16;
55 int N1 = (coef & 0x0000ff00) >> 8;
56 int M1 = (coef & 0x000000ff) >> 0;
57 int P = (ctrl & 0x00070000) >> 16;
58 u32 ref = 27000, clk = 0;
59
60 if (ctrl & 0x80000000)
61 clk = ref * N1 / M1;
62
63 if (!(ctrl & 0x00000100)) {
64 if (ctrl & 0x40000000)
65 clk = clk * N2 / M2;
66 }
67
68 return clk >> P;
69}
70
71static u32
72read_clk(struct drm_device *dev, u32 src)
73{
74 switch (src) {
75 case 3:
76 return read_pll_2(dev, 0x004000);
77 case 2:
78 return read_pll_1(dev, 0x004008);
79 default:
80 break;
81 }
82
83 return 0;
84}
85
86int
87nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
88{
89 u32 ctrl = nv_rd32(dev, 0x00c040);
90
91 perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
92 perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
93 perflvl->memory = read_pll_2(dev, 0x4020);
94 return 0;
95}
96
97struct nv40_pm_state {
98 u32 ctrl;
99 u32 npll_ctrl;
100 u32 npll_coef;
101 u32 spll;
102 u32 mpll_ctrl;
103 u32 mpll_coef;
104};
105
106static int
107nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
108 u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
109{
110 struct nouveau_pll_vals coef;
111 int ret;
112
113 ret = get_pll_limits(dev, reg, pll);
114 if (ret)
115 return ret;
116
117 if (clk < pll->vco1.maxfreq)
118 pll->vco2.maxfreq = 0;
119
120 ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
121 if (ret == 0)
122 return -ERANGE;
123
124 *N1 = coef.N1;
125 *M1 = coef.M1;
126 if (N2 && M2) {
127 if (pll->vco2.maxfreq) {
128 *N2 = coef.N2;
129 *M2 = coef.M2;
130 } else {
131 *N2 = 1;
132 *M2 = 1;
133 }
134 }
135 *log2P = coef.log2P;
136 return 0;
137}
138
139void *
140nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
141{
142 struct nv40_pm_state *info;
143 struct pll_lims pll;
144 int N1, N2, M1, M2, log2P;
145 int ret;
146
147 info = kmalloc(sizeof(*info), GFP_KERNEL);
148 if (!info)
149 return ERR_PTR(-ENOMEM);
150
151 /* core/geometric clock */
152 ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
153 &N1, &M1, &N2, &M2, &log2P);
154 if (ret < 0)
155 goto out;
156
157 if (N2 == M2) {
158 info->npll_ctrl = 0x80000100 | (log2P << 16);
159 info->npll_coef = (N1 << 8) | M1;
160 } else {
161 info->npll_ctrl = 0xc0000000 | (log2P << 16);
162 info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
163 }
164
165 /* use the second PLL for shader/rop clock, if it differs from core */
166 if (perflvl->shader && perflvl->shader != perflvl->core) {
167 ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
168 &N1, &M1, NULL, NULL, &log2P);
169 if (ret < 0)
170 goto out;
171
172 info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
173 info->ctrl = 0x00000223;
174 } else {
175 info->spll = 0x00000000;
176 info->ctrl = 0x00000333;
177 }
178
179 /* memory clock */
180 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
181 &N1, &M1, &N2, &M2, &log2P);
182 if (ret < 0)
183 goto out;
184
185 info->mpll_ctrl = 0x80000000 | (log2P << 16);
186 info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20;
187 if (N2 == M2) {
188 info->mpll_ctrl |= 0x00000100;
189 info->mpll_coef = (N1 << 8) | M1;
190 } else {
191 info->mpll_ctrl |= 0x40000000;
192 info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
193 }
194
195out:
196 if (ret < 0) {
197 kfree(info);
198 info = ERR_PTR(ret);
199 }
200 return info;
201}
202
203static bool
204nv40_pm_gr_idle(void *data)
205{
206 struct drm_device *dev = data;
207
208 if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 !=
209 (nv_rd32(dev, 0x400760) & 0x0000000f))
210 return false;
211
212 if (nv_rd32(dev, 0x400700))
213 return false;
214
215 return true;
216}
217
218void
219nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
220{
221 struct drm_nouveau_private *dev_priv = dev->dev_private;
222 struct nv40_pm_state *info = pre_state;
223 unsigned long flags;
224 struct bit_entry M;
225 u32 crtc_mask = 0;
226 u8 sr1[2];
227 int i;
228
229 /* determine which CRTCs are active, fetch VGA_SR1 for each */
230 for (i = 0; i < 2; i++) {
231 u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000));
232 u32 cnt = 0;
233 do {
234 if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) {
235 nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
236 sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000));
237 if (!(sr1[i] & 0x20))
238 crtc_mask |= (1 << i);
239 break;
240 }
241 udelay(1);
242 } while (cnt++ < 32);
243 }
244
245 /* halt and idle engines */
246 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
247 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
248 if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
249 goto resume;
250 nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
251 if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
252 goto resume;
253 nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
254 nv04_fifo_cache_pull(dev, false);
255
256 if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
257 goto resume;
258
259 /* set engine clocks */
260 nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
261 nv_wr32(dev, 0x004004, info->npll_coef);
262 nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl);
263 nv_mask(dev, 0x004008, 0xc007ffff, info->spll);
264 mdelay(5);
265 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
266
267 /* wait for vblank start on active crtcs, disable memory access */
268 for (i = 0; i < 2; i++) {
269 if (!(crtc_mask & (1 << i)))
270 continue;
271 nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
272 nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
273 nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
274 nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
275 }
276
277 /* prepare ram for reclocking */
278 nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */
279 nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
280 nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
281 nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
282 nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */
283
284 /* change the PLL of each memory partition */
285 nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000);
286 switch (dev_priv->chipset) {
287 case 0x40:
288 case 0x45:
289 case 0x41:
290 case 0x42:
291 case 0x47:
292 nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl);
293 nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl);
294 nv_wr32(dev, 0x004048, info->mpll_coef);
295 nv_wr32(dev, 0x004030, info->mpll_coef);
296 case 0x43:
297 case 0x49:
298 case 0x4b:
299 nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl);
300 nv_wr32(dev, 0x00403c, info->mpll_coef);
301 default:
302 nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl);
303 nv_wr32(dev, 0x004024, info->mpll_coef);
304 break;
305 }
306 udelay(100);
307 nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000);
308
309 /* re-enable normal operation of memory controller */
310 nv_wr32(dev, 0x1002dc, 0x00000000);
311 nv_mask(dev, 0x100210, 0x80000000, 0x80000000);
312 udelay(100);
313
314 /* execute memory reset script from vbios */
315 if (!bit_table(dev, 'M', &M))
316 nouveau_bios_init_exec(dev, ROM16(M.data[0]));
317
318 /* make sure we're in vblank (hopefully the same one as before), and
319 * then re-enable crtc memory access
320 */
321 for (i = 0; i < 2; i++) {
322 if (!(crtc_mask & (1 << i)))
323 continue;
324 nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
325 nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
326 nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]);
327 }
328
329 /* resume engines */
330resume:
331 nv_wr32(dev, 0x003250, 0x00000001);
332 nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
333 nv_wr32(dev, 0x003200, 0x00000001);
334 nv_wr32(dev, 0x002500, 0x00000001);
335 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
336
337 kfree(info);
338}
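
Editor's note: read_pll_1()/read_pll_2() in the new nv40_pm.c above recover a clock from the PLL registers as ref * N1 / M1, optionally multiplied by N2 / M2 when the second stage is active, then shifted right by the log2P field, with a 27MHz reference assumed. A standalone worked example of that decode (the ctrl/coef values below are made up for illustration, not dumped from hardware):

#include <stdio.h>

int main(void)
{
	unsigned ref  = 27000;        /* reference clock, kHz */
	unsigned ctrl = 0x80010100;   /* PLL enabled, P = 1, second stage bypassed */
	unsigned coef = 0x00003f02;   /* N1 = 63, M1 = 2 */

	int N2 = (coef & 0xff000000) >> 24;
	int M2 = (coef & 0x00ff0000) >> 16;
	int N1 = (coef & 0x0000ff00) >> 8;
	int M1 = (coef & 0x000000ff) >> 0;
	int P  = (ctrl & 0x00070000) >> 16;
	unsigned clk = 0;

	if (ctrl & 0x80000000)
		clk = ref * N1 / M1;
	if (!(ctrl & 0x00000100) && (ctrl & 0x40000000))
		clk = clk * N2 / M2;     /* second stage, skipped here */

	printf("%u kHz\n", clk >> P);
	return 0;
}

With these example values the second stage is bypassed (bit 8 set), so the result is simply 27000 * 63 / 2 >> 1 = 425250 kHz.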
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 5d989073ba6e..882080e0b4f5 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -329,8 +329,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
329 329
330 drm_crtc_cleanup(&nv_crtc->base); 330 drm_crtc_cleanup(&nv_crtc->base);
331 331
332 nv50_cursor_fini(nv_crtc);
333
334 nouveau_bo_unmap(nv_crtc->lut.nvbo); 332 nouveau_bo_unmap(nv_crtc->lut.nvbo);
335 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); 333 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
336 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 334 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 9752c35bb84b..adfc9b607a50 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -137,21 +137,3 @@ nv50_cursor_init(struct nouveau_crtc *nv_crtc)
137 nv_crtc->cursor.show = nv50_cursor_show; 137 nv_crtc->cursor.show = nv50_cursor_show;
138 return 0; 138 return 0;
139} 139}
140
141void
142nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
143{
144 struct drm_device *dev = nv_crtc->base.dev;
145 int idx = nv_crtc->index;
146
147 NV_DEBUG_KMS(dev, "\n");
148
149 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
150 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
151 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
152 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
153 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
154 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
155 }
156}
157
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index db1a5f4b711d..d23ca00e7d62 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -247,6 +247,16 @@ static int nv50_display_disable(struct drm_device *dev)
247 } 247 }
248 } 248 }
249 249
250 for (i = 0; i < 2; i++) {
251 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
252 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
253 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
254 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
255 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
256 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
257 }
258 }
259
250 nv50_evo_fini(dev); 260 nv50_evo_fini(dev);
251 261
252 for (i = 0; i < 3; i++) { 262 for (i = 0; i < 3; i++) {
@@ -286,23 +296,6 @@ int nv50_display_create(struct drm_device *dev)
286 return -ENOMEM; 296 return -ENOMEM;
287 dev_priv->engine.display.priv = priv; 297 dev_priv->engine.display.priv = priv;
288 298
289 /* init basic kernel modesetting */
290 drm_mode_config_init(dev);
291
292 /* Initialise some optional connector properties. */
293 drm_mode_create_scaling_mode_property(dev);
294 drm_mode_create_dithering_property(dev);
295
296 dev->mode_config.min_width = 0;
297 dev->mode_config.min_height = 0;
298
299 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
300
301 dev->mode_config.max_width = 8192;
302 dev->mode_config.max_height = 8192;
303
304 dev->mode_config.fb_base = dev_priv->fb_phys;
305
306 /* Create CRTC objects */ 299 /* Create CRTC objects */
307 for (i = 0; i < 2; i++) 300 for (i = 0; i < 2; i++)
308 nv50_crtc_create(dev, i); 301 nv50_crtc_create(dev, i);
@@ -364,8 +357,6 @@ nv50_display_destroy(struct drm_device *dev)
364 357
365 NV_DEBUG_KMS(dev, "\n"); 358 NV_DEBUG_KMS(dev, "\n");
366 359
367 drm_mode_config_cleanup(dev);
368
369 nv50_display_disable(dev); 360 nv50_display_disable(dev);
370 nouveau_irq_unregister(dev, 26); 361 nouveau_irq_unregister(dev, 26);
371 kfree(disp); 362 kfree(disp);
@@ -698,7 +689,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
698 struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; 689 struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
699 690
700 if (dcb->type == type && (dcb->or & (1 << or))) { 691 if (dcb->type == type && (dcb->or & (1 << or))) {
701 nouveau_bios_run_display_table(dev, dcb, 0, -1); 692 nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
702 disp->irq.dcb = dcb; 693 disp->irq.dcb = dcb;
703 goto ack; 694 goto ack;
704 } 695 }
@@ -711,37 +702,6 @@ ack:
711} 702}
712 703
713static void 704static void
714nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
715{
716 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
717 struct drm_encoder *encoder;
718 uint32_t tmp, unk0 = 0, unk1 = 0;
719
720 if (dcb->type != OUTPUT_DP)
721 return;
722
723 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
724 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
725
726 if (nv_encoder->dcb == dcb) {
727 unk0 = nv_encoder->dp.unk0;
728 unk1 = nv_encoder->dp.unk1;
729 break;
730 }
731 }
732
733 if (unk0 || unk1) {
734 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
735 tmp &= 0xfffffe03;
736 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
737
738 tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
739 tmp &= 0xfef080c0;
740 nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
741 }
742}
743
744static void
745nv50_display_unk20_handler(struct drm_device *dev) 705nv50_display_unk20_handler(struct drm_device *dev)
746{ 706{
747 struct drm_nouveau_private *dev_priv = dev->dev_private; 707 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -753,7 +713,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
753 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 713 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
754 dcb = disp->irq.dcb; 714 dcb = disp->irq.dcb;
755 if (dcb) { 715 if (dcb) {
756 nouveau_bios_run_display_table(dev, dcb, 0, -2); 716 nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
757 disp->irq.dcb = NULL; 717 disp->irq.dcb = NULL;
758 } 718 }
759 719
@@ -837,9 +797,15 @@ nv50_display_unk20_handler(struct drm_device *dev)
837 } 797 }
838 798
839 script = nv50_display_script_select(dev, dcb, mc, pclk); 799 script = nv50_display_script_select(dev, dcb, mc, pclk);
840 nouveau_bios_run_display_table(dev, dcb, script, pclk); 800 nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
841 801
842 nv50_display_unk20_dp_hack(dev, dcb); 802 if (type == OUTPUT_DP) {
803 int link = !(dcb->dpconf.sor.link & 1);
804 if ((mc & 0x000f0000) == 0x00020000)
805 nouveau_dp_tu_update(dev, or, link, pclk, 18);
806 else
807 nouveau_dp_tu_update(dev, or, link, pclk, 24);
808 }
843 809
844 if (dcb->type != OUTPUT_ANALOG) { 810 if (dcb->type != OUTPUT_ANALOG) {
845 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); 811 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
@@ -904,7 +870,7 @@ nv50_display_unk40_handler(struct drm_device *dev)
904 if (!dcb) 870 if (!dcb)
905 goto ack; 871 goto ack;
906 872
907 nouveau_bios_run_display_table(dev, dcb, script, -pclk); 873 nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
908 nv50_display_unk40_dp_set_tmds(dev, dcb); 874 nv50_display_unk40_dp_set_tmds(dev, dcb);
909 875
910ack: 876ack:
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index d4f4206dad7e..793a5ccca121 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -98,6 +98,37 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
98} 98}
99 99
100int 100int
101nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
102{
103 struct dcb_gpio_entry *gpio;
104 u32 v;
105
106 gpio = nouveau_bios_gpio_entry(dev, tag);
107 if (!gpio)
108 return -ENOENT;
109
110 v = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
111 v &= 0x00004000;
112 return (!!v == (gpio->state[1] & 1));
113}
114
115int
116nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
117{
118 struct dcb_gpio_entry *gpio;
119 u32 v;
120
121 gpio = nouveau_bios_gpio_entry(dev, tag);
122 if (!gpio)
123 return -ENOENT;
124
125 v = gpio->state[state] ^ 2;
126
127 nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
128 return 0;
129}
130
131int
101nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, 132nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
102 void (*handler)(void *, int), void *data) 133 void (*handler)(void *, int), void *data)
103{ 134{
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index d43c46caa76e..8c979b31ff61 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -120,70 +120,62 @@ nv50_graph_unload_context(struct drm_device *dev)
120 return 0; 120 return 0;
121} 121}
122 122
123static void 123static int
124nv50_graph_init_reset(struct drm_device *dev) 124nv50_graph_init(struct drm_device *dev, int engine)
125{
126 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
127 NV_DEBUG(dev, "\n");
128
129 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
130 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
131}
132
133static void
134nv50_graph_init_intr(struct drm_device *dev)
135{
136 NV_DEBUG(dev, "\n");
137
138 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
139 nv_wr32(dev, 0x400138, 0xffffffff);
140 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
141}
142
143static void
144nv50_graph_init_regs__nv(struct drm_device *dev)
145{ 125{
146 struct drm_nouveau_private *dev_priv = dev->dev_private; 126 struct drm_nouveau_private *dev_priv = dev->dev_private;
147 uint32_t units = nv_rd32(dev, 0x1540); 127 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
128 u32 units = nv_rd32(dev, 0x001540);
148 int i; 129 int i;
149 130
150 NV_DEBUG(dev, "\n"); 131 NV_DEBUG(dev, "\n");
151 132
133 /* master reset */
134 nv_mask(dev, 0x000200, 0x00200100, 0x00000000);
135 nv_mask(dev, 0x000200, 0x00200100, 0x00200100);
136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
137
138 /* reset/enable traps and interrupts */
152 nv_wr32(dev, 0x400804, 0xc0000000); 139 nv_wr32(dev, 0x400804, 0xc0000000);
153 nv_wr32(dev, 0x406800, 0xc0000000); 140 nv_wr32(dev, 0x406800, 0xc0000000);
154 nv_wr32(dev, 0x400c04, 0xc0000000); 141 nv_wr32(dev, 0x400c04, 0xc0000000);
155 nv_wr32(dev, 0x401800, 0xc0000000); 142 nv_wr32(dev, 0x401800, 0xc0000000);
156 nv_wr32(dev, 0x405018, 0xc0000000); 143 nv_wr32(dev, 0x405018, 0xc0000000);
157 nv_wr32(dev, 0x402000, 0xc0000000); 144 nv_wr32(dev, 0x402000, 0xc0000000);
158
159 for (i = 0; i < 16; i++) { 145 for (i = 0; i < 16; i++) {
160 if (units & 1 << i) { 146 if (!(units & (1 << i)))
161 if (dev_priv->chipset < 0xa0) { 147 continue;
162 nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000); 148
163 nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000); 149 if (dev_priv->chipset < 0xa0) {
164 nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000); 150 nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
165 } else { 151 nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
166 nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000); 152 nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
167 nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000); 153 } else {
168 nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000); 154 nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
169 } 155 nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
156 nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
170 } 157 }
171 } 158 }
172 159
173 nv_wr32(dev, 0x400108, 0xffffffff); 160 nv_wr32(dev, 0x400108, 0xffffffff);
174 161 nv_wr32(dev, 0x400138, 0xffffffff);
175 nv_wr32(dev, 0x400824, 0x00004000); 162 nv_wr32(dev, 0x400100, 0xffffffff);
163 nv_wr32(dev, 0x40013c, 0xffffffff);
176 nv_wr32(dev, 0x400500, 0x00010001); 164 nv_wr32(dev, 0x400500, 0x00010001);
177}
178
179static void
180nv50_graph_init_zcull(struct drm_device *dev)
181{
182 struct drm_nouveau_private *dev_priv = dev->dev_private;
183 int i;
184
185 NV_DEBUG(dev, "\n");
186 165
166 /* upload context program, initialise ctxctl defaults */
167 nv_wr32(dev, 0x400324, 0x00000000);
168 for (i = 0; i < pgraph->ctxprog_size; i++)
169 nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
170 nv_wr32(dev, 0x400824, 0x00000000);
171 nv_wr32(dev, 0x400828, 0x00000000);
172 nv_wr32(dev, 0x40082c, 0x00000000);
173 nv_wr32(dev, 0x400830, 0x00000000);
174 nv_wr32(dev, 0x400724, 0x00000000);
175 nv_wr32(dev, 0x40032c, 0x00000000);
176 nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
177
178 /* some unknown zcull magic */
187 switch (dev_priv->chipset & 0xf0) { 179 switch (dev_priv->chipset & 0xf0) {
188 case 0x50: 180 case 0x50:
189 case 0x80: 181 case 0x80:
@@ -212,43 +204,7 @@ nv50_graph_init_zcull(struct drm_device *dev)
212 nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000); 204 nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
213 nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000); 205 nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
214 } 206 }
215}
216
217static int
218nv50_graph_init_ctxctl(struct drm_device *dev)
219{
220 struct nv50_graph_engine *pgraph = nv_engine(dev, NVOBJ_ENGINE_GR);
221 int i;
222
223 NV_DEBUG(dev, "\n");
224
225 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
226 for (i = 0; i < pgraph->ctxprog_size; i++)
227 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, pgraph->ctxprog[i]);
228
229 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
230 nv_wr32(dev, 0x400320, 4);
231 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
232 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
233 return 0;
234}
235
236static int
237nv50_graph_init(struct drm_device *dev, int engine)
238{
239 int ret;
240
241 NV_DEBUG(dev, "\n");
242
243 nv50_graph_init_reset(dev);
244 nv50_graph_init_regs__nv(dev);
245 nv50_graph_init_zcull(dev);
246
247 ret = nv50_graph_init_ctxctl(dev);
248 if (ret)
249 return ret;
250 207
251 nv50_graph_init_intr(dev);
252 return 0; 208 return 0;
253} 209}
254 210
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index de9abff12b90..d05c2c3b2444 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -40,6 +40,12 @@
40#define CP_FLAG_UNK0B ((0 * 32) + 0xb) 40#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
41#define CP_FLAG_UNK0B_CLEAR 0 41#define CP_FLAG_UNK0B_CLEAR 0
42#define CP_FLAG_UNK0B_SET 1 42#define CP_FLAG_UNK0B_SET 1
43#define CP_FLAG_XFER_SWITCH ((0 * 32) + 0xe)
44#define CP_FLAG_XFER_SWITCH_DISABLE 0
45#define CP_FLAG_XFER_SWITCH_ENABLE 1
46#define CP_FLAG_STATE ((0 * 32) + 0x1c)
47#define CP_FLAG_STATE_STOPPED 0
48#define CP_FLAG_STATE_RUNNING 1
43#define CP_FLAG_UNK1D ((0 * 32) + 0x1d) 49#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
44#define CP_FLAG_UNK1D_CLEAR 0 50#define CP_FLAG_UNK1D_CLEAR 0
45#define CP_FLAG_UNK1D_SET 1 51#define CP_FLAG_UNK1D_SET 1
@@ -194,6 +200,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
194 "the devs.\n"); 200 "the devs.\n");
195 return -ENOSYS; 201 return -ENOSYS;
196 } 202 }
203
204 cp_set (ctx, STATE, RUNNING);
205 cp_set (ctx, XFER_SWITCH, ENABLE);
197 /* decide whether we're loading/unloading the context */ 206 /* decide whether we're loading/unloading the context */
198 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); 207 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
199 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); 208 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
@@ -260,6 +269,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
260 cp_name(ctx, cp_exit); 269 cp_name(ctx, cp_exit);
261 cp_set (ctx, USER_SAVE, NOT_PENDING); 270 cp_set (ctx, USER_SAVE, NOT_PENDING);
262 cp_set (ctx, USER_LOAD, NOT_PENDING); 271 cp_set (ctx, USER_LOAD, NOT_PENDING);
272 cp_set (ctx, XFER_SWITCH, DISABLE);
273 cp_set (ctx, STATE, STOPPED);
263 cp_out (ctx, CP_END); 274 cp_out (ctx, CP_END);
264 ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ 275 ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
265 276
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 8a2810011bda..3d5a86b98282 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -115,15 +115,15 @@ nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
115 BIT_M.version == 1 && BIT_M.length >= 0x0b) { 115 BIT_M.version == 1 && BIT_M.length >= 0x0b) {
116 script = ROM16(BIT_M.data[0x05]); 116 script = ROM16(BIT_M.data[0x05]);
117 if (script) 117 if (script)
118 nouveau_bios_run_init_table(dev, script, NULL); 118 nouveau_bios_run_init_table(dev, script, NULL, -1);
119 script = ROM16(BIT_M.data[0x07]); 119 script = ROM16(BIT_M.data[0x07]);
120 if (script) 120 if (script)
121 nouveau_bios_run_init_table(dev, script, NULL); 121 nouveau_bios_run_init_table(dev, script, NULL, -1);
122 script = ROM16(BIT_M.data[0x09]); 122 script = ROM16(BIT_M.data[0x09]);
123 if (script) 123 if (script)
124 nouveau_bios_run_init_table(dev, script, NULL); 124 nouveau_bios_run_init_table(dev, script, NULL, -1);
125 125
126 nouveau_bios_run_init_table(dev, perflvl->memscript, NULL); 126 nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
127 } 127 }
128 128
129 if (state->type == PLL_MEMORY) { 129 if (state->type == PLL_MEMORY) {
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ffe8b483b7b0..2633aa8554eb 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -124,7 +124,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
124 if (mode == DRM_MODE_DPMS_ON) { 124 if (mode == DRM_MODE_DPMS_ON) {
125 u8 status = DP_SET_POWER_D0; 125 u8 status = DP_SET_POWER_D0;
126 nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); 126 nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
127 nouveau_dp_link_train(encoder); 127 nouveau_dp_link_train(encoder, nv_encoder->dp.datarate);
128 } else { 128 } else {
129 u8 status = DP_SET_POWER_D3; 129 u8 status = DP_SET_POWER_D3;
130 nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); 130 nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
@@ -187,14 +187,13 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
187 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 187 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
188 struct drm_device *dev = encoder->dev; 188 struct drm_device *dev = encoder->dev;
189 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); 189 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
190 struct nouveau_connector *nv_connector;
190 uint32_t mode_ctl = 0; 191 uint32_t mode_ctl = 0;
191 int ret; 192 int ret;
192 193
193 NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n", 194 NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
194 nv_encoder->or, nv_encoder->dcb->type, crtc->index); 195 nv_encoder->or, nv_encoder->dcb->type, crtc->index);
195 196
196 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
197
198 switch (nv_encoder->dcb->type) { 197 switch (nv_encoder->dcb->type) {
199 case OUTPUT_TMDS: 198 case OUTPUT_TMDS:
200 if (nv_encoder->dcb->sorconf.link & 1) { 199 if (nv_encoder->dcb->sorconf.link & 1) {
@@ -206,7 +205,15 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
206 mode_ctl = 0x0200; 205 mode_ctl = 0x0200;
207 break; 206 break;
208 case OUTPUT_DP: 207 case OUTPUT_DP:
209 mode_ctl |= (nv_encoder->dp.mc_unknown << 16); 208 nv_connector = nouveau_encoder_connector_get(nv_encoder);
209 if (nv_connector && nv_connector->base.display_info.bpc == 6) {
210 nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
211 mode_ctl |= 0x00020000;
212 } else {
213 nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
214 mode_ctl |= 0x00050000;
215 }
216
210 if (nv_encoder->dcb->sorconf.link & 1) 217 if (nv_encoder->dcb->sorconf.link & 1)
211 mode_ctl |= 0x00000800; 218 mode_ctl |= 0x00000800;
212 else 219 else
@@ -227,6 +234,8 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
227 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 234 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
228 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC; 235 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
229 236
237 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
238
230 ret = RING_SPACE(evo, 2); 239 ret = RING_SPACE(evo, 2);
231 if (ret) { 240 if (ret) {
232 NV_ERROR(dev, "no space while connecting SOR\n"); 241 NV_ERROR(dev, "no space while connecting SOR\n");
@@ -313,31 +322,6 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
313 encoder->possible_crtcs = entry->heads; 322 encoder->possible_crtcs = entry->heads;
314 encoder->possible_clones = 0; 323 encoder->possible_clones = 0;
315 324
316 if (nv_encoder->dcb->type == OUTPUT_DP) {
317 int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
318 uint32_t tmp;
319
320 tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
321 if (!tmp)
322 tmp = nv_rd32(dev, 0x610798 + (or * 8));
323
324 switch ((tmp & 0x00000f00) >> 8) {
325 case 8:
326 case 9:
327 nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
328 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
329 nv_encoder->dp.unk0 = tmp & 0x000001fc;
330 tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
331 nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
332 break;
333 default:
334 break;
335 }
336
337 if (!nv_encoder->dp.mc_unknown)
338 nv_encoder->dp.mc_unknown = 5;
339 }
340
341 drm_mode_connector_attach_encoder(connector, encoder); 325 drm_mode_connector_attach_encoder(connector, encoder);
342 return 0; 326 return 0;
343} 327}
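
Editor's note: nv50_sor_mode_set() above now derives nv_encoder->dp.datarate from the pixel clock and the panel depth, clock * 18 / 8 for 6bpc panels and clock * 24 / 8 otherwise, which is the payload bandwidth the subsequent link training has to satisfy. A quick worked example using a 1920x1080@60 pixel clock (the mode is only an example):

#include <stdio.h>

int main(void)
{
	int pclk_khz = 148500;              /* 1920x1080@60 pixel clock, kHz */
	int rate6bpc = pclk_khz * 18 / 8;   /* 6bpc panel  -> 334125 */
	int rate8bpc = pclk_khz * 24 / 8;   /* 8bpc panel  -> 445500 */

	printf("18bpp: %d kB/s, 24bpp: %d kB/s\n", rate6bpc, rate8bpc);
	return 0;
}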
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index af32daecd1ed..9da23838e63e 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,7 +51,7 @@ void
51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) 51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
52{ 52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private; 53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct nouveau_mm *mm = dev_priv->engine.vram.mm; 54 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
55 struct nouveau_mm_node *this; 55 struct nouveau_mm_node *this;
56 struct nouveau_mem *mem; 56 struct nouveau_mem *mem;
57 57
@@ -82,7 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
82 u32 memtype, struct nouveau_mem **pmem) 82 u32 memtype, struct nouveau_mem **pmem)
83{ 83{
84 struct drm_nouveau_private *dev_priv = dev->dev_private; 84 struct drm_nouveau_private *dev_priv = dev->dev_private;
85 struct nouveau_mm *mm = dev_priv->engine.vram.mm; 85 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
86 struct nouveau_mm_node *r; 86 struct nouveau_mm_node *r;
87 struct nouveau_mem *mem; 87 struct nouveau_mem *mem;
88 int comp = (memtype & 0x300) >> 8; 88 int comp = (memtype & 0x300) >> 8;
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index e4b2b9e934b2..618c144b7a30 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -27,178 +27,316 @@
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30/* This is actually a lot more complex than it appears here, but hopefully 30static u32 read_clk(struct drm_device *, int, bool);
31 * this should be able to deal with what the VBIOS leaves for us.. 31static u32 read_pll(struct drm_device *, int, u32);
32 *
33 * If not, well, I'll jump off that bridge when I come to it.
34 */
35 32
36struct nva3_pm_state { 33static u32
37 enum pll_types type; 34read_vco(struct drm_device *dev, int clk)
38 u32 src0; 35{
39 u32 src1; 36 u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
40 u32 ctrl; 37 if ((sctl & 0x00000030) != 0x00000030)
41 u32 coef; 38 return read_pll(dev, 0x41, 0x00e820);
42 u32 old_pnm; 39 return read_pll(dev, 0x42, 0x00e8a0);
43 u32 new_pnm; 40}
44 u32 new_div;
45};
46 41
47static int 42static u32
48nva3_pm_pll_offset(u32 id) 43read_clk(struct drm_device *dev, int clk, bool ignore_en)
49{ 44{
50 static const u32 pll_map[] = { 45 struct drm_nouveau_private *dev_priv = dev->dev_private;
51 0x00, PLL_CORE, 46 u32 sctl, sdiv, sclk;
52 0x01, PLL_SHADER, 47
53 0x02, PLL_MEMORY, 48 /* refclk for the 0xe8xx plls is a fixed frequency */
54 0x00, 0x00 49 if (clk >= 0x40) {
55 }; 50 if (dev_priv->chipset == 0xaf) {
56 const u32 *map = pll_map; 51 /* no joke.. seriously.. sigh.. */
57 52 return nv_rd32(dev, 0x00471c) * 1000;
58 while (map[1]) { 53 }
59 if (id == map[1]) 54
60 return map[0]; 55 return dev_priv->crystal;
61 map += 2;
62 } 56 }
63 57
64 return -ENOENT; 58 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
59 if (!ignore_en && !(sctl & 0x00000100))
60 return 0;
61
62 switch (sctl & 0x00003000) {
63 case 0x00000000:
64 return dev_priv->crystal;
65 case 0x00002000:
66 if (sctl & 0x00000040)
67 return 108000;
68 return 100000;
69 case 0x00003000:
70 sclk = read_vco(dev, clk);
71 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
72 return (sclk * 2) / sdiv;
73 default:
74 return 0;
75 }
65} 76}
66 77
67int 78static u32
68nva3_pm_clock_get(struct drm_device *dev, u32 id) 79read_pll(struct drm_device *dev, int clk, u32 pll)
80{
81 u32 ctrl = nv_rd32(dev, pll + 0);
82 u32 sclk = 0, P = 1, N = 1, M = 1;
83
84 if (!(ctrl & 0x00000008)) {
85 if (ctrl & 0x00000001) {
86 u32 coef = nv_rd32(dev, pll + 4);
87 M = (coef & 0x000000ff) >> 0;
88 N = (coef & 0x0000ff00) >> 8;
89 P = (coef & 0x003f0000) >> 16;
90
91 /* no post-divider on these.. */
92 if ((pll & 0x00ff00) == 0x00e800)
93 P = 1;
94
95 sclk = read_clk(dev, 0x00 + clk, false);
96 }
97 } else {
98 sclk = read_clk(dev, 0x10 + clk, false);
99 }
100
101 return sclk * N / (M * P);
102}
103
104struct creg {
105 u32 clk;
106 u32 pll;
107};
108
109static int
110calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
69{ 111{
70 u32 src0, src1, ctrl, coef; 112 struct pll_lims limits;
71 struct pll_lims pll; 113 u32 oclk, sclk, sdiv;
72 int ret, off; 114 int P, N, M, diff;
73 int P, N, M; 115 int ret;
116
117 reg->pll = 0;
118 reg->clk = 0;
119 if (!khz) {
120 NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk);
121 return 0;
122 }
74 123
75 ret = get_pll_limits(dev, id, &pll); 124 switch (khz) {
125 case 27000:
126 reg->clk = 0x00000100;
127 return khz;
128 case 100000:
129 reg->clk = 0x00002100;
130 return khz;
131 case 108000:
132 reg->clk = 0x00002140;
133 return khz;
134 default:
135 sclk = read_vco(dev, clk);
136 sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
137 /* if the clock has a PLL attached, and we can get a within
138 * [-2, 3) MHz of a divider, we'll disable the PLL and use
139 * the divider instead.
140 *
141 * divider can go as low as 2, limited here because NVIDIA
142 * and the VBIOS on my NVA8 seem to prefer using the PLL
143 * for 810MHz - is there a good reason?
144 */
145 if (sdiv > 4) {
146 oclk = (sclk * 2) / sdiv;
147 diff = khz - oclk;
148 if (!pll || (diff >= -2000 && diff < 3000)) {
149 reg->clk = (((sdiv - 2) << 16) | 0x00003100);
150 return oclk;
151 }
152 }
153
154 if (!pll) {
155 NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk);
156 return -ERANGE;
157 }
158
159 break;
160 }
161
162 ret = get_pll_limits(dev, pll, &limits);
76 if (ret) 163 if (ret)
77 return ret; 164 return ret;
78 165
79 off = nva3_pm_pll_offset(id); 166 limits.refclk = read_clk(dev, clk - 0x10, true);
80 if (off < 0) 167 if (!limits.refclk)
81 return off; 168 return -EINVAL;
169
170 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
171 if (ret >= 0) {
172 reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
173 reg->pll = (P << 16) | (N << 8) | M;
174 }
175 return ret;
176}
177
178static void
179prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
180{
181 const u32 src0 = 0x004120 + (clk * 4);
182 const u32 src1 = 0x004160 + (clk * 4);
183 const u32 ctrl = pll + 0;
184 const u32 coef = pll + 4;
185 u32 cntl;
186
187 if (!reg->clk && !reg->pll) {
188 NV_DEBUG(dev, "no clock for %02x\n", clk);
189 return;
190 }
82 191
83 src0 = nv_rd32(dev, 0x4120 + (off * 4)); 192 cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
84 src1 = nv_rd32(dev, 0x4160 + (off * 4)); 193 if (reg->pll) {
85 ctrl = nv_rd32(dev, pll.reg + 0); 194 nv_mask(dev, src0, 0x00000101, 0x00000101);
86 coef = nv_rd32(dev, pll.reg + 4); 195 nv_wr32(dev, coef, reg->pll);
87 NV_DEBUG(dev, "PLL %02x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 196 nv_wr32(dev, ctrl, cntl | 0x00000015);
88 id, src0, src1, ctrl, coef); 197 nv_mask(dev, src1, 0x00000100, 0x00000000);
198 nv_mask(dev, src1, 0x00000001, 0x00000000);
199 } else {
200 nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
201 nv_wr32(dev, ctrl, cntl | 0x0000001d);
202 nv_mask(dev, ctrl, 0x00000001, 0x00000000);
203 nv_mask(dev, src0, 0x00000100, 0x00000000);
204 nv_mask(dev, src0, 0x00000001, 0x00000000);
205 }
206}
89 207
90 if (ctrl & 0x00000008) { 208static void
91 u32 div = ((src1 & 0x003c0000) >> 18) + 1; 209prog_clk(struct drm_device *dev, int clk, struct creg *reg)
92 return (pll.refclk * 2) / div; 210{
211 if (!reg->clk) {
212 NV_DEBUG(dev, "no clock for %02x\n", clk);
213 return;
93 } 214 }
94 215
95 P = (coef & 0x003f0000) >> 16; 216 nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
96 N = (coef & 0x0000ff00) >> 8; 217}
97 M = (coef & 0x000000ff); 218
98 return pll.refclk * N / M / P; 219int
220nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
221{
222 perflvl->core = read_pll(dev, 0x00, 0x4200);
223 perflvl->shader = read_pll(dev, 0x01, 0x4220);
224 perflvl->memory = read_pll(dev, 0x02, 0x4000);
225 perflvl->unka0 = read_clk(dev, 0x20, false);
226 perflvl->vdec = read_clk(dev, 0x21, false);
227 perflvl->daemon = read_clk(dev, 0x25, false);
228 perflvl->copy = perflvl->core;
229 return 0;
99} 230}
100 231
232struct nva3_pm_state {
233 struct creg nclk;
234 struct creg sclk;
235 struct creg mclk;
236 struct creg vdec;
237 struct creg unka0;
238};
239
101void * 240void *
102nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, 241nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
103 u32 id, int khz)
104{ 242{
105 struct nva3_pm_state *pll; 243 struct nva3_pm_state *info;
106 struct pll_lims limits; 244 int ret;
107 int N, M, P, diff;
108 int ret, off;
109 245
110 ret = get_pll_limits(dev, id, &limits); 246 info = kzalloc(sizeof(*info), GFP_KERNEL);
247 if (!info)
248 return ERR_PTR(-ENOMEM);
249
250 ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
111 if (ret < 0) 251 if (ret < 0)
112 return (ret == -ENOENT) ? NULL : ERR_PTR(ret); 252 goto out;
113 253
114 off = nva3_pm_pll_offset(id); 254 ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
115 if (id < 0) 255 if (ret < 0)
116 return ERR_PTR(-EINVAL); 256 goto out;
117 257
258 ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
259 if (ret < 0)
260 goto out;
118 261
119 pll = kzalloc(sizeof(*pll), GFP_KERNEL); 262 ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
120 if (!pll) 263 if (ret < 0)
121 return ERR_PTR(-ENOMEM); 264 goto out;
122 pll->type = id;
123 pll->src0 = 0x004120 + (off * 4);
124 pll->src1 = 0x004160 + (off * 4);
125 pll->ctrl = limits.reg + 0;
126 pll->coef = limits.reg + 4;
127
128 /* If target clock is within [-2, 3) MHz of a divisor, we'll
129 * use that instead of calculating MNP values
130 */
131 pll->new_div = min((limits.refclk * 2) / (khz - 2999), 16);
132 if (pll->new_div) {
133 diff = khz - ((limits.refclk * 2) / pll->new_div);
134 if (diff < -2000 || diff >= 3000)
135 pll->new_div = 0;
136 }
137 265
138 if (!pll->new_div) { 266 ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
139 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P); 267 if (ret < 0)
140 if (ret < 0) 268 goto out;
141 return ERR_PTR(ret);
142 269
143 pll->new_pnm = (P << 16) | (N << 8) | M; 270out:
144 pll->new_div = 2 - 1; 271 if (ret < 0) {
145 } else { 272 kfree(info);
146 pll->new_pnm = 0; 273 info = ERR_PTR(ret);
147 pll->new_div--;
148 } 274 }
275 return info;
276}
277
278static bool
279nva3_pm_grcp_idle(void *data)
280{
281 struct drm_device *dev = data;
149 282
150 if ((nv_rd32(dev, pll->src1) & 0x00000101) != 0x00000101) 283 if (!(nv_rd32(dev, 0x400304) & 0x00000001))
151 pll->old_pnm = nv_rd32(dev, pll->coef); 284 return true;
152 return pll; 285 if (nv_rd32(dev, 0x400308) == 0x0050001c)
286 return true;
287 return false;
153} 288}
154 289
155void 290void
156nva3_pm_clock_set(struct drm_device *dev, void *pre_state) 291nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
157{ 292{
158 struct nva3_pm_state *pll = pre_state; 293 struct drm_nouveau_private *dev_priv = dev->dev_private;
159 u32 ctrl = 0; 294 struct nva3_pm_state *info = pre_state;
295 unsigned long flags;
160 296
161 /* For the memory clock, NVIDIA will build a "script" describing 297 /* prevent any new grctx switches from starting */
162 * the reclocking process and ask PDAEMON to execute it. 298 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
163 */ 299 nv_wr32(dev, 0x400324, 0x00000000);
164 if (pll->type == PLL_MEMORY) { 300 nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
165 nv_wr32(dev, 0x100210, 0); 301 /* wait for any pending grctx switches to complete */
166 nv_wr32(dev, 0x1002dc, 1); 302 if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
167 nv_wr32(dev, 0x004018, 0x00001000); 303 NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
168 ctrl = 0x18000100; 304 goto cleanup;
169 } 305 }
170 306 /* freeze PFIFO */
171 if (pll->old_pnm || !pll->new_pnm) { 307 nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
172 nv_mask(dev, pll->src1, 0x003c0101, 0x00000101 | 308 if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
173 (pll->new_div << 18)); 309 NV_ERROR(dev, "pm: fifo didn't go idle\n");
174 nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl); 310 goto cleanup;
175 nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
176 } 311 }
177 312
178 if (pll->new_pnm) { 313 prog_pll(dev, 0x00, 0x004200, &info->nclk);
179 nv_mask(dev, pll->src0, 0x00000101, 0x00000101); 314 prog_pll(dev, 0x01, 0x004220, &info->sclk);
180 nv_wr32(dev, pll->coef, pll->new_pnm); 315 prog_clk(dev, 0x20, &info->unka0);
181 nv_wr32(dev, pll->ctrl, 0x0001001d | ctrl); 316 prog_clk(dev, 0x21, &info->vdec);
182 nv_mask(dev, pll->ctrl, 0x00000010, 0x00000000);
183 nv_mask(dev, pll->ctrl, 0x00020010, 0x00020010);
184 nv_wr32(dev, pll->ctrl, 0x00010015 | ctrl);
185 nv_mask(dev, pll->src1, 0x00000100, 0x00000000);
186 nv_mask(dev, pll->src1, 0x00000001, 0x00000000);
187 if (pll->type == PLL_MEMORY)
188 nv_wr32(dev, 0x4018, 0x10005000);
189 } else {
190 nv_mask(dev, pll->ctrl, 0x00000001, 0x00000000);
191 nv_mask(dev, pll->src0, 0x00000100, 0x00000000);
192 nv_mask(dev, pll->src0, 0x00000001, 0x00000000);
193 if (pll->type == PLL_MEMORY)
194 nv_wr32(dev, 0x4018, 0x1000d000);
195 }
196 317
197 if (pll->type == PLL_MEMORY) { 318 if (info->mclk.clk || info->mclk.pll) {
319 nv_wr32(dev, 0x100210, 0);
320 nv_wr32(dev, 0x1002dc, 1);
321 nv_wr32(dev, 0x004018, 0x00001000);
322 prog_pll(dev, 0x02, 0x004000, &info->mclk);
323 if (nv_rd32(dev, 0x4000) & 0x00000008)
324 nv_wr32(dev, 0x004018, 0x1000d000);
325 else
326 nv_wr32(dev, 0x004018, 0x10005000);
198 nv_wr32(dev, 0x1002dc, 0); 327 nv_wr32(dev, 0x1002dc, 0);
199 nv_wr32(dev, 0x100210, 0x80000000); 328 nv_wr32(dev, 0x100210, 0x80000000);
200 } 329 }
201 330
202 kfree(pll); 331cleanup:
332 /* unfreeze PFIFO */
333 nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
334 /* restore ctxprog to normal */
335 nv_wr32(dev, 0x400324, 0x00000000);
336 nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
337 /* unblock it if necessary */
338 if (nv_rd32(dev, 0x400308) == 0x0050001c)
339 nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
340 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
341 kfree(info);
203} 342}
204
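
A minimal standalone sketch of the divider-vs-PLL decision in the calc_clk() hunk above, assuming a hypothetical VCO frequency of 1620000 kHz; the driver itself obtains the VCO via read_vco() and encodes the chosen divider as (sdiv - 2) << 16 in the clock register:

#include <stdio.h>

/* Same arithmetic as calc_clk(): divider outputs are vco*2/sdiv, with
 * sdiv clamped to 65 and dividers of 4 or less left to the PLL. */
static unsigned int
pick_divider(unsigned int vco_khz, unsigned int target_khz, unsigned int *out_khz)
{
	unsigned int sdiv = (vco_khz * 2) / (target_khz - 2999);
	unsigned int oclk;
	int diff;

	if (sdiv > 65)
		sdiv = 65;
	if (sdiv <= 4)
		return 0;	/* prefer the PLL, as calc_clk() does */

	oclk = (vco_khz * 2) / sdiv;
	diff = (int)target_khz - (int)oclk;
	if (diff < -2000 || diff >= 3000)
		return 0;	/* outside the [-2, 3) MHz window */

	*out_khz = oclk;
	return sdiv;
}

int main(void)
{
	unsigned int out = 0;
	unsigned int sdiv = pick_divider(1620000, 405000, &out);	/* assumed VCO, 405 MHz target */

	if (sdiv)
		printf("divider %u -> %u kHz\n", sdiv, out);	/* prints: divider 8 -> 405000 kHz */
	else
		printf("fall back to the PLL\n");
	return 0;
}
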
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 08e6b118f021..5bf55038fd92 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -32,6 +32,30 @@ struct nvc0_fb_priv {
32 dma_addr_t r100c10; 32 dma_addr_t r100c10;
33}; 33};
34 34
35static inline void
36nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
37{
38 u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
39 u32 stat = nv_rd32(dev, subp_base + 0x020);
40
41 if (stat) {
42 NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
43 nv_wr32(dev, subp_base + 0x020, stat);
44 }
45}
46
47static void
48nvc0_mfb_isr(struct drm_device *dev)
49{
50 u32 units = nv_rd32(dev, 0x00017c);
51 while (units) {
52 u32 subp, unit = ffs(units) - 1;
53 for (subp = 0; subp < 2; subp++)
54 nvc0_mfb_subp_isr(dev, unit, subp);
55 units &= ~(1 << unit);
56 }
57}
58
35static void 59static void
36nvc0_fb_destroy(struct drm_device *dev) 60nvc0_fb_destroy(struct drm_device *dev)
37{ 61{
@@ -39,6 +63,8 @@ nvc0_fb_destroy(struct drm_device *dev)
39 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 63 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
40 struct nvc0_fb_priv *priv = pfb->priv; 64 struct nvc0_fb_priv *priv = pfb->priv;
41 65
66 nouveau_irq_unregister(dev, 25);
67
42 if (priv->r100c10_page) { 68 if (priv->r100c10_page) {
43 pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE, 69 pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
44 PCI_DMA_BIDIRECTIONAL); 70 PCI_DMA_BIDIRECTIONAL);
@@ -74,6 +100,7 @@ nvc0_fb_create(struct drm_device *dev)
74 return -EFAULT; 100 return -EFAULT;
75 } 101 }
76 102
103 nouveau_irq_register(dev, 25, nvc0_mfb_isr);
77 return 0; 104 return 0;
78} 105}
79 106
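
The new nvc0_mfb_isr() above walks the pending-unit mask from 0x17c with the usual ffs() idiom; a tiny user-space sketch of the same loop, using a made-up mask value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int units = 0x15;	/* hypothetical mask: units 0, 2 and 4 pending */

	while (units) {
		unsigned int unit = ffs(units) - 1;	/* lowest set bit */
		printf("service PMFB unit %u\n", unit);
		units &= ~(1u << unit);			/* clear it and continue */
	}
	return 0;
}
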
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 6f9f341c3e86..dcbe0d5d0241 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -322,7 +322,7 @@ nvc0_fifo_init(struct drm_device *dev)
322 } 322 }
323 323
324 /* PSUBFIFO[n] */ 324 /* PSUBFIFO[n] */
325 for (i = 0; i < 3; i++) { 325 for (i = 0; i < priv->spoon_nr; i++) {
326 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 326 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
327 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 327 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
328 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */ 328 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5b2f6f420468..4b8d0b3f7d2b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -390,7 +390,7 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
390 } 390 }
391 391
392 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918); 392 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
393 nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); 393 nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
394} 394}
395 395
396static void 396static void
@@ -700,22 +700,6 @@ nvc0_graph_isr(struct drm_device *dev)
700 nv_wr32(dev, 0x400500, 0x00010001); 700 nv_wr32(dev, 0x400500, 0x00010001);
701} 701}
702 702
703static void
704nvc0_runk140_isr(struct drm_device *dev)
705{
706 u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
707
708 while (units) {
709 u32 unit = ffs(units) - 1;
710 u32 reg = 0x140000 + unit * 0x2000;
711 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
712 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
713
714 NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
715 units &= ~(1 << unit);
716 }
717}
718
719static int 703static int
720nvc0_graph_create_fw(struct drm_device *dev, const char *fwname, 704nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
721 struct nvc0_graph_fuc *fuc) 705 struct nvc0_graph_fuc *fuc)
@@ -764,7 +748,6 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
764 } 748 }
765 749
766 nouveau_irq_unregister(dev, 12); 750 nouveau_irq_unregister(dev, 12);
767 nouveau_irq_unregister(dev, 25);
768 751
769 nouveau_gpuobj_ref(NULL, &priv->unk4188b8); 752 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
770 nouveau_gpuobj_ref(NULL, &priv->unk4188b4); 753 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -803,7 +786,6 @@ nvc0_graph_create(struct drm_device *dev)
803 786
804 NVOBJ_ENGINE_ADD(dev, GR, &priv->base); 787 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
805 nouveau_irq_register(dev, 12, nvc0_graph_isr); 788 nouveau_irq_register(dev, 12, nvc0_graph_isr);
806 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
807 789
808 if (nouveau_ctxfw) { 790 if (nouveau_ctxfw) {
809 NV_INFO(dev, "PGRAPH: using external firmware\n"); 791 NV_INFO(dev, "PGRAPH: using external firmware\n");
@@ -864,6 +846,9 @@ nvc0_graph_create(struct drm_device *dev)
864 case 0xce: /* 4/4/0/0, 4 */ 846 case 0xce: /* 4/4/0/0, 4 */
865 priv->magic_not_rop_nr = 0x03; 847 priv->magic_not_rop_nr = 0x03;
866 break; 848 break;
849 case 0xcf: /* 4/0/0/0, 3 */
850 priv->magic_not_rop_nr = 0x03;
851 break;
867 } 852 }
868 853
869 if (!priv->magic_not_rop_nr) { 854 if (!priv->magic_not_rop_nr) {
@@ -889,20 +874,3 @@ error:
889 nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR); 874 nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
890 return ret; 875 return ret;
891} 876}
892
893MODULE_FIRMWARE("nouveau/nvc0_fuc409c");
894MODULE_FIRMWARE("nouveau/nvc0_fuc409d");
895MODULE_FIRMWARE("nouveau/nvc0_fuc41ac");
896MODULE_FIRMWARE("nouveau/nvc0_fuc41ad");
897MODULE_FIRMWARE("nouveau/nvc3_fuc409c");
898MODULE_FIRMWARE("nouveau/nvc3_fuc409d");
899MODULE_FIRMWARE("nouveau/nvc3_fuc41ac");
900MODULE_FIRMWARE("nouveau/nvc3_fuc41ad");
901MODULE_FIRMWARE("nouveau/nvc4_fuc409c");
902MODULE_FIRMWARE("nouveau/nvc4_fuc409d");
903MODULE_FIRMWARE("nouveau/nvc4_fuc41ac");
904MODULE_FIRMWARE("nouveau/nvc4_fuc41ad");
905MODULE_FIRMWARE("nouveau/fuc409c");
906MODULE_FIRMWARE("nouveau/fuc409d");
907MODULE_FIRMWARE("nouveau/fuc41ac");
908MODULE_FIRMWARE("nouveau/fuc41ad");
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 55689e997286..636fe9812f79 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -82,6 +82,7 @@ nvc0_graph_class(struct drm_device *dev)
82 case 0xc3: 82 case 0xc3:
83 case 0xc4: 83 case 0xc4:
84 case 0xce: /* guess, mmio trace shows only 0x9097 state */ 84 case 0xce: /* guess, mmio trace shows only 0x9097 state */
85 case 0xcf: /* guess, mmio trace shows only 0x9097 state */
85 return 0x9097; 86 return 0x9097;
86 case 0xc1: 87 case 0xc1:
87 return 0x9197; 88 return 0x9197;
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 31018eaf5279..dd0e6a736b3b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1678,7 +1678,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1678 nv_wr32(dev, 0x419c04, 0x00000006); 1678 nv_wr32(dev, 0x419c04, 0x00000006);
1679 nv_wr32(dev, 0x419c08, 0x00000002); 1679 nv_wr32(dev, 0x419c08, 0x00000002);
1680 nv_wr32(dev, 0x419c20, 0x00000000); 1680 nv_wr32(dev, 0x419c20, 0x00000000);
1681 nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048 1681 if (chipset == 0xce || chipset == 0xcf)
1682 nv_wr32(dev, 0x419cb0, 0x00020048);
1683 else
1684 nv_wr32(dev, 0x419cb0, 0x00060048);
1682 nv_wr32(dev, 0x419ce8, 0x00000000); 1685 nv_wr32(dev, 0x419ce8, 0x00000000);
1683 nv_wr32(dev, 0x419cf4, 0x00000183); 1686 nv_wr32(dev, 0x419cf4, 0x00000183);
1684 nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000); 1687 nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
@@ -1783,11 +1786,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1783 nv_wr32(dev, 0x40587c, 0x00000000); 1786 nv_wr32(dev, 0x40587c, 0x00000000);
1784 1787
1785 if (1) { 1788 if (1) {
1786 const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0, 1789 u8 tpnr[GPC_MAX], data[TP_MAX];
1787 16, 0, 0, 0, 0, 0, 8, 0 };
1788 u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
1789 u8 tpnr[GPC_MAX];
1790 u8 data[TP_MAX];
1791 1790
1792 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1791 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1793 memset(data, 0x1f, sizeof(data)); 1792 memset(data, 0x1f, sizeof(data));
@@ -1801,7 +1800,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1801 data[tp] = gpc; 1800 data[tp] = gpc;
1802 } 1801 }
1803 1802
1804 for (i = 0; i < max / 4; i++) 1803 for (i = 0; i < 4; i++)
1805 nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]); 1804 nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
1806 } 1805 }
1807 1806
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
index 0ec2add72a76..06f5e26d1e0f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -77,6 +77,11 @@ chipsets:
77.b16 nvc0_gpc_mmio_tail 77.b16 nvc0_gpc_mmio_tail
78.b16 nvc0_tpc_mmio_head 78.b16 nvc0_tpc_mmio_head
79.b16 nvc3_tpc_mmio_tail 79.b16 nvc3_tpc_mmio_tail
80.b8 0xcf 0 0 0
81.b16 nvc0_gpc_mmio_head
82.b16 nvc0_gpc_mmio_tail
83.b16 nvc0_tpc_mmio_head
84.b16 nvcf_tpc_mmio_tail
80.b8 0 0 0 0 85.b8 0 0 0 0
81 86
82// GPC mmio lists 87// GPC mmio lists
@@ -134,8 +139,9 @@ mmctx_data(0x000750, 2)
134nvc0_tpc_mmio_tail: 139nvc0_tpc_mmio_tail:
135mmctx_data(0x000758, 1) 140mmctx_data(0x000758, 1)
136mmctx_data(0x0002c4, 1) 141mmctx_data(0x0002c4, 1)
137mmctx_data(0x0004bc, 1)
138mmctx_data(0x0006e0, 1) 142mmctx_data(0x0006e0, 1)
143nvcf_tpc_mmio_tail:
144mmctx_data(0x0004bc, 1)
139nvc3_tpc_mmio_tail: 145nvc3_tpc_mmio_tail:
140mmctx_data(0x000544, 1) 146mmctx_data(0x000544, 1)
141nvc1_tpc_mmio_tail: 147nvc1_tpc_mmio_tail:
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
index 1896c898f5ba..6f820324480e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -25,23 +25,26 @@ uint32_t nvc0_grgpc_data[] = {
25 0x00000000, 25 0x00000000,
26 0x00000000, 26 0x00000000,
27 0x000000c0, 27 0x000000c0,
28 0x011000b0, 28 0x011c00bc,
29 0x01640114, 29 0x01700120,
30 0x000000c1, 30 0x000000c1,
31 0x011400b0, 31 0x012000bc,
32 0x01780114, 32 0x01840120,
33 0x000000c3, 33 0x000000c3,
34 0x011000b0, 34 0x011c00bc,
35 0x01740114, 35 0x01800120,
36 0x000000c4, 36 0x000000c4,
37 0x011000b0, 37 0x011c00bc,
38 0x01740114, 38 0x01800120,
39 0x000000c8, 39 0x000000c8,
40 0x011000b0, 40 0x011c00bc,
41 0x01640114, 41 0x01700120,
42 0x000000ce, 42 0x000000ce,
43 0x011000b0, 43 0x011c00bc,
44 0x01740114, 44 0x01800120,
45 0x000000cf,
46 0x011c00bc,
47 0x017c0120,
45 0x00000000, 48 0x00000000,
46 0x00000380, 49 0x00000380,
47 0x14000400, 50 0x14000400,
@@ -90,8 +93,8 @@ uint32_t nvc0_grgpc_data[] = {
90 0x04000750, 93 0x04000750,
91 0x00000758, 94 0x00000758,
92 0x000002c4, 95 0x000002c4,
93 0x000004bc,
94 0x000006e0, 96 0x000006e0,
97 0x000004bc,
95 0x00000544, 98 0x00000544,
96}; 99};
97 100
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
index a1a599124cf4..e4f8c7e89ddd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -56,6 +56,9 @@ chipsets:
56.b8 0xce 0 0 0 56.b8 0xce 0 0 0
57.b16 nvc0_hub_mmio_head 57.b16 nvc0_hub_mmio_head
58.b16 nvc0_hub_mmio_tail 58.b16 nvc0_hub_mmio_tail
59.b8 0xcf 0 0 0
60.b16 nvc0_hub_mmio_head
61.b16 nvc0_hub_mmio_tail
59.b8 0 0 0 0 62.b8 0 0 0 0
60 63
61nvc0_hub_mmio_head: 64nvc0_hub_mmio_head:
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
index b3b541b6d044..241d3263f1e5 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -23,17 +23,19 @@ uint32_t nvc0_grhub_data[] = {
23 0x00000000, 23 0x00000000,
24 0x00000000, 24 0x00000000,
25 0x000000c0, 25 0x000000c0,
26 0x012c0090, 26 0x01340098,
27 0x000000c1, 27 0x000000c1,
28 0x01300090, 28 0x01380098,
29 0x000000c3, 29 0x000000c3,
30 0x012c0090, 30 0x01340098,
31 0x000000c4, 31 0x000000c4,
32 0x012c0090, 32 0x01340098,
33 0x000000c8, 33 0x000000c8,
34 0x012c0090, 34 0x01340098,
35 0x000000ce, 35 0x000000ce,
36 0x012c0090, 36 0x01340098,
37 0x000000cf,
38 0x01340098,
37 0x00000000, 39 0x00000000,
38 0x0417e91c, 40 0x0417e91c,
39 0x04400204, 41 0x04400204,
@@ -190,8 +192,6 @@ uint32_t nvc0_grhub_data[] = {
190 0x00000000, 192 0x00000000,
191 0x00000000, 193 0x00000000,
192 0x00000000, 194 0x00000000,
193 0x00000000,
194 0x00000000,
195}; 195};
196 196
197uint32_t nvc0_grhub_code[] = { 197uint32_t nvc0_grhub_code[] = {
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
new file mode 100644
index 000000000000..929aded35cb5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_bios.h"
28#include "nouveau_pm.h"
29
30static u32 read_div(struct drm_device *, int, u32, u32);
31static u32 read_pll(struct drm_device *, u32);
32
33static u32
34read_vco(struct drm_device *dev, u32 dsrc)
35{
36 u32 ssrc = nv_rd32(dev, dsrc);
37 if (!(ssrc & 0x00000100))
38 return read_pll(dev, 0x00e800);
39 return read_pll(dev, 0x00e820);
40}
41
42static u32
43read_pll(struct drm_device *dev, u32 pll)
44{
45 u32 ctrl = nv_rd32(dev, pll + 0);
46 u32 coef = nv_rd32(dev, pll + 4);
47 u32 P = (coef & 0x003f0000) >> 16;
48 u32 N = (coef & 0x0000ff00) >> 8;
49 u32 M = (coef & 0x000000ff) >> 0;
50 u32 sclk, doff;
51
52 if (!(ctrl & 0x00000001))
53 return 0;
54
55 switch (pll & 0xfff000) {
56 case 0x00e000:
57 sclk = 27000;
58 P = 1;
59 break;
60 case 0x137000:
61 doff = (pll - 0x137000) / 0x20;
62 sclk = read_div(dev, doff, 0x137120, 0x137140);
63 break;
64 case 0x132000:
65 switch (pll) {
66 case 0x132000:
67 sclk = read_pll(dev, 0x132020);
68 break;
69 case 0x132020:
70 sclk = read_div(dev, 0, 0x137320, 0x137330);
71 break;
72 default:
73 return 0;
74 }
75 break;
76 default:
77 return 0;
78 }
79
80 return sclk * N / M / P;
81}
82
83static u32
84read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
85{
86 u32 ssrc = nv_rd32(dev, dsrc + (doff * 4));
87 u32 sctl = nv_rd32(dev, dctl + (doff * 4));
88
89 switch (ssrc & 0x00000003) {
90 case 0:
91 if ((ssrc & 0x00030000) != 0x00030000)
92 return 27000;
93 return 108000;
94 case 2:
95 return 100000;
96 case 3:
97 if (sctl & 0x80000000) {
98 u32 sclk = read_vco(dev, dsrc + (doff * 4));
99 u32 sdiv = (sctl & 0x0000003f) + 2;
100 return (sclk * 2) / sdiv;
101 }
102
103 return read_vco(dev, dsrc + (doff * 4));
104 default:
105 return 0;
106 }
107}
108
109static u32
110read_mem(struct drm_device *dev)
111{
112 u32 ssel = nv_rd32(dev, 0x1373f0);
113 if (ssel & 0x00000001)
114 return read_div(dev, 0, 0x137300, 0x137310);
115 return read_pll(dev, 0x132000);
116}
117
118static u32
119read_clk(struct drm_device *dev, int clk)
120{
121 u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4));
122 u32 ssel = nv_rd32(dev, 0x137100);
123 u32 sclk, sdiv;
124
125 if (ssel & (1 << clk)) {
126 if (clk < 7)
127 sclk = read_pll(dev, 0x137000 + (clk * 0x20));
128 else
129 sclk = read_pll(dev, 0x1370e0);
130 sdiv = ((sctl & 0x00003f00) >> 8) + 2;
131 } else {
132 sclk = read_div(dev, clk, 0x137160, 0x1371d0);
133 sdiv = ((sctl & 0x0000003f) >> 0) + 2;
134 }
135
136 if (sctl & 0x80000000)
137 return (sclk * 2) / sdiv;
138 return sclk;
139}
140
141int
142nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
143{
144 perflvl->shader = read_clk(dev, 0x00);
145 perflvl->core = perflvl->shader / 2;
146 perflvl->memory = read_mem(dev);
147 perflvl->rop = read_clk(dev, 0x01);
148 perflvl->hub07 = read_clk(dev, 0x02);
149 perflvl->hub06 = read_clk(dev, 0x07);
150 perflvl->hub01 = read_clk(dev, 0x08);
151 perflvl->copy = read_clk(dev, 0x09);
152 perflvl->daemon = read_clk(dev, 0x0c);
153 perflvl->vdec = read_clk(dev, 0x0e);
154 return 0;
155}
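
In the new nvc0_pm.c above, the PLL readback reduces to output = ref * N / M / P, with M, N and P packed into the coefficient register; a small sketch decoding a made-up coefficient word against the 27 MHz crystal:

#include <stdio.h>

int main(void)
{
	unsigned int coef = 0x00021b04;			/* hypothetical register value */
	unsigned int P = (coef & 0x003f0000) >> 16;	/* 2 */
	unsigned int N = (coef & 0x0000ff00) >> 8;	/* 27 */
	unsigned int M = (coef & 0x000000ff) >> 0;	/* 4 */
	unsigned int refclk = 27000;			/* crystal, in kHz */

	/* same formula read_pll() uses once it has resolved its source clock */
	printf("%u kHz\n", refclk * N / M / P);		/* 27000 * 27 / 4 / 2 = 91125 */
	return 0;
}
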
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index e45a24d84e98..edbfe9360ae2 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -61,7 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
61 u32 type, struct nouveau_mem **pmem) 61 u32 type, struct nouveau_mem **pmem)
62{ 62{
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_mm *mm = dev_priv->engine.vram.mm; 64 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
65 struct nouveau_mm_node *r; 65 struct nouveau_mm_node *r;
66 struct nouveau_mem *mem; 66 struct nouveau_mem *mem;
67 int ret; 67 int ret;
@@ -106,12 +106,50 @@ nvc0_vram_init(struct drm_device *dev)
106 struct nouveau_vram_engine *vram = &dev_priv->engine.vram; 106 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
107 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 107 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
108 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 108 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
109 u32 length; 109 u32 parts = nv_rd32(dev, 0x121c74);
110 u32 bsize = nv_rd32(dev, 0x10f20c);
111 u32 offset, length;
112 bool uniform = true;
113 int ret, i;
110 114
111 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; 115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
112 dev_priv->vram_size *= nv_rd32(dev, 0x121c74); 116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
113 117
114 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail; 118 /* read amount of vram attached to each memory controller */
119 for (i = 0; i < parts; i++) {
120 u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000));
121 if (psize != bsize) {
122 if (psize < bsize)
123 bsize = psize;
124 uniform = false;
125 }
126
127 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize);
128
129 dev_priv->vram_size += (u64)psize << 20;
130 }
131
132	/* if all controllers have the same amount attached, there are no holes */
133 if (uniform) {
134 offset = rsvd_head;
135 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
136 return nouveau_mm_init(&vram->mm, offset, length, 1);
137 }
115 138
116 return nouveau_mm_init(&vram->mm, rsvd_head, length, 1); 139 /* otherwise, address lowest common amount from 0GiB */
140 ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
141 if (ret)
142 return ret;
143
144 /* and the rest starting from (8GiB + common_size) */
145 offset = (0x0200000000ULL >> 12) + (bsize << 8);
146 length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
147
148 ret = nouveau_mm_init(&vram->mm, offset, length, 0);
149 if (ret) {
150 nouveau_mm_fini(&vram->mm);
151 return ret;
152 }
153
154 return 0;
117} 155}
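
A sketch of the two nouveau_mm regions the nvc0_vram_init() change above would set up for a non-uniform configuration, using made-up partition sizes of 1024/1024/512 MiB; offsets and lengths are in 4 KiB pages, so a size in MiB converts to pages via << 8:

#include <stdio.h>

int main(void)
{
	unsigned long long psize[] = { 1024, 1024, 512 };	/* MiB per partition (hypothetical) */
	const int parts = 3;
	const unsigned long long rsvd_head = (256 * 1024) >> 12;	/* vga memory */
	const unsigned long long rsvd_tail = (1024 * 1024) >> 12;	/* vbios etc */
	unsigned long long total_mib = 0, bsize = psize[0];
	int i;

	for (i = 0; i < parts; i++) {
		if (psize[i] < bsize)
			bsize = psize[i];		/* lowest common amount */
		total_mib += psize[i];
	}

	/* first region: the common amount from every partition, addressed from 0 */
	printf("mm region 0: offset %llu, length %llu pages\n",
	       rsvd_head, (bsize << 8) * parts);

	/* second region: starts at 8GiB + the common per-partition size, with the
	 * same length calculation nvc0_vram_init() uses */
	printf("mm region 1: offset %llu, length %llu pages\n",
	       (0x0200000000ULL >> 12) + (bsize << 8),
	       (total_mib << 8) - (bsize << 8) - rsvd_tail);
	return 0;
}
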
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
new file mode 100644
index 000000000000..23d63b4b3d77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -0,0 +1,1473 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/dma-mapping.h>
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_connector.h"
32#include "nouveau_encoder.h"
33#include "nouveau_crtc.h"
34#include "nouveau_dma.h"
35#include "nouveau_fb.h"
36#include "nv50_display.h"
37
38struct nvd0_display {
39 struct nouveau_gpuobj *mem;
40 struct {
41 dma_addr_t handle;
42 u32 *ptr;
43 } evo[1];
44
45 struct tasklet_struct tasklet;
46 u32 modeset;
47};
48
49static struct nvd0_display *
50nvd0_display(struct drm_device *dev)
51{
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 return dev_priv->engine.display.priv;
54}
55
56static inline int
57evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
58{
59 int ret = 0;
60 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
61 nv_wr32(dev, 0x610704 + (id * 0x10), data);
62 nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
63 if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
64 ret = -EBUSY;
65 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
66 return ret;
67}
68
69static u32 *
70evo_wait(struct drm_device *dev, int id, int nr)
71{
72 struct nvd0_display *disp = nvd0_display(dev);
73 u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;
74
75 if (put + nr >= (PAGE_SIZE / 4)) {
76 disp->evo[id].ptr[put] = 0x20000000;
77
78 nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
79 if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
80 NV_ERROR(dev, "evo %d dma stalled\n", id);
81 return NULL;
82 }
83
84 put = 0;
85 }
86
87 return disp->evo[id].ptr + put;
88}
89
90static void
91evo_kick(u32 *push, struct drm_device *dev, int id)
92{
93 struct nvd0_display *disp = nvd0_display(dev);
94 nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
95}
96
97#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
98#define evo_data(p,d) *((p)++) = (d)
99
100static struct drm_crtc *
101nvd0_display_crtc_get(struct drm_encoder *encoder)
102{
103 return nouveau_encoder(encoder)->crtc;
104}
105
106/******************************************************************************
107 * CRTC
108 *****************************************************************************/
109static int
110nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
111{
112 struct drm_device *dev = nv_crtc->base.dev;
113 u32 *push, mode;
114
115 mode = 0x00000000;
116 if (on) {
117 /* 0x11: 6bpc dynamic 2x2
118 * 0x13: 8bpc dynamic 2x2
119 * 0x19: 6bpc static 2x2
120 * 0x1b: 8bpc static 2x2
121 * 0x21: 6bpc temporal
122 * 0x23: 8bpc temporal
123 */
124 mode = 0x00000011;
125 }
126
127 push = evo_wait(dev, 0, 4);
128 if (push) {
129 evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
130 evo_data(push, mode);
131 if (update) {
132 evo_mthd(push, 0x0080, 1);
133 evo_data(push, 0x00000000);
134 }
135 evo_kick(push, dev, 0);
136 }
137
138 return 0;
139}
140
141static int
142nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
143{
144 struct drm_display_mode *mode = &nv_crtc->base.mode;
145 struct drm_device *dev = nv_crtc->base.dev;
146 struct nouveau_connector *nv_connector;
147 u32 *push, outX, outY;
148
149 outX = mode->hdisplay;
150 outY = mode->vdisplay;
151
152 nv_connector = nouveau_crtc_connector_get(nv_crtc);
153 if (nv_connector && nv_connector->native_mode) {
154 struct drm_display_mode *native = nv_connector->native_mode;
155 u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
156 u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
157
158 switch (type) {
159 case DRM_MODE_SCALE_ASPECT:
160 if (xratio > yratio) {
161 outX = (mode->hdisplay * yratio) >> 19;
162 outY = (mode->vdisplay * yratio) >> 19;
163 } else {
164 outX = (mode->hdisplay * xratio) >> 19;
165 outY = (mode->vdisplay * xratio) >> 19;
166 }
167 break;
168 case DRM_MODE_SCALE_FULLSCREEN:
169 outX = native->hdisplay;
170 outY = native->vdisplay;
171 break;
172 default:
173 break;
174 }
175 }
176
177 push = evo_wait(dev, 0, 16);
178 if (push) {
179 evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
180 evo_data(push, (outY << 16) | outX);
181 evo_data(push, (outY << 16) | outX);
182 evo_data(push, (outY << 16) | outX);
183 evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
184 evo_data(push, 0x00000000);
185 evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
186 evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
187 if (update) {
188 evo_mthd(push, 0x0080, 1);
189 evo_data(push, 0x00000000);
190 }
191 evo_kick(push, dev, 0);
192 }
193
194 return 0;
195}
196
197static int
198nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
199 int x, int y, bool update)
200{
201 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
202 u32 *push;
203
204 push = evo_wait(fb->dev, 0, 16);
205 if (push) {
206 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
207 evo_data(push, nvfb->nvbo->bo.offset >> 8);
208 evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
209 evo_data(push, (fb->height << 16) | fb->width);
210 evo_data(push, nvfb->r_pitch);
211 evo_data(push, nvfb->r_format);
212 evo_data(push, nvfb->r_dma);
213 evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
214 evo_data(push, (y << 16) | x);
215 if (update) {
216 evo_mthd(push, 0x0080, 1);
217 evo_data(push, 0x00000000);
218 }
219 evo_kick(push, fb->dev, 0);
220 }
221
222 nv_crtc->fb.tile_flags = nvfb->r_dma;
223 return 0;
224}
225
226static void
227nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
228{
229 struct drm_device *dev = nv_crtc->base.dev;
230 u32 *push = evo_wait(dev, 0, 16);
231 if (push) {
232 if (show) {
233 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
234 evo_data(push, 0x85000000);
235 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
236 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
237 evo_data(push, NvEvoVRAM);
238 } else {
239 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
240 evo_data(push, 0x05000000);
241 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
242 evo_data(push, 0x00000000);
243 }
244
245 if (update) {
246 evo_mthd(push, 0x0080, 1);
247 evo_data(push, 0x00000000);
248 }
249
250 evo_kick(push, dev, 0);
251 }
252}
253
254static void
255nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
256{
257}
258
259static void
260nvd0_crtc_prepare(struct drm_crtc *crtc)
261{
262 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
263 u32 *push;
264
265 push = evo_wait(crtc->dev, 0, 2);
266 if (push) {
267 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
268 evo_data(push, 0x00000000);
269 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
270 evo_data(push, 0x03000000);
271 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
272 evo_data(push, 0x00000000);
273 evo_kick(push, crtc->dev, 0);
274 }
275
276 nvd0_crtc_cursor_show(nv_crtc, false, false);
277}
278
279static void
280nvd0_crtc_commit(struct drm_crtc *crtc)
281{
282 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
283 u32 *push;
284
285 push = evo_wait(crtc->dev, 0, 32);
286 if (push) {
287 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
288 evo_data(push, nv_crtc->fb.tile_flags);
289 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
290 evo_data(push, 0x83000000);
291 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
292 evo_data(push, 0x00000000);
293 evo_data(push, 0x00000000);
294 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
295 evo_data(push, NvEvoVRAM);
296 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
297 evo_data(push, 0xffffff00);
298 evo_kick(push, crtc->dev, 0);
299 }
300
301 nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
302}
303
304static bool
305nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
306 struct drm_display_mode *adjusted_mode)
307{
308 return true;
309}
310
311static int
312nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
313{
314 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
315 int ret;
316
317 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
318 if (ret)
319 return ret;
320
321 if (old_fb) {
322 nvfb = nouveau_framebuffer(old_fb);
323 nouveau_bo_unpin(nvfb->nvbo);
324 }
325
326 return 0;
327}
328
329static int
330nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
331 struct drm_display_mode *mode, int x, int y,
332 struct drm_framebuffer *old_fb)
333{
334 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
335 struct nouveau_connector *nv_connector;
336 u32 htotal = mode->htotal;
337 u32 vtotal = mode->vtotal;
338 u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
339 u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
340 u32 hfrntp = mode->hsync_start - mode->hdisplay;
341 u32 vfrntp = mode->vsync_start - mode->vdisplay;
342 u32 hbackp = mode->htotal - mode->hsync_end;
343 u32 vbackp = mode->vtotal - mode->vsync_end;
344 u32 hss2be = hsyncw + hbackp;
345 u32 vss2be = vsyncw + vbackp;
346 u32 hss2de = htotal - hfrntp;
347 u32 vss2de = vtotal - vfrntp;
348 u32 syncs, *push;
349 int ret;
350
351 syncs = 0x00000001;
352 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
353 syncs |= 0x00000008;
354 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
355 syncs |= 0x00000010;
356
357 ret = nvd0_crtc_swap_fbs(crtc, old_fb);
358 if (ret)
359 return ret;
360
361 push = evo_wait(crtc->dev, 0, 64);
362 if (push) {
363 evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
364 evo_data(push, 0x00000000);
365 evo_data(push, (vtotal << 16) | htotal);
366 evo_data(push, (vsyncw << 16) | hsyncw);
367 evo_data(push, (vss2be << 16) | hss2be);
368 evo_data(push, (vss2de << 16) | hss2de);
369 evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
370 evo_data(push, 0x00000000); /* ??? */
371 evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
372 evo_data(push, mode->clock * 1000);
373 evo_data(push, 0x00200000); /* ??? */
374 evo_data(push, mode->clock * 1000);
375 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
376 evo_data(push, syncs);
377 evo_kick(push, crtc->dev, 0);
378 }
379
380 nv_connector = nouveau_crtc_connector_get(nv_crtc);
381 nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
382 nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
383 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
384 return 0;
385}
386
387static int
388nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
389 struct drm_framebuffer *old_fb)
390{
391 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
392 int ret;
393
394 if (!crtc->fb) {
395 NV_DEBUG_KMS(crtc->dev, "No FB bound\n");
396 return 0;
397 }
398
399 ret = nvd0_crtc_swap_fbs(crtc, old_fb);
400 if (ret)
401 return ret;
402
403 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
404 return 0;
405}
406
407static int
408nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
409 struct drm_framebuffer *fb, int x, int y,
410 enum mode_set_atomic state)
411{
412 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
413 nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
414 return 0;
415}
416
417static void
418nvd0_crtc_lut_load(struct drm_crtc *crtc)
419{
420 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
421 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
422 int i;
423
424 for (i = 0; i < 256; i++) {
425 writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
426 writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
427 writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
428 }
429}
430
431static int
432nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
433 uint32_t handle, uint32_t width, uint32_t height)
434{
435 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
436 struct drm_device *dev = crtc->dev;
437 struct drm_gem_object *gem;
438 struct nouveau_bo *nvbo;
439 bool visible = (handle != 0);
440 int i, ret = 0;
441
442 if (visible) {
443 if (width != 64 || height != 64)
444 return -EINVAL;
445
446 gem = drm_gem_object_lookup(dev, file_priv, handle);
447 if (unlikely(!gem))
448 return -ENOENT;
449 nvbo = nouveau_gem_object(gem);
450
451 ret = nouveau_bo_map(nvbo);
452 if (ret == 0) {
453 for (i = 0; i < 64 * 64; i++) {
454 u32 v = nouveau_bo_rd32(nvbo, i);
455 nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
456 }
457 nouveau_bo_unmap(nvbo);
458 }
459
460 drm_gem_object_unreference_unlocked(gem);
461 }
462
463 if (visible != nv_crtc->cursor.visible) {
464 nvd0_crtc_cursor_show(nv_crtc, visible, true);
465 nv_crtc->cursor.visible = visible;
466 }
467
468 return ret;
469}
470
471static int
472nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
473{
474 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
475 const u32 data = (y << 16) | x;
476
477 nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
478 nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
479 return 0;
480}
481
482static void
483nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
484 uint32_t start, uint32_t size)
485{
486 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
487 u32 end = max(start + size, (u32)256);
488 u32 i;
489
490 for (i = start; i < end; i++) {
491 nv_crtc->lut.r[i] = r[i];
492 nv_crtc->lut.g[i] = g[i];
493 nv_crtc->lut.b[i] = b[i];
494 }
495
496 nvd0_crtc_lut_load(crtc);
497}
498
499static void
500nvd0_crtc_destroy(struct drm_crtc *crtc)
501{
502 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
503 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
504 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
505 nouveau_bo_unmap(nv_crtc->lut.nvbo);
506 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
507 drm_crtc_cleanup(crtc);
508 kfree(crtc);
509}
510
511static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
512 .dpms = nvd0_crtc_dpms,
513 .prepare = nvd0_crtc_prepare,
514 .commit = nvd0_crtc_commit,
515 .mode_fixup = nvd0_crtc_mode_fixup,
516 .mode_set = nvd0_crtc_mode_set,
517 .mode_set_base = nvd0_crtc_mode_set_base,
518 .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
519 .load_lut = nvd0_crtc_lut_load,
520};
521
522static const struct drm_crtc_funcs nvd0_crtc_func = {
523 .cursor_set = nvd0_crtc_cursor_set,
524 .cursor_move = nvd0_crtc_cursor_move,
525 .gamma_set = nvd0_crtc_gamma_set,
526 .set_config = drm_crtc_helper_set_config,
527 .destroy = nvd0_crtc_destroy,
528};
529
530static void
531nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
532{
533}
534
535static void
536nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
537{
538}
539
540static int
541nvd0_crtc_create(struct drm_device *dev, int index)
542{
543 struct nouveau_crtc *nv_crtc;
544 struct drm_crtc *crtc;
545 int ret, i;
546
547 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
548 if (!nv_crtc)
549 return -ENOMEM;
550
551 nv_crtc->index = index;
552 nv_crtc->set_dither = nvd0_crtc_set_dither;
553 nv_crtc->set_scale = nvd0_crtc_set_scale;
554 nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
555 nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
556 for (i = 0; i < 256; i++) {
557 nv_crtc->lut.r[i] = i << 8;
558 nv_crtc->lut.g[i] = i << 8;
559 nv_crtc->lut.b[i] = i << 8;
560 }
561
562 crtc = &nv_crtc->base;
563 drm_crtc_init(dev, crtc, &nvd0_crtc_func);
564 drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
565 drm_mode_crtc_set_gamma_size(crtc, 256);
566
567 ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
568 0, 0x0000, &nv_crtc->cursor.nvbo);
569 if (!ret) {
570 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
571 if (!ret)
572 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
573 if (ret)
574 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
575 }
576
577 if (ret)
578 goto out;
579
580 ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
581 0, 0x0000, &nv_crtc->lut.nvbo);
582 if (!ret) {
583 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
584 if (!ret)
585 ret = nouveau_bo_map(nv_crtc->lut.nvbo);
586 if (ret)
587 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
588 }
589
590 if (ret)
591 goto out;
592
593 nvd0_crtc_lut_load(crtc);
594
595out:
596 if (ret)
597 nvd0_crtc_destroy(crtc);
598 return ret;
599}
600
601/******************************************************************************
602 * DAC
603 *****************************************************************************/
604static void
605nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
606{
607 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
608 struct drm_device *dev = encoder->dev;
609 int or = nv_encoder->or;
610 u32 dpms_ctrl;
611
612 dpms_ctrl = 0x80000000;
613 if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
614 dpms_ctrl |= 0x00000001;
615 if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
616 dpms_ctrl |= 0x00000004;
617
618 nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
619 nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
620 nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
621}
622
623static bool
624nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
625 struct drm_display_mode *adjusted_mode)
626{
627 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
628 struct nouveau_connector *nv_connector;
629
630 nv_connector = nouveau_encoder_connector_get(nv_encoder);
631 if (nv_connector && nv_connector->native_mode) {
632 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
633 int id = adjusted_mode->base.id;
634 *adjusted_mode = *nv_connector->native_mode;
635 adjusted_mode->base.id = id;
636 }
637 }
638
639 return true;
640}
641
642static void
643nvd0_dac_prepare(struct drm_encoder *encoder)
644{
645}
646
647static void
648nvd0_dac_commit(struct drm_encoder *encoder)
649{
650}
651
652static void
653nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
654 struct drm_display_mode *adjusted_mode)
655{
656 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
657 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
658 u32 *push;
659
660 nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
661
662 push = evo_wait(encoder->dev, 0, 4);
663 if (push) {
664 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
665 evo_data(push, 1 << nv_crtc->index);
666 evo_data(push, 0x00ff);
667 evo_kick(push, encoder->dev, 0);
668 }
669
670 nv_encoder->crtc = encoder->crtc;
671}
672
673static void
674nvd0_dac_disconnect(struct drm_encoder *encoder)
675{
676 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
677 struct drm_device *dev = encoder->dev;
678 u32 *push;
679
680 if (nv_encoder->crtc) {
681 nvd0_crtc_prepare(nv_encoder->crtc);
682
683 push = evo_wait(dev, 0, 4);
684 if (push) {
685 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
686 evo_data(push, 0x00000000);
687 evo_mthd(push, 0x0080, 1);
688 evo_data(push, 0x00000000);
689 evo_kick(push, dev, 0);
690 }
691
692 nv_encoder->crtc = NULL;
693 }
694}
695
696static enum drm_connector_status
697nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
698{
699 enum drm_connector_status status = connector_status_disconnected;
700 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
701 struct drm_device *dev = encoder->dev;
702 int or = nv_encoder->or;
703 u32 load;
704
705 nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000);
706 udelay(9500);
707 nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000);
708
709 load = nv_rd32(dev, 0x61a00c + (or * 0x800));
710 if ((load & 0x38000000) == 0x38000000)
711 status = connector_status_connected;
712
713 nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000);
714 return status;
715}
716
717static void
718nvd0_dac_destroy(struct drm_encoder *encoder)
719{
720 drm_encoder_cleanup(encoder);
721 kfree(encoder);
722}
723
724static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
725 .dpms = nvd0_dac_dpms,
726 .mode_fixup = nvd0_dac_mode_fixup,
727 .prepare = nvd0_dac_prepare,
728 .commit = nvd0_dac_commit,
729 .mode_set = nvd0_dac_mode_set,
730 .disable = nvd0_dac_disconnect,
731 .get_crtc = nvd0_display_crtc_get,
732 .detect = nvd0_dac_detect
733};
734
735static const struct drm_encoder_funcs nvd0_dac_func = {
736 .destroy = nvd0_dac_destroy,
737};
738
739static int
740nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
741{
742 struct drm_device *dev = connector->dev;
743 struct nouveau_encoder *nv_encoder;
744 struct drm_encoder *encoder;
745
746 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
747 if (!nv_encoder)
748 return -ENOMEM;
749 nv_encoder->dcb = dcbe;
750 nv_encoder->or = ffs(dcbe->or) - 1;
751
752 encoder = to_drm_encoder(nv_encoder);
753 encoder->possible_crtcs = dcbe->heads;
754 encoder->possible_clones = 0;
755 drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
756 drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
757
758 drm_mode_connector_attach_encoder(connector, encoder);
759 return 0;
760}
761
762/******************************************************************************
763 * SOR
764 *****************************************************************************/
765static void
766nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
767{
768 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
769 struct drm_device *dev = encoder->dev;
770 struct drm_encoder *partner;
771 int or = nv_encoder->or;
772 u32 dpms_ctrl;
773
774 nv_encoder->last_dpms = mode;
775
776 list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
777 struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
778
779 if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
780 continue;
781
782 if (nv_partner != nv_encoder &&
783 nv_partner->dcb->or == nv_encoder->or) {
784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
785 return;
786 break;
787 }
788 }
789
790 dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
791 dpms_ctrl |= 0x80000000;
792
793 nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
794 nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
795 nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
796 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
797}
798
799static bool
800nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
801 struct drm_display_mode *adjusted_mode)
802{
803 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
804 struct nouveau_connector *nv_connector;
805
806 nv_connector = nouveau_encoder_connector_get(nv_encoder);
807 if (nv_connector && nv_connector->native_mode) {
808 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
809 int id = adjusted_mode->base.id;
810 *adjusted_mode = *nv_connector->native_mode;
811 adjusted_mode->base.id = id;
812 }
813 }
814
815 return true;
816}
817
818static void
819nvd0_sor_prepare(struct drm_encoder *encoder)
820{
821}
822
823static void
824nvd0_sor_commit(struct drm_encoder *encoder)
825{
826}
827
828static void
829nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
830 struct drm_display_mode *mode)
831{
832 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
833 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
834 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
835 struct nouveau_connector *nv_connector;
836 struct nvbios *bios = &dev_priv->vbios;
837 u32 mode_ctrl = (1 << nv_crtc->index);
838 u32 *push, or_config;
839
840 nv_connector = nouveau_encoder_connector_get(nv_encoder);
841 switch (nv_encoder->dcb->type) {
842 case OUTPUT_TMDS:
843 if (nv_encoder->dcb->sorconf.link & 1) {
844 if (mode->clock < 165000)
845 mode_ctrl |= 0x00000100;
846 else
847 mode_ctrl |= 0x00000500;
848 } else {
849 mode_ctrl |= 0x00000200;
850 }
851
852 or_config = (mode_ctrl & 0x00000f00) >> 8;
853 if (mode->clock >= 165000)
854 or_config |= 0x0100;
855 break;
856 case OUTPUT_LVDS:
857 or_config = (mode_ctrl & 0x00000f00) >> 8;
858 if (bios->fp_no_ddc) {
859 if (bios->fp.dual_link)
860 or_config |= 0x0100;
861 if (bios->fp.if_is_24bit)
862 or_config |= 0x0200;
863 } else {
864 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
865 if (((u8 *)nv_connector->edid)[121] == 2)
866 or_config |= 0x0100;
867 } else
868 if (mode->clock >= bios->fp.duallink_transition_clk) {
869 or_config |= 0x0100;
870 }
871
872 if (or_config & 0x0100) {
873 if (bios->fp.strapless_is_24bit & 2)
874 or_config |= 0x0200;
875 } else {
876 if (bios->fp.strapless_is_24bit & 1)
877 or_config |= 0x0200;
878 }
879
880 if (nv_connector->base.display_info.bpc == 8)
881 or_config |= 0x0200;
882
883 }
884 break;
885 default:
886 BUG_ON(1);
887 break;
888 }
889
890 nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
891
892 push = evo_wait(encoder->dev, 0, 4);
893 if (push) {
894 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
895 evo_data(push, mode_ctrl);
896 evo_data(push, or_config);
897 evo_kick(push, encoder->dev, 0);
898 }
899
900 nv_encoder->crtc = encoder->crtc;
901}
902
903static void
904nvd0_sor_disconnect(struct drm_encoder *encoder)
905{
906 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
907 struct drm_device *dev = encoder->dev;
908 u32 *push;
909
910 if (nv_encoder->crtc) {
911 nvd0_crtc_prepare(nv_encoder->crtc);
912
913 push = evo_wait(dev, 0, 4);
914 if (push) {
915 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
916 evo_data(push, 0x00000000);
917 evo_mthd(push, 0x0080, 1);
918 evo_data(push, 0x00000000);
919 evo_kick(push, dev, 0);
920 }
921
922 nv_encoder->crtc = NULL;
923 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
924 }
925}
926
927static void
928nvd0_sor_destroy(struct drm_encoder *encoder)
929{
930 drm_encoder_cleanup(encoder);
931 kfree(encoder);
932}
933
934static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
935 .dpms = nvd0_sor_dpms,
936 .mode_fixup = nvd0_sor_mode_fixup,
937 .prepare = nvd0_sor_prepare,
938 .commit = nvd0_sor_commit,
939 .mode_set = nvd0_sor_mode_set,
940 .disable = nvd0_sor_disconnect,
941 .get_crtc = nvd0_display_crtc_get,
942};
943
944static const struct drm_encoder_funcs nvd0_sor_func = {
945 .destroy = nvd0_sor_destroy,
946};
947
948static int
949nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
950{
951 struct drm_device *dev = connector->dev;
952 struct nouveau_encoder *nv_encoder;
953 struct drm_encoder *encoder;
954
955 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
956 if (!nv_encoder)
957 return -ENOMEM;
958 nv_encoder->dcb = dcbe;
959 nv_encoder->or = ffs(dcbe->or) - 1;
960 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
961
962 encoder = to_drm_encoder(nv_encoder);
963 encoder->possible_crtcs = dcbe->heads;
964 encoder->possible_clones = 0;
965 drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
966 drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
967
968 drm_mode_connector_attach_encoder(connector, encoder);
969 return 0;
970}
971
972/******************************************************************************
973 * IRQ
974 *****************************************************************************/
975static struct dcb_entry *
976lookup_dcb(struct drm_device *dev, int id, u32 mc)
977{
978 struct drm_nouveau_private *dev_priv = dev->dev_private;
979 int type, or, i;
980
981 if (id < 4) {
982 type = OUTPUT_ANALOG;
983 or = id;
984 } else {
985 switch (mc & 0x00000f00) {
986 case 0x00000000: type = OUTPUT_LVDS; break;
987 case 0x00000100: type = OUTPUT_TMDS; break;
988 case 0x00000200: type = OUTPUT_TMDS; break;
989 case 0x00000500: type = OUTPUT_TMDS; break;
990 default:
991 NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
992 return NULL;
993 }
994
995 or = id - 4;
996 }
997
998 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
999 struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
1000 if (dcb->type == type && (dcb->or & (1 << or)))
1001 return dcb;
1002 }
1003
1004 NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
1005 return NULL;
1006}
1007
1008static void
1009nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
1010{
1011 struct dcb_entry *dcb;
1012 int i;
1013
1014 for (i = 0; mask && i < 8; i++) {
1015 u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
1016 if (!(mcc & (1 << crtc)))
1017 continue;
1018
1019 dcb = lookup_dcb(dev, i, mcc);
1020 if (!dcb)
1021 continue;
1022
1023 nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
1024 }
1025
1026 nv_wr32(dev, 0x6101d4, 0x00000000);
1027 nv_wr32(dev, 0x6109d4, 0x00000000);
1028 nv_wr32(dev, 0x6101d0, 0x80000000);
1029}
1030
1031static void
1032nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1033{
1034 struct dcb_entry *dcb;
1035 u32 or, tmp, pclk;
1036 int i;
1037
1038 for (i = 0; mask && i < 8; i++) {
1039 u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
1040 if (!(mcc & (1 << crtc)))
1041 continue;
1042
1043 dcb = lookup_dcb(dev, i, mcc);
1044 if (!dcb)
1045 continue;
1046
1047 nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
1048 }
1049
1050 pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
1051 if (mask & 0x00010000) {
1052 nv50_crtc_set_clock(dev, crtc, pclk);
1053 }
1054
1055 for (i = 0; mask && i < 8; i++) {
1056 u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
1057 u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
1058 if (!(mcp & (1 << crtc)))
1059 continue;
1060
1061 dcb = lookup_dcb(dev, i, mcp);
1062 if (!dcb)
1063 continue;
1064 or = ffs(dcb->or) - 1;
1065
1066 nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
1067
1068 nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000);
1069 switch (dcb->type) {
1070 case OUTPUT_ANALOG:
1071 nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000);
1072 break;
1073 case OUTPUT_TMDS:
1074 case OUTPUT_LVDS:
1075 if (cfg & 0x00000100)
1076 tmp = 0x00000101;
1077 else
1078 tmp = 0x00000000;
1079
1080 nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp);
1081 break;
1082 default:
1083 break;
1084 }
1085
1086 break;
1087 }
1088
1089 nv_wr32(dev, 0x6101d4, 0x00000000);
1090 nv_wr32(dev, 0x6109d4, 0x00000000);
1091 nv_wr32(dev, 0x6101d0, 0x80000000);
1092}
1093
1094static void
1095nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
1096{
1097 struct dcb_entry *dcb;
1098 int pclk, i;
1099
1100 pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
1101
1102 for (i = 0; mask && i < 8; i++) {
1103 u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
1104 u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
1105 if (!(mcp & (1 << crtc)))
1106 continue;
1107
1108 dcb = lookup_dcb(dev, i, mcp);
1109 if (!dcb)
1110 continue;
1111
1112 nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
1113 }
1114
1115 nv_wr32(dev, 0x6101d4, 0x00000000);
1116 nv_wr32(dev, 0x6109d4, 0x00000000);
1117 nv_wr32(dev, 0x6101d0, 0x80000000);
1118}
1119
1120static void
1121nvd0_display_bh(unsigned long data)
1122{
1123 struct drm_device *dev = (struct drm_device *)data;
1124 struct nvd0_display *disp = nvd0_display(dev);
1125 u32 mask, crtc;
1126 int i;
1127
1128 if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
1129 NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset);
1130 NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n",
1131 nv_rd32(dev, 0x6101d0),
1132 nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4));
1133 for (i = 0; i < 8; i++) {
1134 NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n",
1135 i < 4 ? "DAC" : "SOR", i,
1136 nv_rd32(dev, 0x640180 + (i * 0x20)),
1137 nv_rd32(dev, 0x660180 + (i * 0x20)));
1138 }
1139 }
1140
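/* 0x6101d4 / 0x6109d4 look like per-head status for whatever raised the
* supervisor interrupt; whichever is non-zero tells us which head this
* request belongs to (guesswork, like most of this file).
*/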
1141 mask = nv_rd32(dev, 0x6101d4);
1142 crtc = 0;
1143 if (!mask) {
1144 mask = nv_rd32(dev, 0x6109d4);
1145 crtc = 1;
1146 }
1147
1148 if (disp->modeset & 0x00000001)
1149 nvd0_display_unk1_handler(dev, crtc, mask);
1150 if (disp->modeset & 0x00000002)
1151 nvd0_display_unk2_handler(dev, crtc, mask);
1152 if (disp->modeset & 0x00000004)
1153 nvd0_display_unk4_handler(dev, crtc, mask);
1154}
1155
1156static void
1157nvd0_display_intr(struct drm_device *dev)
1158{
1159 struct nvd0_display *disp = nvd0_display(dev);
1160 u32 intr = nv_rd32(dev, 0x610088);
1161
1162 if (intr & 0x00000002) {
1163 u32 stat = nv_rd32(dev, 0x61009c);
1164 int chid = ffs(stat) - 1;
1165 if (chid >= 0) {
1166 u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
1167 u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
1168 u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));
1169
1170 NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
1171 "0x%08x 0x%08x\n",
1172 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1173 nv_wr32(dev, 0x61009c, (1 << chid));
1174 nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
1175 }
1176
1177 intr &= ~0x00000002;
1178 }
1179
1180 if (intr & 0x00100000) {
1181 u32 stat = nv_rd32(dev, 0x6100ac);
1182
1183 if (stat & 0x00000007) {
1184 disp->modeset = stat;
1185 tasklet_schedule(&disp->tasklet);
1186
1187 nv_wr32(dev, 0x6100ac, (stat & 0x00000007));
1188 stat &= ~0x00000007;
1189 }
1190
1191 if (stat) {
1192 NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat);
1193 nv_wr32(dev, 0x6100ac, stat);
1194 }
1195
1196 intr &= ~0x00100000;
1197 }
1198
1199 if (intr & 0x01000000) {
1200 u32 stat = nv_rd32(dev, 0x6100bc);
1201 nv_wr32(dev, 0x6100bc, stat);
1202 intr &= ~0x01000000;
1203 }
1204
1205 if (intr & 0x02000000) {
1206 u32 stat = nv_rd32(dev, 0x6108bc);
1207 nv_wr32(dev, 0x6108bc, stat);
1208 intr &= ~0x02000000;
1209 }
1210
1211 if (intr)
1212 NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
1213}
1214
1215/******************************************************************************
1216 * Init
1217 *****************************************************************************/
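/* Init sequence, roughly, as implemented below: ack whatever 0x6100ac bit 8
* is flagging, copy the current DAC/SOR/head state registers into PDISP's
* control area (see the comment inside nvd0_display_init()), point the hash
* table at disp->mem and unmask interrupts, bring up the core (master)
* channel and both cursor channels, then push a few initial methods through
* the core channel.
*/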
1218static void
1219nvd0_display_fini(struct drm_device *dev)
1220{
1221 int i;
1222
1223 /* fini cursors */
1224 for (i = 14; i >= 13; i--) {
1225 if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
1226 continue;
1227
1228 nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
1229 nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
1230 nv_mask(dev, 0x610090, 1 << i, 0x00000000);
1231 nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
1232 }
1233
1234 /* fini master */
1235 if (nv_rd32(dev, 0x610490) & 0x00000010) {
1236 nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
1237 nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
1238 nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
1239 nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
1240 nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
1241 }
1242}
1243
1244int
1245nvd0_display_init(struct drm_device *dev)
1246{
1247 struct nvd0_display *disp = nvd0_display(dev);
1248 u32 *push;
1249 int i;
1250
1251 if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
1252 nv_wr32(dev, 0x6100ac, 0x00000100);
1253 nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
1254 if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
1255 NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
1256 nv_rd32(dev, 0x6194e8));
1257 return -EBUSY;
1258 }
1259 }
1260
1261 /* not sure what these are exactly; I do know that SOR_MODE_CTRL won't
1262 * work at all unless you do the SOR part below.
1263 */
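/* (best guess: the loops below seed PDISP's control area from whatever the
* DACs, SORs and heads are currently programmed to, so the core channel
* starts from a consistent view of the outputs -- unverified)
*/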
1264 for (i = 0; i < 3; i++) {
1265 u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800));
1266 nv_wr32(dev, 0x6101c0 + (i * 0x800), dac);
1267 }
1268
1269 for (i = 0; i < 4; i++) {
1270 u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800));
1271 nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
1272 }
1273
1274 for (i = 0; i < 2; i++) {
1275 u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
1276 u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
1277 u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
1278 nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0);
1279 nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1);
1280 nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2);
1281 }
1282
1283 /* point at our hash table / objects, enable interrupts */
1284 nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);
1285 nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
1286
1287 /* init master */
1288 nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
1289 nv_wr32(dev, 0x610498, 0x00010000);
1290 nv_wr32(dev, 0x61049c, 0x00000001);
1291 nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
1292 nv_wr32(dev, 0x640000, 0x00000000);
1293 nv_wr32(dev, 0x610490, 0x01000013);
1294 if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
1295 NV_ERROR(dev, "PDISP: master 0x%08x\n",
1296 nv_rd32(dev, 0x610490));
1297 return -EBUSY;
1298 }
1299 nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
1300 nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
1301
1302 /* init cursors */
1303 for (i = 13; i <= 14; i++) {
1304 nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
1305 if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
1306 NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
1307 nv_rd32(dev, 0x610490 + (i * 0x10)));
1308 return -EBUSY;
1309 }
1310
1311 nv_mask(dev, 0x610090, 1 << i, 1 << i);
1312 nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
1313 }
1314
1315 push = evo_wait(dev, 0, 32);
1316 if (!push)
1317 return -EBUSY;
1318 evo_mthd(push, 0x0088, 1);
1319 evo_data(push, NvEvoSync);
1320 evo_mthd(push, 0x0084, 1);
1321 evo_data(push, 0x00000000);
1322 evo_mthd(push, 0x0084, 1);
1323 evo_data(push, 0x80000000);
1324 evo_mthd(push, 0x008c, 1);
1325 evo_data(push, 0x00000000);
1326 evo_kick(push, dev, 0);
1327
1328 return 0;
1329}
1330
1331void
1332nvd0_display_destroy(struct drm_device *dev)
1333{
1334 struct drm_nouveau_private *dev_priv = dev->dev_private;
1335 struct nvd0_display *disp = nvd0_display(dev);
1336 struct pci_dev *pdev = dev->pdev;
1337
1338 nvd0_display_fini(dev);
1339
1340 pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
1341 nouveau_gpuobj_ref(NULL, &disp->mem);
1342 nouveau_irq_unregister(dev, 26);
1343
1344 dev_priv->engine.display.priv = NULL;
1345 kfree(disp);
1346}
1347
1348int
1349nvd0_display_create(struct drm_device *dev)
1350{
1351 struct drm_nouveau_private *dev_priv = dev->dev_private;
1352 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
1353 struct dcb_table *dcb = &dev_priv->vbios.dcb;
1354 struct drm_connector *connector, *tmp;
1355 struct pci_dev *pdev = dev->pdev;
1356 struct nvd0_display *disp;
1357 struct dcb_entry *dcbe;
1358 int ret, i;
1359
1360 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
1361 if (!disp)
1362 return -ENOMEM;
1363 dev_priv->engine.display.priv = disp;
1364
1365 /* create crtc objects to represent the hw heads */
1366 for (i = 0; i < 2; i++) {
1367 ret = nvd0_crtc_create(dev, i);
1368 if (ret)
1369 goto out;
1370 }
1371
1372 /* create encoder/connector objects based on VBIOS DCB table */
1373 for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
1374 connector = nouveau_connector_create(dev, dcbe->connector);
1375 if (IS_ERR(connector))
1376 continue;
1377
1378 if (dcbe->location != DCB_LOC_ON_CHIP) {
1379 NV_WARN(dev, "skipping off-chip encoder %d/%d\n",
1380 dcbe->type, ffs(dcbe->or) - 1);
1381 continue;
1382 }
1383
1384 switch (dcbe->type) {
1385 case OUTPUT_TMDS:
1386 case OUTPUT_LVDS:
1387 nvd0_sor_create(connector, dcbe);
1388 break;
1389 case OUTPUT_ANALOG:
1390 nvd0_dac_create(connector, dcbe);
1391 break;
1392 default:
1393 NV_WARN(dev, "skipping unsupported encoder %d/%d\n",
1394 dcbe->type, ffs(dcbe->or) - 1);
1395 continue;
1396 }
1397 }
1398
1399 /* cull any connectors we created that don't have an encoder */
1400 list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
1401 if (connector->encoder_ids[0])
1402 continue;
1403
1404 NV_WARN(dev, "%s has no encoders, removing\n",
1405 drm_get_connector_name(connector));
1406 connector->funcs->destroy(connector);
1407 }
1408
1409 /* setup interrupt handling */
1410 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
1411 nouveau_irq_register(dev, 26, nvd0_display_intr);
1412
1413 /* hash table and dma objects for the memory areas we care about */
1414 ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
1415 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
1416 if (ret)
1417 goto out;
1418
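/* Layout written below, as far as it can be read off the code: disp->mem
* gets four DMA objects at 0x1000/0x1020/0x1040/0x1060, each laid out as
* { class/flags, base >> 8, limit >> 8, 0, 0, 0 }, plus a hash table at
* offset 0 pairing each handle with (object offset << 9) | 1.  NvEvoSync
* covers a small sync area inside disp->mem itself; the other three cover
* all of VRAM with differing flag words.
*/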
1419 nv_wo32(disp->mem, 0x1000, 0x00000049);
1420 nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
1421 nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
1422 nv_wo32(disp->mem, 0x100c, 0x00000000);
1423 nv_wo32(disp->mem, 0x1010, 0x00000000);
1424 nv_wo32(disp->mem, 0x1014, 0x00000000);
1425 nv_wo32(disp->mem, 0x0000, NvEvoSync);
1426 nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
1427
1428 nv_wo32(disp->mem, 0x1020, 0x00000049);
1429 nv_wo32(disp->mem, 0x1024, 0x00000000);
1430 nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
1431 nv_wo32(disp->mem, 0x102c, 0x00000000);
1432 nv_wo32(disp->mem, 0x1030, 0x00000000);
1433 nv_wo32(disp->mem, 0x1034, 0x00000000);
1434 nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
1435 nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
1436
1437 nv_wo32(disp->mem, 0x1040, 0x00000009);
1438 nv_wo32(disp->mem, 0x1044, 0x00000000);
1439 nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
1440 nv_wo32(disp->mem, 0x104c, 0x00000000);
1441 nv_wo32(disp->mem, 0x1050, 0x00000000);
1442 nv_wo32(disp->mem, 0x1054, 0x00000000);
1443 nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
1444 nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
1445
1446 nv_wo32(disp->mem, 0x1060, 0x0fe00009);
1447 nv_wo32(disp->mem, 0x1064, 0x00000000);
1448 nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
1449 nv_wo32(disp->mem, 0x106c, 0x00000000);
1450 nv_wo32(disp->mem, 0x1070, 0x00000000);
1451 nv_wo32(disp->mem, 0x1074, 0x00000000);
1452 nv_wo32(disp->mem, 0x0018, NvEvoFB32);
1453 nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
1454
1455 pinstmem->flush(dev);
1456
1457 /* push buffers for evo channels */
1458 disp->evo[0].ptr =
1459 pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
1460 if (!disp->evo[0].ptr) {
1461 ret = -ENOMEM;
1462 goto out;
1463 }
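/* the bus address pci_alloc_consistent() returns in disp->evo[0].handle is
* what nvd0_display_init() programs into 0x610494 as the core channel's
* push buffer.
*/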
1464
1465 ret = nvd0_display_init(dev);
1466 if (ret)
1467 goto out;
1468
1469out:
1470 if (ret)
1471 nvd0_display_destroy(dev);
1472 return ret;
1473}