Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/video.c  7
-rw-r--r--  drivers/cpufreq/cpufreq.c  20
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c  5
-rw-r--r--  drivers/gpu/drm/drm_edid.c  4
-rw-r--r--  drivers/gpu/drm/drm_gem.c  47
-rw-r--r--  drivers/gpu/drm/drm_modes.c  87
-rw-r--r--  drivers/gpu/drm/drm_platform.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  41
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  69
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  46
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  191
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  39
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  6
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c  142
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  699
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  17
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  9
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c  15
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c  6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  6
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c  46
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c  50
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  232
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c  167
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  78
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  31
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  148
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  119
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c  11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c  20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c  98
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  114
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c  21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c  22
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c  28
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c  13
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c  112
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mpeg.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c  9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c  41
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fb.c  68
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c  14
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c  270
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.fuc  400
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.h  24
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c  239
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc  474
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h  483
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc  808
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h  838
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c  14
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c  14
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c  22
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c  57
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h  2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c  1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  15
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c  23
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c  9
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c  14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c  15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h  2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman  2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen  3
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600  1
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c  2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  16
106 files changed, 5556 insertions, 1420 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index db39e9e607d8..ada4b4d9bdc8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -46,7 +46,6 @@
 
 #define PREFIX "ACPI: "
 
-#define ACPI_VIDEO_CLASS "video"
 #define ACPI_VIDEO_BUS_NAME "Video Bus"
 #define ACPI_VIDEO_DEVICE_NAME "Video Device"
 #define ACPI_VIDEO_NOTIFY_SWITCH 0x80
@@ -1445,7 +1444,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 	case ACPI_VIDEO_NOTIFY_SWITCH:	/* User requested a switch,
 					 * most likely via hotkey. */
 		acpi_bus_generate_proc_event(device, event, 0);
-		keycode = KEY_SWITCHVIDEOMODE;
+		if (!acpi_notifier_call_chain(device, event, 0))
+			keycode = KEY_SWITCHVIDEOMODE;
 		break;
 
 	case ACPI_VIDEO_NOTIFY_PROBE:	/* User plugged in or removed a video
@@ -1475,7 +1475,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 		break;
 	}
 
-	acpi_notifier_call_chain(device, event, 0);
+	if (event != ACPI_VIDEO_NOTIFY_SWITCH)
+		acpi_notifier_call_chain(device, event, 0);
 
 	if (keycode) {
 		input_report_key(input, keycode, 1);
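
A minimal sketch (not part of this patch) of the consumer side: a graphics driver can register on the ACPI notifier chain and return NOTIFY_OK for video-switch events it handles itself, which is what makes acpi_notifier_call_chain() above return non-zero so video.c suppresses the KEY_SWITCHVIDEOMODE keypress. The example_* names are hypothetical; "video" is the device class the ACPI video bus registers with.

	#include <linux/notifier.h>
	#include <acpi/acpi_bus.h>

	static int example_video_event(struct notifier_block *nb,
				       unsigned long val, void *data)
	{
		struct acpi_bus_event *event = data;

		/* not a video bus event: let video.c emit the keypress */
		if (strcmp(event->device_class, "video") != 0)
			return NOTIFY_DONE;

		/* ... perform the display switch in the driver ... */

		return NOTIFY_OK;	/* consumed: no KEY_SWITCHVIDEOMODE */
	}

	static struct notifier_block example_video_nb = {
		.notifier_call = example_video_event,
	};

	/* at driver init: register_acpi_notifier(&example_video_nb); */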
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9e3585..987a165ede26 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_quick_get);
 
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	unsigned int ret_freq = 0;
+
+	if (policy) {
+		ret_freq = policy->max;
+		cpufreq_cpu_put(policy);
+	}
+
+	return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
 
 static unsigned int __cpufreq_get(unsigned int cpu)
 {
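
An illustrative caller sketch (not part of this patch): the helper takes and drops the policy refcount internally, so a driver can ask for the policy maximum without any cpufreq locking of its own. Values are in kHz; 0 means cpufreq is not active on that CPU.

	#include <linux/cpufreq.h>

	static unsigned int example_max_cpu_khz(void)
	{
		unsigned int max_khz = cpufreq_quick_get_max(0);	/* CPU 0 */

		if (!max_khz)
			pr_info("cpufreq inactive on CPU 0, using a fallback\n");
		return max_khz;
	}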
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 92369655dca3..f88a9b2c977b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -560,6 +560,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
+		} else if (set->fb->depth != set->crtc->fb->depth) {
+			mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->fb->bits_per_pixel) {
+			mode_changed = true;
 		} else
 			fb_changed = true;
 	}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 09292193dafe..756af4d7ec74 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -185,8 +185,8 @@ drm_edid_block_valid(u8 *raw_edid)
 bad:
 	if (raw_edid) {
 		printk(KERN_ERR "Raw EDID:\n");
-		print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
-		printk(KERN_ERR "\n");
+		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+			       raw_edid, EDID_LENGTH, false);
 	}
 	return 0;
 }
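
For reference, the arguments to print_hex_dump() as used above. The old call passed KERN_ERR where print_hex_dump_bytes() expects a prefix string, so the dump actually went out at the debug level; the open-coded variant gets the level right:

	print_hex_dump(KERN_ERR,	 /* log level for each row */
		       " \t",		 /* prefix string per row */
		       DUMP_PREFIX_NONE, /* no offset/address column */
		       16, 1,		 /* 16 bytes per row, 1-byte groups */
		       raw_edid, EDID_LENGTH,
		       false);		 /* no trailing ASCII column */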
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4012fe423460..186d62eb063b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -129,7 +129,7 @@ drm_gem_destroy(struct drm_device *dev)
 }
 
 /**
- * Initialize an already allocate GEM object of the specified size with
+ * Initialize an already allocated GEM object of the specified size with
  * shmfs backing store.
  */
 int drm_gem_object_init(struct drm_device *dev,
@@ -151,6 +151,27 @@ int drm_gem_object_init(struct drm_device *dev,
 EXPORT_SYMBOL(drm_gem_object_init);
 
 /**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size)
+{
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj->dev = dev;
+	obj->filp = NULL;
+
+	kref_init(&obj->refcount);
+	atomic_set(&obj->handle_count, 0);
+	obj->size = size;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
+
+/**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
 struct drm_gem_object *
@@ -211,6 +232,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, filp);
 	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
@@ -227,7 +250,8 @@ drm_gem_handle_create(struct drm_file *file_priv,
 			 struct drm_gem_object *obj,
 			 u32 *handlep)
 {
-	int ret;
+	struct drm_device *dev = obj->dev;
+	int ret;
 
 	/*
 	 * Get the user-visible handle using idr.
@@ -248,6 +272,15 @@ again:
 		return ret;
 
 	drm_gem_object_handle_reference(obj);
+
+	if (dev->driver->gem_open_object) {
+		ret = dev->driver->gem_open_object(obj, file_priv);
+		if (ret) {
+			drm_gem_handle_delete(file_priv, *handlep);
+			return ret;
+		}
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_handle_create);
@@ -402,7 +435,12 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 static int
 drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
+	struct drm_file *file_priv = data;
 	struct drm_gem_object *obj = ptr;
+	struct drm_device *dev = obj->dev;
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
 
 	drm_gem_object_handle_unreference_unlocked(obj);
 
@@ -418,7 +456,7 @@ void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
 	idr_for_each(&file_private->object_idr,
-		     &drm_gem_object_release_handle, NULL);
+		     &drm_gem_object_release_handle, file_private);
 
 	idr_remove_all(&file_private->object_idr);
 	idr_destroy(&file_private->object_idr);
@@ -427,7 +465,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-	fput(obj->filp);
+	if (obj->filp)
+		fput(obj->filp);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
 
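A sketch (not part of this patch) of a driver using the two new facilities together: drm_gem_private_object_init() for a buffer whose storage the driver manages itself, plus the per-handle open/close hooks. All example_* names are hypothetical.

	static int example_gem_open_object(struct drm_gem_object *obj,
					   struct drm_file *file_priv)
	{
		/* called once per new userspace handle (drm_gem_handle_create) */
		return 0;
	}

	static void example_gem_close_object(struct drm_gem_object *obj,
					     struct drm_file *file_priv)
	{
		/* called when a handle is deleted or its file is released */
	}

	static struct drm_driver example_driver = {
		/* ... other driver fields ... */
		.gem_open_object = example_gem_open_object,
		.gem_close_object = example_gem_close_object,
	};

	static struct drm_gem_object *example_create_private_bo(struct drm_device *dev,
								 size_t size)
	{
		struct drm_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		/* size must be page-aligned; no shmfs backing is attached */
		if (drm_gem_private_object_init(dev, obj, size)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}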
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c2d32f20e2fb..ad74fb4dc542 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -994,9 +994,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 {
 	const char *name;
 	unsigned int namelen;
-	int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+	bool res_specified = false, bpp_specified = false, refresh_specified = false;
 	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
-	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+	bool yres_specified = false, cvt = false, rb = false;
+	bool interlace = false, margins = false, was_digit = false;
 	int i;
 	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
 
@@ -1015,54 +1016,65 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	for (i = namelen-1; i >= 0; i--) {
 		switch (name[i]) {
 		case '@':
-			namelen = i;
 			if (!refresh_specified && !bpp_specified &&
-			    !yres_specified) {
+			    !yres_specified && !cvt && !rb && was_digit) {
 				refresh = simple_strtol(&name[i+1], NULL, 10);
-				refresh_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
+				refresh_specified = true;
+				was_digit = false;
 			} else
 				goto done;
 			break;
 		case '-':
-			namelen = i;
-			if (!bpp_specified && !yres_specified) {
+			if (!bpp_specified && !yres_specified && !cvt &&
+			    !rb && was_digit) {
 				bpp = simple_strtol(&name[i+1], NULL, 10);
-				bpp_specified = 1;
-				if (cvt || rb)
-					cvt = 0;
+				bpp_specified = true;
+				was_digit = false;
 			} else
 				goto done;
 			break;
 		case 'x':
-			if (!yres_specified) {
+			if (!yres_specified && was_digit) {
 				yres = simple_strtol(&name[i+1], NULL, 10);
-				yres_specified = 1;
+				yres_specified = true;
+				was_digit = false;
 			} else
 				goto done;
 		case '0' ... '9':
+			was_digit = true;
 			break;
 		case 'M':
-			if (!yres_specified)
-				cvt = 1;
+			if (yres_specified || cvt || was_digit)
+				goto done;
+			cvt = true;
 			break;
 		case 'R':
-			if (cvt)
-				rb = 1;
+			if (yres_specified || cvt || rb || was_digit)
+				goto done;
+			rb = true;
 			break;
 		case 'm':
-			if (!cvt)
-				margins = 1;
+			if (cvt || yres_specified || was_digit)
+				goto done;
+			margins = true;
 			break;
 		case 'i':
-			if (!cvt)
-				interlace = 1;
+			if (cvt || yres_specified || was_digit)
+				goto done;
+			interlace = true;
 			break;
 		case 'e':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
 			force = DRM_FORCE_ON;
 			break;
 		case 'D':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
 			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
 			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
 				force = DRM_FORCE_ON;
@@ -1070,17 +1082,37 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 			else
 				force = DRM_FORCE_ON_DIGITAL;
 			break;
 		case 'd':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
 			force = DRM_FORCE_OFF;
 			break;
 		default:
 			goto done;
 		}
 	}
+
 	if (i < 0 && yres_specified) {
-		xres = simple_strtol(name, NULL, 10);
-		res_specified = 1;
+		char *ch;
+		xres = simple_strtol(name, &ch, 10);
+		if ((ch != NULL) && (*ch == 'x'))
+			res_specified = true;
+		else
+			i = ch - name;
+	} else if (!yres_specified && was_digit) {
+		/* catch mode that begins with digits but has no 'x' */
+		i = 0;
 	}
 done:
+	if (i >= 0) {
+		printk(KERN_WARNING
+			"parse error at position %i in video mode '%s'\n",
+			i, name);
+		mode->specified = false;
+		return false;
+	}
+
 	if (res_specified) {
 		mode->specified = true;
 		mode->xres = xres;
@@ -1096,9 +1128,10 @@ done:
 		mode->bpp_specified = true;
 		mode->bpp = bpp;
 	}
-	mode->rb = rb ? true : false;
-	mode->cvt = cvt ? true : false;
-	mode->interlace = interlace ? true : false;
+	mode->rb = rb;
+	mode->cvt = cvt;
+	mode->interlace = interlace;
+	mode->margins = margins;
 	mode->force = force;
 
 	return true;
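
A few examples (illustrative) of how the stricter parser treats typical video= mode strings after this change:

	/*
	 *   "1920x1080"       resolution only
	 *   "1920x1080-24@60" bpp 24, refresh 60 Hz
	 *   "1024x768MR"      CVT mode with reduced blanking
	 *   "1024x768e"       mode forced on
	 *   "1920x1080x60"    rejected: the stray 'x' after yres now lands in
	 *                     the new parse-error path instead of being
	 *                     silently misparsed
	 */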
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 7223f06d8e58..2a8b6265ad3d 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -123,14 +123,15 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
 {
 	int len, ret;
 
-	master->unique_len = 10 + strlen(dev->platformdev->name);
+	master->unique_len = 13 + strlen(dev->platformdev->name);
+	master->unique_size = master->unique_len;
 	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
 
 	if (master->unique == NULL)
 		return -ENOMEM;
 
 	len = snprintf(master->unique, master->unique_len,
-		       "platform:%s", dev->platformdev->name);
+		       "platform:%s:%02d", dev->platformdev->name, dev->platformdev->id);
 
 	if (len > master->unique_len) {
 		DRM_ERROR("Unique buffer overflowed\n");
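
Illustrative result (not part of the patch): for a hypothetical platform device named "example-drm" with id 0, the busid becomes "platform:example-drm:00"; the unique_len bump from 10 to 13 covers the extra ":%02d" suffix.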
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a893f7400fa..e2662497d50f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if (IS_GEN6(dev)) {
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	int gpu_freq, ia_freq;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+		seq_printf(m, "unsupported on this chipset\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	     gpu_freq++) {
+		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
+		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+			      GEN6_PCODE_READY) == 0, 10)) {
+			DRM_ERROR("pcode read of freq table timed out\n");
+			continue;
+		}
+		ia_freq = I915_READ(GEN6_PCODE_DATA);
+		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i915_gfxec(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1430,6 +1468,7 @@ static struct drm_info_list i915_debugfs_list[] = {
1430 {"i915_inttoext_table", i915_inttoext_table, 0}, 1468 {"i915_inttoext_table", i915_inttoext_table, 0},
1431 {"i915_drpc_info", i915_drpc_info, 0}, 1469 {"i915_drpc_info", i915_drpc_info, 0},
1432 {"i915_emon_status", i915_emon_status, 0}, 1470 {"i915_emon_status", i915_emon_status, 0},
1471 {"i915_ring_freq_table", i915_ring_freq_table, 0},
1433 {"i915_gfxec", i915_gfxec, 0}, 1472 {"i915_gfxec", i915_gfxec, 0},
1434 {"i915_fbc_status", i915_fbc_status, 0}, 1473 {"i915_fbc_status", i915_fbc_status, 0},
1435 {"i915_sr_status", i915_sr_status, 0}, 1474 {"i915_sr_status", i915_sr_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 296fbd66f0e1..12712824a6d2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1073,6 +1073,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	unsigned long cfb_base;
 	unsigned long ll_base = 0;
 
+	/* Just in case the BIOS is doing something questionable. */
+	intel_disable_fbc(dev);
+
 	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
 	if (compressed_fb)
 		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
@@ -1099,7 +1102,6 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
 	dev_priv->cfb_size = size;
 
-	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
 	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index eb91e2dd7914..ce045a8cf82c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -37,38 +37,70 @@
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
 
-static int i915_modeset = -1;
+static int i915_modeset __read_mostly = -1;
 module_param_named(modeset, i915_modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+		"1=on, -1=force vga console preference [default])");
 
-unsigned int i915_fbpercrtc = 0;
+unsigned int i915_fbpercrtc __always_unused = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
-int i915_panel_ignore_lid = 0;
+int i915_panel_ignore_lid __read_mostly = 0;
 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+		"Override lid status (0=autodetect [default], 1=lid open, "
+		"-1=lid closed)");
 
-unsigned int i915_powersave = 1;
+unsigned int i915_powersave __read_mostly = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+		"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores = 0;
+unsigned int i915_semaphores __read_mostly = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
+MODULE_PARM_DESC(semaphores,
+		"Use semaphores for inter-ring sync (default: false)");
 
-unsigned int i915_enable_rc6 = 0;
+unsigned int i915_enable_rc6 __read_mostly = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+MODULE_PARM_DESC(i915_enable_rc6,
+		"Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc = 0;
+unsigned int i915_enable_fbc __read_mostly = 1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+MODULE_PARM_DESC(i915_enable_fbc,
+		"Enable frame buffer compression for power savings "
+		"(default: false)");
 
-unsigned int i915_lvds_downclock = 0;
+unsigned int i915_lvds_downclock __read_mostly = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+		"Use panel (LVDS/eDP) downclocking for power savings "
+		"(default: false)");
 
-unsigned int i915_panel_use_ssc = 1;
+unsigned int i915_panel_use_ssc __read_mostly = 1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
+		"(default: true)");
 
-int i915_vbt_sdvo_panel_type = -1;
+int i915_vbt_sdvo_panel_type __read_mostly = -1;
 module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+		"Override selection of SDVO panel mode in the VBT "
+		"(default: auto)");
 
-static bool i915_try_reset = true;
+static bool i915_try_reset __read_mostly = true;
 module_param_named(reset, i915_try_reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+bool i915_enable_hangcheck __read_mostly = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+		"Periodically check GPU activity for detecting hangs. "
+		"WARNING: Disabling this can cause system wide hangs. "
+		"(default: true)");
 
 static struct drm_driver driver;
 extern int intel_agp_enabled;
@@ -345,12 +377,17 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
-	int loop = 500;
-	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-	while (fifo < 20 && loop--) {
-		udelay(10);
-		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+		int loop = 500;
+		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+			udelay(10);
+			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		}
+		WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+		dev_priv->gt_fifo_count = fifo;
 	}
+	dev_priv->gt_fifo_count--;
 }
 
 static int i915_drm_freeze(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce7914c4c044..6867e193d85e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -214,6 +214,8 @@ struct drm_i915_display_funcs {
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj);
+	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			    int x, int y);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -265,6 +267,7 @@ enum intel_pch {
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 
 struct intel_fbdev;
+struct intel_fbc_work;
 
 typedef struct drm_i915_private {
 	struct drm_device *dev;
@@ -275,6 +278,7 @@ typedef struct drm_i915_private {
 	int relative_constants_mode;
 
 	void __iomem *regs;
+	u32 gt_fifo_count;
 
 	struct intel_gmbus {
 		struct i2c_adapter adapter;
@@ -329,11 +333,10 @@ typedef struct drm_i915_private {
 	uint32_t last_instdone1;
 
 	unsigned long cfb_size;
-	unsigned long cfb_pitch;
-	unsigned long cfb_offset;
-	int cfb_fence;
-	int cfb_plane;
+	unsigned int cfb_fb;
+	enum plane cfb_plane;
 	int cfb_y;
+	struct intel_fbc_work *fbc_work;
 
 	struct intel_opregion opregion;
 
@@ -986,15 +989,16 @@ struct drm_i915_file_private {
 
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc;
-extern int i915_panel_ignore_lid;
-extern unsigned int i915_powersave;
-extern unsigned int i915_semaphores;
-extern unsigned int i915_lvds_downclock;
-extern unsigned int i915_panel_use_ssc;
-extern int i915_vbt_sdvo_panel_type;
-extern unsigned int i915_enable_rc6;
-extern unsigned int i915_enable_fbc;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern unsigned int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern unsigned int i915_enable_rc6 __read_mostly;
+extern unsigned int i915_enable_fbc __read_mostly;
+extern bool i915_enable_hangcheck __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1164,7 +1168,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1183,7 +1187,8 @@ int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
@@ -1199,9 +1204,14 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 				    uint32_t size,
 				    int tiling_mode);
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
@@ -1283,12 +1293,8 @@ extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void i8xx_disable_fbc(struct drm_device *dev);
-extern void g4x_disable_fbc(struct drm_device *dev);
-extern void ironlake_disable_fbc(struct drm_device *dev);
-extern void intel_disable_fbc(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a087e1bf0c2f..d1cd8b89f47d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1763,8 +1763,11 @@ i915_add_request(struct intel_ring_buffer *ring,
 	ring->outstanding_lazy_request = false;
 
 	if (!dev_priv->mm.suspended) {
-		mod_timer(&dev_priv->hangcheck_timer,
-			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+		if (i915_enable_hangcheck) {
+			mod_timer(&dev_priv->hangcheck_timer,
+				  jiffies +
+				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+		}
 		if (was_empty)
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
@@ -2135,6 +2138,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+	u32 old_write_domain, old_read_domains;
+
+	/* Act a barrier for all accesses through the GTT */
+	mb();
+
+	/* Force a pagefault for domain tracking on next user access */
+	i915_gem_release_mmap(obj);
+
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		return;
+
+	old_read_domains = obj->base.read_domains;
+	old_write_domain = obj->base.write_domain;
+
+	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+}
+
 /**
  * Unbinds an object from the GTT aperture.
  */
@@ -2151,23 +2178,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* blow away mappings if mapped through GTT */
-	i915_gem_release_mmap(obj);
-
-	/* Move the object to the CPU domain to ensure that
-	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it. This will
-	 * also ensure that all pending GPU writes are finished
-	 * before we unbind.
-	 */
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_finish_gpu(obj);
 	if (ret == -ERESTARTSYS)
 		return ret;
 	/* Continue on if we fail due to EIO, the GPU is hung so we
 	 * should be safe and we need to cleanup or else we might
 	 * cause memory corruption through use-after-free.
 	 */
+
+	i915_gem_object_finish_gtt(obj);
+
+	/* Move the object to the CPU domain to ensure that
+	 * any possible CPU writes while it's not in the GTT
+	 * are flushed when we go to remap it.
+	 */
+	if (ret == 0)
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret == -ERESTARTSYS)
+		return ret;
 	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
 		i915_gem_clflush_object(obj);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
@@ -2996,51 +3028,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	return 0;
 }
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level)
+{
+	int ret;
+
+	if (obj->cache_level == cache_level)
+		return 0;
+
+	if (obj->pin_count) {
+		DRM_DEBUG("can not change the cache level of pinned objects\n");
+		return -EBUSY;
+	}
+
+	if (obj->gtt_space) {
+		ret = i915_gem_object_finish_gpu(obj);
+		if (ret)
+			return ret;
+
+		i915_gem_object_finish_gtt(obj);
+
+		/* Before SandyBridge, you could not use tiling or fence
+		 * registers with snooped memory, so relinquish any fences
+		 * currently pointing to our region in the aperture.
+		 */
+		if (INTEL_INFO(obj->base.dev)->gen < 6) {
+			ret = i915_gem_object_put_fence(obj);
+			if (ret)
+				return ret;
+		}
+
+		i915_gem_gtt_rebind_object(obj, cache_level);
+	}
+
+	if (cache_level == I915_CACHE_NONE) {
+		u32 old_read_domains, old_write_domain;
+
+		/* If we're coming from LLC cached, then we haven't
+		 * actually been tracking whether the data is in the
+		 * CPU cache or not, since we only allow one bit set
+		 * in obj->write_domain and have been skipping the clflushes.
+		 * Just set it to the CPU cache for now.
+		 */
+		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+		old_read_domains = obj->base.read_domains;
+		old_write_domain = obj->base.write_domain;
+
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+		trace_i915_gem_object_change_domain(obj,
+						    old_read_domains,
+						    old_write_domain);
+	}
+
+	obj->cache_level = cache_level;
+	return 0;
+}
+
 /*
- * Prepare buffer for display plane. Use uninterruptible for possible flush
- * wait, as in modesetting process we're not supposed to be interrupted.
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ *
+ * For the display plane, we want to be in the GTT but out of any write
+ * domains. So in many ways this looks like set_to_gtt_domain() apart from the
+ * ability to pipeline the waits, pinning and any additional subtleties
+ * that may differentiate the display plane from ordinary buffers.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
 				     struct intel_ring_buffer *pipelined)
 {
-	uint32_t old_read_domains;
+	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	/* Not valid to be called on unbound objects. */
-	if (obj->gtt_space == NULL)
-		return -EINVAL;
-
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
-
-	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
 			return ret;
 	}
 
+	/* The display engine is not coherent with the LLC cache on gen6. As
+	 * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is lowest common denominator for all
+	 * chipsets.
+	 *
+	 * However for gen6+, we could do better by using the GFDT bit instead
+	 * of uncaching, which would allow us to flush all the LLC-cached data
+	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+	 */
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+	if (ret)
+		return ret;
+
+	/* As the user may map the buffer once pinned in the display plane
+	 * (e.g. libkms for the bootup splash), we have to ensure that we
+	 * always use map_and_fenceable for all scanout buffers.
+	 */
+	ret = i915_gem_object_pin(obj, alignment, true);
+	if (ret)
+		return ret;
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
+	old_write_domain = obj->base.write_domain;
 	old_read_domains = obj->base.read_domains;
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->base.write_domain);
+					    old_write_domain);
 
 	return 0;
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
-	if (!obj->active)
+	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3049,6 +3169,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 			return ret;
 	}
 
+	/* Ensure that we invalidate the GPU's caches and TLBs. */
+	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
 	return i915_gem_object_wait_rendering(obj);
 }
 
@@ -3575,7 +3698,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	obj->cache_level = I915_CACHE_NONE;
+	if (IS_GEN6(dev)) {
+		/* On Gen6, we can have the GPU use the LLC (the CPU
+		 * cache) for about a 10% performance improvement
+		 * compared to uncached. Graphics requests other than
+		 * display scanout are coherent with the CPU in
+		 * accessing this cache. This means in this mode we
+		 * don't need to clflush on the CPU side, and on the
+		 * GPU side we only need to flush internal caches to
+		 * get data visible to the CPU.
+		 *
+		 * However, we maintain the display planes as UC, and so
+		 * need to rebind when first used as such.
+		 */
+		obj->cache_level = I915_CACHE_LLC;
+	} else
+		obj->cache_level = I915_CACHE_NONE;
+
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->mm_list);
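
An illustrative call-site sketch (not part of this patch): modesetting code now makes a single call that flushes, forces the object uncached and pins it, instead of the old pin + set_to_display_plane pair. intel_display.c in this series is the real user; the snippet below is a simplified stand-in.

	static int example_prepare_scanout(struct drm_i915_gem_object *obj,
					   u32 alignment,
					   struct intel_ring_buffer *pipelined)
	{
		int ret;

		ret = i915_gem_object_pin_to_display_plane(obj, alignment,
							   pipelined);
		if (ret)
			return ret;

		/* obj is now pinned, uncached (I915_CACHE_NONE) and GTT-readable */
		return 0;
	}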
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e46b645773cf..7a709cd8d543 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
-		unsigned int agp_type =
-			cache_level_to_agp_type(dev, obj->cache_level);
-
 		i915_gem_clflush_object(obj);
-
-		if (dev_priv->mm.gtt->needs_dmar) {
-			BUG_ON(!obj->sg_list);
-
-			intel_gtt_insert_sg_entries(obj->sg_list,
-						    obj->num_sg,
-						    obj->gtt_space->start >> PAGE_SHIFT,
-						    agp_type);
-		} else
-			intel_gtt_insert_pages(obj->gtt_space->start
-						   >> PAGE_SHIFT,
-					       obj->base.size >> PAGE_SHIFT,
-					       obj->pages,
-					       agp_type);
+		i915_gem_gtt_rebind_object(obj, obj->cache_level);
 	}
 
 	intel_gtt_chipset_flush();
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+
+	if (dev_priv->mm.gtt->needs_dmar) {
+		BUG_ON(!obj->sg_list);
+
+		intel_gtt_insert_sg_entries(obj->sg_list,
+					    obj->num_sg,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    agp_type);
+	} else
+		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+				       obj->base.size >> PAGE_SHIFT,
+				       obj->pages,
+				       agp_type);
+}
+
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3b03f85ea627..23d1ae67d279 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -361,10 +361,12 @@ static void notify_ring(struct drm_device *dev,
 
 	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
-
-	dev_priv->hangcheck_count = 0;
-	mod_timer(&dev_priv->hangcheck_timer,
-		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+	if (i915_enable_hangcheck) {
+		dev_priv->hangcheck_count = 0;
+		mod_timer(&dev_priv->hangcheck_timer,
+			  jiffies +
+			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+	}
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
@@ -1664,6 +1666,9 @@ void i915_hangcheck_elapsed(unsigned long data)
 	uint32_t acthd, instdone, instdone1;
 	bool err = false;
 
+	if (!i915_enable_hangcheck)
+		return;
+
 	/* If all work is done then ACTHD clearly hasn't advanced. */
 	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
 	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5d5def756c9e..02db299f621a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -579,6 +579,7 @@
 #define DPFC_CTL_PLANEA (0<<30)
 #define DPFC_CTL_PLANEB (1<<30)
 #define DPFC_CTL_FENCE_EN (1<<29)
+#define DPFC_CTL_PERSISTENT_MODE (1<<25)
 #define DPFC_SR_EN (1<<10)
 #define DPFC_CTL_LIMIT_1X (0<<6)
 #define DPFC_CTL_LIMIT_2X (1<<6)
@@ -3360,6 +3361,7 @@
 #define FORCEWAKE_ACK 0x130090
 
 #define GT_FIFO_FREE_ENTRIES 0x120008
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
 
 #define GEN6_RPNSWREQ 0xA008
 #define GEN6_TURBO_DISABLE (1<<31)
@@ -3434,7 +3436,9 @@
 #define GEN6_PCODE_MAILBOX 0x138124
 #define GEN6_PCODE_READY (1<<31)
 #define GEN6_READ_OC_PARAMS 0xc
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
 #define GEN6_PCODE_DATA 0x138128
+#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
 
 #endif /* _I915_REG_H_ */
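
The mailbox handshake implied by these defines, as a sketch (the i915_ring_freq_table code above uses exactly this sequence): write the payload to GEN6_PCODE_DATA, write the command with the READY bit set, poll until the firmware clears READY, then read the reply back from DATA.

	static int example_pcode_read(struct drm_i915_private *dev_priv,
				      u32 mbox, u32 *val)
	{
		I915_WRITE(GEN6_PCODE_DATA, *val);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10))
			return -ETIMEDOUT;	/* firmware never acknowledged */

		*val = I915_READ(GEN6_PCODE_DATA);
		return 0;
	}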
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5257cfc34c35..285758603ac8 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -760,15 +760,13 @@ static void i915_restore_display(struct drm_device *dev)
 	/* FIXME: restore TV & SDVO state */
 
 	/* only restore FBC info on the platform that supports FBC*/
+	intel_disable_fbc(dev);
 	if (I915_HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
-			ironlake_disable_fbc(dev);
 			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
-			g4x_disable_fbc(dev);
 			I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
 		} else {
-			i8xx_disable_fbc(dev);
 			I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
 			I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
 			I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
@@ -878,8 +876,10 @@ int i915_restore_state(struct drm_device *dev)
 		intel_init_emon(dev);
 	}
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev)) {
 		gen6_enable_rps(dev_priv);
+		gen6_update_ring_freq(dev_priv);
+	}
 
 	mutex_lock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 927442a11925..61abef8a8119 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -74,7 +74,7 @@ get_blocksize(void *p)
 
 static void
 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
-			struct lvds_dvo_timing *dvo_timing)
+			const struct lvds_dvo_timing *dvo_timing)
 {
 	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
 		dvo_timing->hactive_lo;
@@ -115,20 +115,75 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
115 drm_mode_set_name(panel_fixed_mode); 115 drm_mode_set_name(panel_fixed_mode);
116} 116}
117 117
118static bool
119lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
120 const struct lvds_dvo_timing *b)
121{
122 if (a->hactive_hi != b->hactive_hi ||
123 a->hactive_lo != b->hactive_lo)
124 return false;
125
126 if (a->hsync_off_hi != b->hsync_off_hi ||
127 a->hsync_off_lo != b->hsync_off_lo)
128 return false;
129
130 if (a->hsync_pulse_width != b->hsync_pulse_width)
131 return false;
132
133 if (a->hblank_hi != b->hblank_hi ||
134 a->hblank_lo != b->hblank_lo)
135 return false;
136
137 if (a->vactive_hi != b->vactive_hi ||
138 a->vactive_lo != b->vactive_lo)
139 return false;
140
141 if (a->vsync_off != b->vsync_off)
142 return false;
143
144 if (a->vsync_pulse_width != b->vsync_pulse_width)
145 return false;
146
147 if (a->vblank_hi != b->vblank_hi ||
148 a->vblank_lo != b->vblank_lo)
149 return false;
150
151 return true;
152}
153
154static const struct lvds_dvo_timing *
155get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
156 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
157 int index)
158{
159 /*
 160 * The size of fp_timing varies from platform to platform, so
 161 * compute the DVO timing's relative offset within each LVDS
 162 * data entry in order to locate the DVO timing itself.
163 */
164
165 int lfp_data_size =
166 lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
167 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
168 int dvo_timing_offset =
169 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
170 lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
171 char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
172
173 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
174}
175
118/* Try to find integrated panel data */ 176/* Try to find integrated panel data */
119static void 177static void
120parse_lfp_panel_data(struct drm_i915_private *dev_priv, 178parse_lfp_panel_data(struct drm_i915_private *dev_priv,
121 struct bdb_header *bdb) 179 struct bdb_header *bdb)
122{ 180{
123 struct bdb_lvds_options *lvds_options; 181 const struct bdb_lvds_options *lvds_options;
124 struct bdb_lvds_lfp_data *lvds_lfp_data; 182 const struct bdb_lvds_lfp_data *lvds_lfp_data;
125 struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; 183 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
126 struct bdb_lvds_lfp_data_entry *entry; 184 const struct lvds_dvo_timing *panel_dvo_timing;
127 struct lvds_dvo_timing *dvo_timing;
128 struct drm_display_mode *panel_fixed_mode; 185 struct drm_display_mode *panel_fixed_mode;
129 int lfp_data_size, dvo_timing_offset; 186 int i, downclock;
130 int i, temp_downclock;
131 struct drm_display_mode *temp_mode;
132 187
133 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); 188 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
134 if (!lvds_options) 189 if (!lvds_options)
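For reference, a minimal sketch of the offset arithmetic the new get_lvds_dvo_timing() helper performs, with made-up VBT offsets: the per-panel entry size is inferred from the distance between consecutive dvo_timing_offset values, and the DVO timing sits at a fixed offset inside each entry, past the variably-sized fp_timing header:

#include <stdio.h>

struct lfp_data_ptr {
    int fp_timing_offset;
    int dvo_timing_offset;
};

int main(void)
{
    /* Hypothetical pointer table for the first two panel entries. */
    struct lfp_data_ptr ptr[2] = {
        { .fp_timing_offset = 0,  .dvo_timing_offset = 46  },
        { .fp_timing_offset = 64, .dvo_timing_offset = 110 },
    };
    int index = 1;  /* panel_type from the LVDS options block */

    /* Entry size: distance between consecutive DVO timing offsets. */
    int lfp_data_size = ptr[1].dvo_timing_offset -
                        ptr[0].dvo_timing_offset;          /* 64 */
    /* DVO timing position inside each entry. */
    int dvo_timing_offset = ptr[0].dvo_timing_offset -
                            ptr[0].fp_timing_offset;       /* 46 */

    printf("entry %d at byte %d, its DVO timing at byte %d\n",
           index, lfp_data_size * index,
           lfp_data_size * index + dvo_timing_offset);
    return 0;
}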
@@ -150,75 +205,44 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
150 205
151 dev_priv->lvds_vbt = 1; 206 dev_priv->lvds_vbt = 1;
152 207
153 lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - 208 panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
154 lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; 209 lvds_lfp_data_ptrs,
155 entry = (struct bdb_lvds_lfp_data_entry *) 210 lvds_options->panel_type);
156 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
157 lvds_options->panel_type));
158 dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
159 lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
160
161 /*
162 * the size of fp_timing varies on the different platform.
163 * So calculate the DVO timing relative offset in LVDS data
164 * entry to get the DVO timing entry
165 */
166 dvo_timing = (struct lvds_dvo_timing *)
167 ((unsigned char *)entry + dvo_timing_offset);
168 211
169 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 212 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
170 if (!panel_fixed_mode) 213 if (!panel_fixed_mode)
171 return; 214 return;
172 215
173 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 216 fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
174 217
175 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 218 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
176 219
177 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); 220 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
178 drm_mode_debug_printmodeline(panel_fixed_mode); 221 drm_mode_debug_printmodeline(panel_fixed_mode);
179 222
180 temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
181 temp_downclock = panel_fixed_mode->clock;
182 /* 223 /*
183 * enumerate the LVDS panel timing info entry in VBT to check whether 224 * Iterate over the LVDS panel timing info to find the lowest clock
184 * the LVDS downclock is found. 225 * for the native resolution.
185 */ 226 */
227 downclock = panel_dvo_timing->clock;
186 for (i = 0; i < 16; i++) { 228 for (i = 0; i < 16; i++) {
187 entry = (struct bdb_lvds_lfp_data_entry *) 229 const struct lvds_dvo_timing *dvo_timing;
188 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i)); 230
189 dvo_timing = (struct lvds_dvo_timing *) 231 dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
190 ((unsigned char *)entry + dvo_timing_offset); 232 lvds_lfp_data_ptrs,
191 233 i);
192 fill_detail_timing_data(temp_mode, dvo_timing); 234 if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
193 235 dvo_timing->clock < downclock)
194 if (temp_mode->hdisplay == panel_fixed_mode->hdisplay && 236 downclock = dvo_timing->clock;
195 temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
196 temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
197 temp_mode->htotal == panel_fixed_mode->htotal &&
198 temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
199 temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
200 temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
201 temp_mode->vtotal == panel_fixed_mode->vtotal &&
202 temp_mode->clock < temp_downclock) {
203 /*
204 * downclock is already found. But we expect
205 * to find the lower downclock.
206 */
207 temp_downclock = temp_mode->clock;
208 }
209 /* clear it to zero */
210 memset(temp_mode, 0, sizeof(*temp_mode));
211 } 237 }
212 kfree(temp_mode); 238
213 if (temp_downclock < panel_fixed_mode->clock && 239 if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
214 i915_lvds_downclock) {
215 dev_priv->lvds_downclock_avail = 1; 240 dev_priv->lvds_downclock_avail = 1;
216 dev_priv->lvds_downclock = temp_downclock; 241 dev_priv->lvds_downclock = downclock * 10;
217 DRM_DEBUG_KMS("LVDS downclock is found in VBT. " 242 DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
218 "Normal Clock %dKHz, downclock %dKHz\n", 243 "Normal Clock %dKHz, downclock %dKHz\n",
219 temp_downclock, panel_fixed_mode->clock); 244 panel_fixed_mode->clock, 10*downclock);
220 } 245 }
221 return;
222} 246}
223 247
224/* Try to find sdvo panel data */ 248/* Try to find sdvo panel data */
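A small standalone sketch of the rewritten downclock scan, using invented timings: among the VBT entries, keep the lowest dot clock whose geometry matches the native panel timing. The *10 mirrors the driver's apparent conversion from VBT clock units to kHz seen in the hunk above:

#include <stdio.h>

struct timing { int hactive, vactive, clock; };

int main(void)
{
    const struct timing native = { 1366, 768, 7000 };
    const struct timing vbt[4] = {
        { 1366, 768, 7000 },
        { 1366, 768, 5600 },   /* valid downclock candidate */
        { 1024, 768, 5000 },   /* different size: ignored */
        { 1366, 768, 6200 },
    };
    int i, downclock = native.clock;

    for (i = 0; i < 4; i++)
        if (vbt[i].hactive == native.hactive &&
            vbt[i].vactive == native.vactive &&
            vbt[i].clock < downclock)
            downclock = vbt[i].clock;

    if (downclock < native.clock)
        printf("downclock %d kHz (native %d kHz)\n",
               downclock * 10, native.clock * 10);
    return 0;
}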
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0f1c799afea1..393a39922e53 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/cpufreq.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/input.h> 29#include <linux/input.h>
29#include <linux/i2c.h> 30#include <linux/i2c.h>
@@ -1157,12 +1158,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1157 1158
1158 reg = TRANSCONF(pipe); 1159 reg = TRANSCONF(pipe);
1159 val = I915_READ(reg); 1160 val = I915_READ(reg);
1160 /* 1161
1161 * make the BPC in transcoder be consistent with 1162 if (HAS_PCH_IBX(dev_priv->dev)) {
1162 * that in pipeconf reg. 1163 /*
1163 */ 1164 * make the BPC in transcoder be consistent with
1164 val &= ~PIPE_BPC_MASK; 1165 * that in pipeconf reg.
1165 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; 1166 */
1167 val &= ~PIPE_BPC_MASK;
1168 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1169 }
1166 I915_WRITE(reg, val | TRANS_ENABLE); 1170 I915_WRITE(reg, val | TRANS_ENABLE);
1167 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1171 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1168 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1172 DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -1380,6 +1384,28 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1380 disable_pch_hdmi(dev_priv, pipe, HDMID); 1384 disable_pch_hdmi(dev_priv, pipe, HDMID);
1381} 1385}
1382 1386
1387static void i8xx_disable_fbc(struct drm_device *dev)
1388{
1389 struct drm_i915_private *dev_priv = dev->dev_private;
1390 u32 fbc_ctl;
1391
1392 /* Disable compression */
1393 fbc_ctl = I915_READ(FBC_CONTROL);
1394 if ((fbc_ctl & FBC_CTL_EN) == 0)
1395 return;
1396
1397 fbc_ctl &= ~FBC_CTL_EN;
1398 I915_WRITE(FBC_CONTROL, fbc_ctl);
1399
1400 /* Wait for compressing bit to clear */
1401 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1402 DRM_DEBUG_KMS("FBC idle timed out\n");
1403 return;
1404 }
1405
1406 DRM_DEBUG_KMS("disabled FBC\n");
1407}
1408
1383static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1409static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1384{ 1410{
1385 struct drm_device *dev = crtc->dev; 1411 struct drm_device *dev = crtc->dev;
@@ -1388,36 +1414,25 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1388 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1414 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1389 struct drm_i915_gem_object *obj = intel_fb->obj; 1415 struct drm_i915_gem_object *obj = intel_fb->obj;
1390 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1416 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1417 int cfb_pitch;
1391 int plane, i; 1418 int plane, i;
1392 u32 fbc_ctl, fbc_ctl2; 1419 u32 fbc_ctl, fbc_ctl2;
1393 1420
1394 if (fb->pitch == dev_priv->cfb_pitch && 1421 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1395 obj->fence_reg == dev_priv->cfb_fence && 1422 if (fb->pitch < cfb_pitch)
1396 intel_crtc->plane == dev_priv->cfb_plane && 1423 cfb_pitch = fb->pitch;
1397 I915_READ(FBC_CONTROL) & FBC_CTL_EN)
1398 return;
1399
1400 i8xx_disable_fbc(dev);
1401
1402 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1403
1404 if (fb->pitch < dev_priv->cfb_pitch)
1405 dev_priv->cfb_pitch = fb->pitch;
1406 1424
1407 /* FBC_CTL wants 64B units */ 1425 /* FBC_CTL wants 64B units */
1408 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1426 cfb_pitch = (cfb_pitch / 64) - 1;
1409 dev_priv->cfb_fence = obj->fence_reg; 1427 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1410 dev_priv->cfb_plane = intel_crtc->plane;
1411 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1412 1428
1413 /* Clear old tags */ 1429 /* Clear old tags */
1414 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 1430 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1415 I915_WRITE(FBC_TAG + (i * 4), 0); 1431 I915_WRITE(FBC_TAG + (i * 4), 0);
1416 1432
1417 /* Set it up... */ 1433 /* Set it up... */
1418 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; 1434 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1419 if (obj->tiling_mode != I915_TILING_NONE) 1435 fbc_ctl2 |= plane;
1420 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
1421 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 1436 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1422 I915_WRITE(FBC_FENCE_OFF, crtc->y); 1437 I915_WRITE(FBC_FENCE_OFF, crtc->y);
1423 1438
@@ -1425,36 +1440,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1425 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1440 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1426 if (IS_I945GM(dev)) 1441 if (IS_I945GM(dev))
1427 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 1442 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1428 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1443 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1429 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1444 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1430 if (obj->tiling_mode != I915_TILING_NONE) 1445 fbc_ctl |= obj->fence_reg;
1431 fbc_ctl |= dev_priv->cfb_fence;
1432 I915_WRITE(FBC_CONTROL, fbc_ctl);
1433
1434 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
1435 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
1436}
1437
1438void i8xx_disable_fbc(struct drm_device *dev)
1439{
1440 struct drm_i915_private *dev_priv = dev->dev_private;
1441 u32 fbc_ctl;
1442
1443 /* Disable compression */
1444 fbc_ctl = I915_READ(FBC_CONTROL);
1445 if ((fbc_ctl & FBC_CTL_EN) == 0)
1446 return;
1447
1448 fbc_ctl &= ~FBC_CTL_EN;
1449 I915_WRITE(FBC_CONTROL, fbc_ctl); 1446 I915_WRITE(FBC_CONTROL, fbc_ctl);
1450 1447
1451 /* Wait for compressing bit to clear */ 1448 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1452 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { 1449 cfb_pitch, crtc->y, intel_crtc->plane);
1453 DRM_DEBUG_KMS("FBC idle timed out\n");
1454 return;
1455 }
1456
1457 DRM_DEBUG_KMS("disabled FBC\n");
1458} 1450}
1459 1451
1460static bool i8xx_fbc_enabled(struct drm_device *dev) 1452static bool i8xx_fbc_enabled(struct drm_device *dev)
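The pitch handling in the rewritten i8xx_enable_fbc() reduces to a few lines of arithmetic; a hedged sketch with illustrative sizes (FBC_LL_SIZE and the stride shift are assumptions here): clamp the CFB pitch to the scanout pitch, then convert to the 64-byte units FBC_CTL expects:

#include <stdio.h>

#define FBC_LL_SIZE          1536   /* assumed line-length buffer size */
#define FBC_CTL_STRIDE_SHIFT 5      /* assumed stride field position */

int main(void)
{
    unsigned int cfb_size = 8 * 1024 * 1024;  /* stolen-memory CFB */
    unsigned int fb_pitch = 1366 * 4;         /* scanout bytes per line */
    unsigned int cfb_pitch = cfb_size / FBC_LL_SIZE;

    if (fb_pitch < cfb_pitch)                 /* clamp to scanout pitch */
        cfb_pitch = fb_pitch;

    cfb_pitch = (cfb_pitch / 64) - 1;         /* FBC_CTL wants 64B units */
    printf("stride field: %#x\n",
           (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT);
    return 0;
}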
@@ -1476,30 +1468,9 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1476 unsigned long stall_watermark = 200; 1468 unsigned long stall_watermark = 200;
1477 u32 dpfc_ctl; 1469 u32 dpfc_ctl;
1478 1470
1479 dpfc_ctl = I915_READ(DPFC_CONTROL);
1480 if (dpfc_ctl & DPFC_CTL_EN) {
1481 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1482 dev_priv->cfb_fence == obj->fence_reg &&
1483 dev_priv->cfb_plane == intel_crtc->plane &&
1484 dev_priv->cfb_y == crtc->y)
1485 return;
1486
1487 I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1488 intel_wait_for_vblank(dev, intel_crtc->pipe);
1489 }
1490
1491 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1492 dev_priv->cfb_fence = obj->fence_reg;
1493 dev_priv->cfb_plane = intel_crtc->plane;
1494 dev_priv->cfb_y = crtc->y;
1495
1496 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1471 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1497 if (obj->tiling_mode != I915_TILING_NONE) { 1472 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1498 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; 1473 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1499 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1500 } else {
1501 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1502 }
1503 1474
1504 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1475 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1505 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1476 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1512,7 +1483,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1512 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1483 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1513} 1484}
1514 1485
1515void g4x_disable_fbc(struct drm_device *dev) 1486static void g4x_disable_fbc(struct drm_device *dev)
1516{ 1487{
1517 struct drm_i915_private *dev_priv = dev->dev_private; 1488 struct drm_i915_private *dev_priv = dev->dev_private;
1518 u32 dpfc_ctl; 1489 u32 dpfc_ctl;
@@ -1567,32 +1538,12 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1567 u32 dpfc_ctl; 1538 u32 dpfc_ctl;
1568 1539
1569 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1540 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1570 if (dpfc_ctl & DPFC_CTL_EN) {
1571 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1572 dev_priv->cfb_fence == obj->fence_reg &&
1573 dev_priv->cfb_plane == intel_crtc->plane &&
1574 dev_priv->cfb_offset == obj->gtt_offset &&
1575 dev_priv->cfb_y == crtc->y)
1576 return;
1577
1578 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1579 intel_wait_for_vblank(dev, intel_crtc->pipe);
1580 }
1581
1582 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1583 dev_priv->cfb_fence = obj->fence_reg;
1584 dev_priv->cfb_plane = intel_crtc->plane;
1585 dev_priv->cfb_offset = obj->gtt_offset;
1586 dev_priv->cfb_y = crtc->y;
1587
1588 dpfc_ctl &= DPFC_RESERVED; 1541 dpfc_ctl &= DPFC_RESERVED;
1589 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 1542 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1590 if (obj->tiling_mode != I915_TILING_NONE) { 1543 /* Set persistent mode for front-buffer rendering, ala X. */
1591 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); 1544 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1592 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 1545 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1593 } else { 1546 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1594 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1595 }
1596 1547
1597 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1548 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1598 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1549 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1604,7 +1555,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1604 1555
1605 if (IS_GEN6(dev)) { 1556 if (IS_GEN6(dev)) {
1606 I915_WRITE(SNB_DPFC_CTL_SA, 1557 I915_WRITE(SNB_DPFC_CTL_SA,
1607 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); 1558 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1608 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 1559 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1609 sandybridge_blit_fbc_update(dev); 1560 sandybridge_blit_fbc_update(dev);
1610 } 1561 }
@@ -1612,7 +1563,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1612 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1563 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1613} 1564}
1614 1565
1615void ironlake_disable_fbc(struct drm_device *dev) 1566static void ironlake_disable_fbc(struct drm_device *dev)
1616{ 1567{
1617 struct drm_i915_private *dev_priv = dev->dev_private; 1568 struct drm_i915_private *dev_priv = dev->dev_private;
1618 u32 dpfc_ctl; 1569 u32 dpfc_ctl;
@@ -1644,24 +1595,109 @@ bool intel_fbc_enabled(struct drm_device *dev)
1644 return dev_priv->display.fbc_enabled(dev); 1595 return dev_priv->display.fbc_enabled(dev);
1645} 1596}
1646 1597
1647void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1598static void intel_fbc_work_fn(struct work_struct *__work)
1648{ 1599{
1649 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1600 struct intel_fbc_work *work =
1601 container_of(to_delayed_work(__work),
1602 struct intel_fbc_work, work);
1603 struct drm_device *dev = work->crtc->dev;
1604 struct drm_i915_private *dev_priv = dev->dev_private;
1605
1606 mutex_lock(&dev->struct_mutex);
1607 if (work == dev_priv->fbc_work) {
1608 /* Double check that we haven't switched fb without cancelling
1609 * the prior work.
1610 */
1611 if (work->crtc->fb == work->fb) {
1612 dev_priv->display.enable_fbc(work->crtc,
1613 work->interval);
1614
1615 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1616 dev_priv->cfb_fb = work->crtc->fb->base.id;
1617 dev_priv->cfb_y = work->crtc->y;
1618 }
1619
1620 dev_priv->fbc_work = NULL;
1621 }
1622 mutex_unlock(&dev->struct_mutex);
1623
1624 kfree(work);
1625}
1626
1627static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1628{
1629 if (dev_priv->fbc_work == NULL)
1630 return;
1631
1632 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1633
1634 /* Synchronisation is provided by struct_mutex and checking of
1635 * dev_priv->fbc_work, so we can perform the cancellation
1636 * entirely asynchronously.
1637 */
1638 if (cancel_delayed_work(&dev_priv->fbc_work->work))
1639 /* tasklet was killed before being run, clean up */
1640 kfree(dev_priv->fbc_work);
1641
1642 /* Mark the work as no longer wanted so that if it does
1643 * wake-up (because the work was already running and waiting
 1644 * for our mutex), it will discover that it is no longer
 1645 * necessary to run.
1646 */
1647 dev_priv->fbc_work = NULL;
1648}
1649
1650static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1651{
1652 struct intel_fbc_work *work;
1653 struct drm_device *dev = crtc->dev;
1654 struct drm_i915_private *dev_priv = dev->dev_private;
1650 1655
1651 if (!dev_priv->display.enable_fbc) 1656 if (!dev_priv->display.enable_fbc)
1652 return; 1657 return;
1653 1658
1654 dev_priv->display.enable_fbc(crtc, interval); 1659 intel_cancel_fbc_work(dev_priv);
1660
1661 work = kzalloc(sizeof *work, GFP_KERNEL);
1662 if (work == NULL) {
1663 dev_priv->display.enable_fbc(crtc, interval);
1664 return;
1665 }
1666
1667 work->crtc = crtc;
1668 work->fb = crtc->fb;
1669 work->interval = interval;
1670 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1671
1672 dev_priv->fbc_work = work;
1673
1674 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1675
 1676 /* Delay the actual enabling to let pageflipping cease and the
 1677 * display settle before starting the compression. Note that
1678 * this delay also serves a second purpose: it allows for a
1679 * vblank to pass after disabling the FBC before we attempt
1680 * to modify the control registers.
1681 *
1682 * A more complicated solution would involve tracking vblanks
1683 * following the termination of the page-flipping sequence
1684 * and indeed performing the enable as a co-routine and not
1685 * waiting synchronously upon the vblank.
1686 */
1687 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1655} 1688}
1656 1689
1657void intel_disable_fbc(struct drm_device *dev) 1690void intel_disable_fbc(struct drm_device *dev)
1658{ 1691{
1659 struct drm_i915_private *dev_priv = dev->dev_private; 1692 struct drm_i915_private *dev_priv = dev->dev_private;
1660 1693
1694 intel_cancel_fbc_work(dev_priv);
1695
1661 if (!dev_priv->display.disable_fbc) 1696 if (!dev_priv->display.disable_fbc)
1662 return; 1697 return;
1663 1698
1664 dev_priv->display.disable_fbc(dev); 1699 dev_priv->display.disable_fbc(dev);
1700 dev_priv->cfb_plane = -1;
1665} 1701}
1666 1702
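The deferral scheme above hinges on an ownership check: dev_priv->fbc_work always points at the most recent request, and the work function only acts if it still owns that pointer. A plain-C sketch of just that handshake, with no real workqueue ("running" a work item is a direct call):

#include <stdio.h>
#include <stdlib.h>

struct fbc_work { int interval; };

static struct fbc_work *current_work;   /* dev_priv->fbc_work analogue */

static void work_fn(struct fbc_work *work)
{
    if (work == current_work) {   /* still the most recent request? */
        printf("enabling FBC, interval %d\n", work->interval);
        current_work = NULL;
    }
    free(work);                   /* a stale item is simply freed */
}

static void schedule_enable(int interval)
{
    struct fbc_work *w = malloc(sizeof(*w));

    if (!w)
        return;
    w->interval = interval;
    current_work = w;             /* implicitly supersedes the old one */
}

int main(void)
{
    struct fbc_work *stale, *fresh;

    schedule_enable(50);
    stale = current_work;
    schedule_enable(500);         /* a second request arrives first */
    fresh = current_work;

    work_fn(stale);               /* runs late: sees it lost ownership */
    work_fn(fresh);               /* current: performs the enable */
    return 0;
}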
1667/** 1703/**
@@ -1760,8 +1796,13 @@ static void intel_update_fbc(struct drm_device *dev)
1760 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 1796 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1761 goto out_disable; 1797 goto out_disable;
1762 } 1798 }
1763 if (obj->tiling_mode != I915_TILING_X) { 1799
1764 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1800 /* The use of a CPU fence is mandatory in order to detect writes
1801 * by the CPU to the scanout and trigger updates to the FBC.
1802 */
1803 if (obj->tiling_mode != I915_TILING_X ||
1804 obj->fence_reg == I915_FENCE_REG_NONE) {
1805 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1765 dev_priv->no_fbc_reason = FBC_NOT_TILED; 1806 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1766 goto out_disable; 1807 goto out_disable;
1767 } 1808 }
@@ -1770,6 +1811,44 @@ static void intel_update_fbc(struct drm_device *dev)
1770 if (in_dbg_master()) 1811 if (in_dbg_master())
1771 goto out_disable; 1812 goto out_disable;
1772 1813
1814 /* If the scanout has not changed, don't modify the FBC settings.
1815 * Note that we make the fundamental assumption that the fb->obj
1816 * cannot be unpinned (and have its GTT offset and fence revoked)
1817 * without first being decoupled from the scanout and FBC disabled.
1818 */
1819 if (dev_priv->cfb_plane == intel_crtc->plane &&
1820 dev_priv->cfb_fb == fb->base.id &&
1821 dev_priv->cfb_y == crtc->y)
1822 return;
1823
1824 if (intel_fbc_enabled(dev)) {
1825 /* We update FBC along two paths, after changing fb/crtc
1826 * configuration (modeswitching) and after page-flipping
1827 * finishes. For the latter, we know that not only did
1828 * we disable the FBC at the start of the page-flip
1829 * sequence, but also more than one vblank has passed.
1830 *
1831 * For the former case of modeswitching, it is possible
1832 * to switch between two FBC valid configurations
1833 * instantaneously so we do need to disable the FBC
1834 * before we can modify its control registers. We also
1835 * have to wait for the next vblank for that to take
1836 * effect. However, since we delay enabling FBC we can
1837 * assume that a vblank has passed since disabling and
1838 * that we can safely alter the registers in the deferred
1839 * callback.
1840 *
1841 * In the scenario that we go from a valid to invalid
1842 * and then back to valid FBC configuration we have
1843 * no strict enforcement that a vblank occurred since
1844 * disabling the FBC. However, along all current pipe
1845 * disabling paths we do need to wait for a vblank at
1846 * some point. And we wait before enabling FBC anyway.
1847 */
1848 DRM_DEBUG_KMS("disabling active FBC for update\n");
1849 intel_disable_fbc(dev);
1850 }
1851
1773 intel_enable_fbc(crtc, 500); 1852 intel_enable_fbc(crtc, 500);
1774 return; 1853 return;
1775 1854
@@ -1812,14 +1891,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1812 } 1891 }
1813 1892
1814 dev_priv->mm.interruptible = false; 1893 dev_priv->mm.interruptible = false;
1815 ret = i915_gem_object_pin(obj, alignment, true); 1894 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1816 if (ret) 1895 if (ret)
1817 goto err_interruptible; 1896 goto err_interruptible;
1818 1897
1819 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
1820 if (ret)
1821 goto err_unpin;
1822
1823 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1898 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1824 * fence, whereas 965+ only requires a fence if using 1899 * fence, whereas 965+ only requires a fence if using
1825 * framebuffer compression. For simplicity, we always install 1900 * framebuffer compression. For simplicity, we always install
@@ -1841,10 +1916,8 @@ err_interruptible:
1841 return ret; 1916 return ret;
1842} 1917}
1843 1918
1844/* Assume fb object is pinned & idle & fenced and just update base pointers */ 1919static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1845static int 1920 int x, int y)
1846intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1847 int x, int y, enum mode_set_atomic state)
1848{ 1921{
1849 struct drm_device *dev = crtc->dev; 1922 struct drm_device *dev = crtc->dev;
1850 struct drm_i915_private *dev_priv = dev->dev_private; 1923 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1887,7 +1960,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1887 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 1960 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1888 break; 1961 break;
1889 default: 1962 default:
1890 DRM_ERROR("Unknown color depth\n"); 1963 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1891 return -EINVAL; 1964 return -EINVAL;
1892 } 1965 }
1893 if (INTEL_INFO(dev)->gen >= 4) { 1966 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1897,10 +1970,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1897 dspcntr &= ~DISPPLANE_TILED; 1970 dspcntr &= ~DISPPLANE_TILED;
1898 } 1971 }
1899 1972
1900 if (HAS_PCH_SPLIT(dev))
1901 /* must disable */
1902 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1903
1904 I915_WRITE(reg, dspcntr); 1973 I915_WRITE(reg, dspcntr);
1905 1974
1906 Start = obj->gtt_offset; 1975 Start = obj->gtt_offset;
@@ -1917,6 +1986,99 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1917 I915_WRITE(DSPADDR(plane), Start + Offset); 1986 I915_WRITE(DSPADDR(plane), Start + Offset);
1918 POSTING_READ(reg); 1987 POSTING_READ(reg);
1919 1988
1989 return 0;
1990}
1991
1992static int ironlake_update_plane(struct drm_crtc *crtc,
1993 struct drm_framebuffer *fb, int x, int y)
1994{
1995 struct drm_device *dev = crtc->dev;
1996 struct drm_i915_private *dev_priv = dev->dev_private;
1997 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1998 struct intel_framebuffer *intel_fb;
1999 struct drm_i915_gem_object *obj;
2000 int plane = intel_crtc->plane;
2001 unsigned long Start, Offset;
2002 u32 dspcntr;
2003 u32 reg;
2004
2005 switch (plane) {
2006 case 0:
2007 case 1:
2008 break;
2009 default:
2010 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2011 return -EINVAL;
2012 }
2013
2014 intel_fb = to_intel_framebuffer(fb);
2015 obj = intel_fb->obj;
2016
2017 reg = DSPCNTR(plane);
2018 dspcntr = I915_READ(reg);
2019 /* Mask out pixel format bits in case we change it */
2020 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2021 switch (fb->bits_per_pixel) {
2022 case 8:
2023 dspcntr |= DISPPLANE_8BPP;
2024 break;
2025 case 16:
2026 if (fb->depth != 16)
2027 return -EINVAL;
2028
2029 dspcntr |= DISPPLANE_16BPP;
2030 break;
2031 case 24:
2032 case 32:
2033 if (fb->depth == 24)
2034 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2035 else if (fb->depth == 30)
2036 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2037 else
2038 return -EINVAL;
2039 break;
2040 default:
2041 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2042 return -EINVAL;
2043 }
2044
2045 if (obj->tiling_mode != I915_TILING_NONE)
2046 dspcntr |= DISPPLANE_TILED;
2047 else
2048 dspcntr &= ~DISPPLANE_TILED;
2049
2050 /* must disable */
2051 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2052
2053 I915_WRITE(reg, dspcntr);
2054
2055 Start = obj->gtt_offset;
2056 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2057
2058 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2059 Start, Offset, x, y, fb->pitch);
2060 I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2061 I915_WRITE(DSPSURF(plane), Start);
2062 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2063 I915_WRITE(DSPADDR(plane), Offset);
2064 POSTING_READ(reg);
2065
2066 return 0;
2067}
2068
2069/* Assume fb object is pinned & idle & fenced and just update base pointers */
2070static int
2071intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2072 int x, int y, enum mode_set_atomic state)
2073{
2074 struct drm_device *dev = crtc->dev;
2075 struct drm_i915_private *dev_priv = dev->dev_private;
2076 int ret;
2077
2078 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2079 if (ret)
2080 return ret;
2081
1920 intel_update_fbc(dev); 2082 intel_update_fbc(dev);
1921 intel_increase_pllclock(crtc); 2083 intel_increase_pllclock(crtc);
1922 2084
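Both new update_plane hooks compute the scanout address the same way; a trivial sketch of that linear offset math with arbitrary numbers:

#include <stdio.h>

int main(void)
{
    unsigned long start = 0x100000;   /* obj->gtt_offset analogue */
    int pitch = 1920 * 4;             /* bytes per scanline at 32bpp */
    int bpp = 32, x = 10, y = 3;
    unsigned long offset = (unsigned long)y * pitch + x * (bpp / 8);

    /* gen4+ splits these: DSPSURF takes the base, DSPTILEOFF the x/y */
    printf("base %#lx, linear offset %#lx, tileoff %#x\n",
           start, offset, (y << 16) | x);
    return 0;
}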
@@ -1971,7 +2133,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1971 * This should only fail upon a hung GPU, in which case we 2133 * This should only fail upon a hung GPU, in which case we
1972 * can safely continue. 2134 * can safely continue.
1973 */ 2135 */
1974 ret = i915_gem_object_flush_gpu(obj); 2136 ret = i915_gem_object_finish_gpu(obj);
1975 (void) ret; 2137 (void) ret;
1976 } 2138 }
1977 2139
@@ -2622,6 +2784,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2622 /* For PCH DP, enable TRANS_DP_CTL */ 2784 /* For PCH DP, enable TRANS_DP_CTL */
2623 if (HAS_PCH_CPT(dev) && 2785 if (HAS_PCH_CPT(dev) &&
2624 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2786 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2787 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2625 reg = TRANS_DP_CTL(pipe); 2788 reg = TRANS_DP_CTL(pipe);
2626 temp = I915_READ(reg); 2789 temp = I915_READ(reg);
2627 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2790 temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -2629,7 +2792,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2629 TRANS_DP_BPC_MASK); 2792 TRANS_DP_BPC_MASK);
2630 temp |= (TRANS_DP_OUTPUT_ENABLE | 2793 temp |= (TRANS_DP_OUTPUT_ENABLE |
2631 TRANS_DP_ENH_FRAMING); 2794 TRANS_DP_ENH_FRAMING);
2632 temp |= TRANS_DP_8BPC; 2795 temp |= bpc << 9; /* same format but at 11:9 */
2633 2796
2634 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2797 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2635 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2798 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
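A hedged sketch of the bpc forwarding above: the pipe's BPC field (assumed at bits 7:5 of PIPECONF, per the >> 5 in the hunk) is replanted at bits 11:9 of TRANS_DP_CTL in the same encoding:

#include <stdio.h>

#define PIPE_BPC_MASK (7u << 5)   /* assumed field position */

int main(void)
{
    unsigned int pipeconf = 1u << 5;  /* one of the pipe bpc encodings */
    unsigned int bpc = (pipeconf & PIPE_BPC_MASK) >> 5;
    unsigned int trans_dp = bpc << 9; /* same format, moved to bits 11:9 */

    printf("bpc code %u -> TRANS_DP_CTL bits %#x\n", bpc, trans_dp);
    return 0;
}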
@@ -2732,9 +2895,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2732 2895
2733 intel_disable_plane(dev_priv, plane, pipe); 2896 intel_disable_plane(dev_priv, plane, pipe);
2734 2897
2735 if (dev_priv->cfb_plane == plane && 2898 if (dev_priv->cfb_plane == plane)
2736 dev_priv->display.disable_fbc) 2899 intel_disable_fbc(dev);
2737 dev_priv->display.disable_fbc(dev);
2738 2900
2739 intel_disable_pipe(dev_priv, pipe); 2901 intel_disable_pipe(dev_priv, pipe);
2740 2902
@@ -2898,9 +3060,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
2898 intel_crtc_dpms_overlay(intel_crtc, false); 3060 intel_crtc_dpms_overlay(intel_crtc, false);
2899 intel_crtc_update_cursor(crtc, false); 3061 intel_crtc_update_cursor(crtc, false);
2900 3062
2901 if (dev_priv->cfb_plane == plane && 3063 if (dev_priv->cfb_plane == plane)
2902 dev_priv->display.disable_fbc) 3064 intel_disable_fbc(dev);
2903 dev_priv->display.disable_fbc(dev);
2904 3065
2905 intel_disable_plane(dev_priv, plane, pipe); 3066 intel_disable_plane(dev_priv, plane, pipe);
2906 intel_disable_pipe(dev_priv, pipe); 3067 intel_disable_pipe(dev_priv, pipe);
@@ -4309,6 +4470,133 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4309 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4470 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4310} 4471}
4311 4472
4473/**
4474 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4475 * @crtc: CRTC structure
4476 *
4477 * A pipe may be connected to one or more outputs. Based on the depth of the
4478 * attached framebuffer, choose a good color depth to use on the pipe.
4479 *
4480 * If possible, match the pipe depth to the fb depth. In some cases, this
4481 * isn't ideal, because the connected output supports a lesser or restricted
4482 * set of depths. Resolve that here:
4483 * LVDS typically supports only 6bpc, so clamp down in that case
4484 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4485 * Displays may support a restricted set as well, check EDID and clamp as
4486 * appropriate.
4487 *
4488 * RETURNS:
4489 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4490 * true if they don't match).
4491 */
4492static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4493 unsigned int *pipe_bpp)
4494{
4495 struct drm_device *dev = crtc->dev;
4496 struct drm_i915_private *dev_priv = dev->dev_private;
4497 struct drm_encoder *encoder;
4498 struct drm_connector *connector;
4499 unsigned int display_bpc = UINT_MAX, bpc;
4500
4501 /* Walk the encoders & connectors on this crtc, get min bpc */
4502 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4503 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4504
4505 if (encoder->crtc != crtc)
4506 continue;
4507
4508 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4509 unsigned int lvds_bpc;
4510
4511 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4512 LVDS_A3_POWER_UP)
4513 lvds_bpc = 8;
4514 else
4515 lvds_bpc = 6;
4516
4517 if (lvds_bpc < display_bpc) {
4518 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4519 display_bpc = lvds_bpc;
4520 }
4521 continue;
4522 }
4523
4524 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4525 /* Use VBT settings if we have an eDP panel */
4526 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4527
4528 if (edp_bpc < display_bpc) {
4529 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4530 display_bpc = edp_bpc;
4531 }
4532 continue;
4533 }
4534
4535 /* Not one of the known troublemakers, check the EDID */
4536 list_for_each_entry(connector, &dev->mode_config.connector_list,
4537 head) {
4538 if (connector->encoder != encoder)
4539 continue;
4540
4541 if (connector->display_info.bpc < display_bpc) {
4542 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4543 display_bpc = connector->display_info.bpc;
4544 }
4545 }
4546
4547 /*
4548 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4549 * through, clamp it down. (Note: >12bpc will be caught below.)
4550 */
4551 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4552 if (display_bpc > 8 && display_bpc < 12) {
4553 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
4554 display_bpc = 12;
4555 } else {
4556 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
4557 display_bpc = 8;
4558 }
4559 }
4560 }
4561
4562 /*
4563 * We could just drive the pipe at the highest bpc all the time and
4564 * enable dithering as needed, but that costs bandwidth. So choose
4565 * the minimum value that expresses the full color range of the fb but
4566 * also stays within the max display bpc discovered above.
4567 */
4568
4569 switch (crtc->fb->depth) {
4570 case 8:
4571 bpc = 8; /* since we go through a colormap */
4572 break;
4573 case 15:
4574 case 16:
4575 bpc = 6; /* min is 18bpp */
4576 break;
4577 case 24:
4578 bpc = min((unsigned int)8, display_bpc);
4579 break;
4580 case 30:
4581 bpc = min((unsigned int)10, display_bpc);
4582 break;
4583 case 48:
4584 bpc = min((unsigned int)12, display_bpc);
4585 break;
4586 default:
4587 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4588 bpc = min((unsigned int)8, display_bpc);
4589 break;
4590 }
4591
4592 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
4593 bpc, display_bpc);
4594
4595 *pipe_bpp = bpc * 3;
4596
4597 return display_bpc != bpc;
4598}
4599
4312static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4600static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4313 struct drm_display_mode *mode, 4601 struct drm_display_mode *mode,
4314 struct drm_display_mode *adjusted_mode, 4602 struct drm_display_mode *adjusted_mode,
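A standalone sketch of the clamping policy the new intel_choose_pipe_bpp_dither() implements, using the same depth-to-bpc table as the hunk above: pick the bpc the framebuffer needs, cap it at what the display chain allows, and request dithering when the two differ:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* Returns nonzero when dithering is needed (display and pipe differ). */
static int choose_pipe_bpp(int fb_depth, unsigned int display_bpc,
                           unsigned int *pipe_bpp)
{
    unsigned int bpc;

    switch (fb_depth) {
    case 8:  bpc = 8; break;                  /* goes through a colormap */
    case 15:
    case 16: bpc = 6; break;                  /* minimum is 18bpp */
    case 24: bpc = min_u(8, display_bpc); break;
    case 30: bpc = min_u(10, display_bpc); break;
    case 48: bpc = min_u(12, display_bpc); break;
    default: bpc = min_u(8, display_bpc); break;
    }

    *pipe_bpp = bpc * 3;
    return display_bpc != bpc;
}

int main(void)
{
    unsigned int pipe_bpp;
    int dither = choose_pipe_bpp(16, 8, &pipe_bpp); /* 16bpp fb, 8bpc panel */

    printf("pipe_bpp=%u dither=%d\n", pipe_bpp, dither);  /* 18, 1 */
    return 0;
}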
@@ -4721,7 +5009,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4721 struct fdi_m_n m_n = {0}; 5009 struct fdi_m_n m_n = {0};
4722 u32 temp; 5010 u32 temp;
4723 u32 lvds_sync = 0; 5011 u32 lvds_sync = 0;
4724 int target_clock, pixel_multiplier, lane, link_bw, bpp, factor; 5012 int target_clock, pixel_multiplier, lane, link_bw, factor;
5013 unsigned int pipe_bpp;
5014 bool dither;
4725 5015
4726 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 5016 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4727 if (encoder->base.crtc != crtc) 5017 if (encoder->base.crtc != crtc)
@@ -4848,56 +5138,37 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4848 /* determine panel color depth */ 5138 /* determine panel color depth */
4849 temp = I915_READ(PIPECONF(pipe)); 5139 temp = I915_READ(PIPECONF(pipe));
4850 temp &= ~PIPE_BPC_MASK; 5140 temp &= ~PIPE_BPC_MASK;
4851 if (is_lvds) { 5141 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
4852 /* the BPC will be 6 if it is 18-bit LVDS panel */ 5142 switch (pipe_bpp) {
4853 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 5143 case 18:
4854 temp |= PIPE_8BPC; 5144 temp |= PIPE_6BPC;
4855 else
4856 temp |= PIPE_6BPC;
4857 } else if (has_edp_encoder) {
4858 switch (dev_priv->edp.bpp/3) {
4859 case 8:
4860 temp |= PIPE_8BPC;
4861 break;
4862 case 10:
4863 temp |= PIPE_10BPC;
4864 break;
4865 case 6:
4866 temp |= PIPE_6BPC;
4867 break;
4868 case 12:
4869 temp |= PIPE_12BPC;
4870 break;
4871 }
4872 } else
4873 temp |= PIPE_8BPC;
4874 I915_WRITE(PIPECONF(pipe), temp);
4875
4876 switch (temp & PIPE_BPC_MASK) {
4877 case PIPE_8BPC:
4878 bpp = 24;
4879 break; 5145 break;
4880 case PIPE_10BPC: 5146 case 24:
4881 bpp = 30; 5147 temp |= PIPE_8BPC;
4882 break; 5148 break;
4883 case PIPE_6BPC: 5149 case 30:
4884 bpp = 18; 5150 temp |= PIPE_10BPC;
4885 break; 5151 break;
4886 case PIPE_12BPC: 5152 case 36:
4887 bpp = 36; 5153 temp |= PIPE_12BPC;
4888 break; 5154 break;
4889 default: 5155 default:
4890 DRM_ERROR("unknown pipe bpc value\n"); 5156 WARN(1, "intel_choose_pipe_bpp returned invalid value\n");
4891 bpp = 24; 5157 temp |= PIPE_8BPC;
5158 pipe_bpp = 24;
5159 break;
4892 } 5160 }
4893 5161
5162 intel_crtc->bpp = pipe_bpp;
5163 I915_WRITE(PIPECONF(pipe), temp);
5164
4894 if (!lane) { 5165 if (!lane) {
4895 /* 5166 /*
4896 * Account for spread spectrum to avoid 5167 * Account for spread spectrum to avoid
4897 * oversubscribing the link. Max center spread 5168 * oversubscribing the link. Max center spread
4898 * is 2.5%; use 5% for safety's sake. 5169 * is 2.5%; use 5% for safety's sake.
4899 */ 5170 */
4900 u32 bps = target_clock * bpp * 21 / 20; 5171 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4901 lane = bps / (link_bw * 8) + 1; 5172 lane = bps / (link_bw * 8) + 1;
4902 } 5173 }
4903 5174
@@ -4905,7 +5176,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4905 5176
4906 if (pixel_multiplier > 1) 5177 if (pixel_multiplier > 1)
4907 link_bw *= pixel_multiplier; 5178 link_bw *= pixel_multiplier;
4908 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 5179 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5180 &m_n);
4909 5181
4910 /* Ironlake: try to setup display ref clock before DPLL 5182 /* Ironlake: try to setup display ref clock before DPLL
4911 * enabling. This is only under driver's control after 5183 * enabling. This is only under driver's control after
@@ -5108,14 +5380,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5108 I915_WRITE(PCH_LVDS, temp); 5380 I915_WRITE(PCH_LVDS, temp);
5109 } 5381 }
5110 5382
5111 /* set the dithering flag and clear for anything other than a panel. */
5112 pipeconf &= ~PIPECONF_DITHER_EN; 5383 pipeconf &= ~PIPECONF_DITHER_EN;
5113 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5384 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5114 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { 5385 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5115 pipeconf |= PIPECONF_DITHER_EN; 5386 pipeconf |= PIPECONF_DITHER_EN;
5116 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5387 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5117 } 5388 }
5118
5119 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5389 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5120 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5390 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5121 } else { 5391 } else {
@@ -5435,21 +5705,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5435 goto fail_locked; 5705 goto fail_locked;
5436 } 5706 }
5437 5707
5438 ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 5708 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
5439 if (ret) {
5440 DRM_ERROR("failed to pin cursor bo\n");
5441 goto fail_locked;
5442 }
5443
5444 ret = i915_gem_object_set_to_gtt_domain(obj, 0);
5445 if (ret) { 5709 if (ret) {
5446 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5710 DRM_ERROR("failed to move cursor bo into the GTT\n");
5447 goto fail_unpin; 5711 goto fail_locked;
5448 } 5712 }
5449 5713
5450 ret = i915_gem_object_put_fence(obj); 5714 ret = i915_gem_object_put_fence(obj);
5451 if (ret) { 5715 if (ret) {
5452 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5716 DRM_ERROR("failed to release fence for cursor");
5453 goto fail_unpin; 5717 goto fail_unpin;
5454 } 5718 }
5455 5719
@@ -6152,6 +6416,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
6152 drm_gem_object_unreference(&work->pending_flip_obj->base); 6416 drm_gem_object_unreference(&work->pending_flip_obj->base);
6153 drm_gem_object_unreference(&work->old_fb_obj->base); 6417 drm_gem_object_unreference(&work->old_fb_obj->base);
6154 6418
6419 intel_update_fbc(work->dev);
6155 mutex_unlock(&work->dev->struct_mutex); 6420 mutex_unlock(&work->dev->struct_mutex);
6156 kfree(work); 6421 kfree(work);
6157} 6422}
@@ -6516,6 +6781,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6516 if (ret) 6781 if (ret)
6517 goto cleanup_pending; 6782 goto cleanup_pending;
6518 6783
6784 intel_disable_fbc(dev);
6519 mutex_unlock(&dev->struct_mutex); 6785 mutex_unlock(&dev->struct_mutex);
6520 6786
6521 trace_i915_flip_request(intel_crtc->plane, obj); 6787 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6644,6 +6910,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
6644 6910
6645 intel_crtc_reset(&intel_crtc->base); 6911 intel_crtc_reset(&intel_crtc->base);
6646 intel_crtc->active = true; /* force the pipe off on setup_init_config */ 6912 intel_crtc->active = true; /* force the pipe off on setup_init_config */
6913 intel_crtc->bpp = 24; /* default for pre-Ironlake */
6647 6914
6648 if (HAS_PCH_SPLIT(dev)) { 6915 if (HAS_PCH_SPLIT(dev)) {
6649 intel_helper_funcs.prepare = ironlake_crtc_prepare; 6916 intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6870,6 +7137,11 @@ int intel_framebuffer_init(struct drm_device *dev,
6870 switch (mode_cmd->bpp) { 7137 switch (mode_cmd->bpp) {
6871 case 8: 7138 case 8:
6872 case 16: 7139 case 16:
7140 /* Only pre-ILK can handle 5:5:5 */
7141 if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
7142 return -EINVAL;
7143 break;
7144
6873 case 24: 7145 case 24:
6874 case 32: 7146 case 32:
6875 break; 7147 break;
@@ -7284,6 +7556,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
7284 mutex_unlock(&dev_priv->dev->struct_mutex); 7556 mutex_unlock(&dev_priv->dev->struct_mutex);
7285} 7557}
7286 7558
7559void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
7560{
7561 int min_freq = 15;
7562 int gpu_freq, ia_freq, max_ia_freq;
7563 int scaling_factor = 180;
7564
7565 max_ia_freq = cpufreq_quick_get_max(0);
7566 /*
7567 * Default to measured freq if none found, PCU will ensure we don't go
7568 * over
7569 */
7570 if (!max_ia_freq)
7571 max_ia_freq = tsc_khz;
7572
7573 /* Convert from kHz to MHz */
7574 max_ia_freq /= 1000;
7575
7576 mutex_lock(&dev_priv->dev->struct_mutex);
7577
7578 /*
7579 * For each potential GPU frequency, load a ring frequency we'd like
7580 * to use for memory access. We do this by specifying the IA frequency
7581 * the PCU should use as a reference to determine the ring frequency.
7582 */
7583 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
7584 gpu_freq--) {
7585 int diff = dev_priv->max_delay - gpu_freq;
7586
7587 /*
7588 * For GPU frequencies less than 750MHz, just use the lowest
7589 * ring freq.
7590 */
7591 if (gpu_freq < min_freq)
7592 ia_freq = 800;
7593 else
7594 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
7595 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
7596
7597 I915_WRITE(GEN6_PCODE_DATA,
7598 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
7599 gpu_freq);
7600 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
7601 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
7602 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
7603 GEN6_PCODE_READY) == 0, 10)) {
7604 DRM_ERROR("pcode write of freq table timed out\n");
7605 continue;
7606 }
7607 }
7608
7609 mutex_unlock(&dev_priv->dev->struct_mutex);
7610}
7611
7287static void ironlake_init_clock_gating(struct drm_device *dev) 7612static void ironlake_init_clock_gating(struct drm_device *dev)
7288{ 7613{
7289 struct drm_i915_private *dev_priv = dev->dev_private; 7614 struct drm_i915_private *dev_priv = dev->dev_private;
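The frequency-table math in gen6_update_ring_freq() is easy to check in isolation; a sketch with invented limits: each step below the maximum GPU frequency lowers the requested IA reference by scaling_factor/2 MHz, then rounds to the nearest 100 MHz bucket for the PCU (DIV_ROUND_CLOSEST reproduced for positive operands):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
    int max_ia_freq = 3400;               /* MHz, e.g. from cpufreq */
    int max_delay = 18, min_delay = 6;    /* invented GPU freq steps */
    int min_freq = 15, scaling_factor = 180;
    int gpu_freq;

    for (gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
        int diff = max_delay - gpu_freq;
        int ia_freq;

        if (gpu_freq < min_freq)
            ia_freq = 800;                /* floor for low GPU clocks */
        else
            ia_freq = max_ia_freq - (diff * scaling_factor) / 2;
        ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

        printf("gpu step %2d -> IA reference %d\n", gpu_freq, ia_freq);
    }
    return 0;
}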
@@ -7640,9 +7965,11 @@ static void intel_init_display(struct drm_device *dev)
7640 if (HAS_PCH_SPLIT(dev)) { 7965 if (HAS_PCH_SPLIT(dev)) {
7641 dev_priv->display.dpms = ironlake_crtc_dpms; 7966 dev_priv->display.dpms = ironlake_crtc_dpms;
7642 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 7967 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
7968 dev_priv->display.update_plane = ironlake_update_plane;
7643 } else { 7969 } else {
7644 dev_priv->display.dpms = i9xx_crtc_dpms; 7970 dev_priv->display.dpms = i9xx_crtc_dpms;
7645 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 7971 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
7972 dev_priv->display.update_plane = i9xx_update_plane;
7646 } 7973 }
7647 7974
7648 if (I915_HAS_FBC(dev)) { 7975 if (I915_HAS_FBC(dev)) {
@@ -7939,8 +8266,10 @@ void intel_modeset_init(struct drm_device *dev)
7939 intel_init_emon(dev); 8266 intel_init_emon(dev);
7940 } 8267 }
7941 8268
7942 if (IS_GEN6(dev)) 8269 if (IS_GEN6(dev) || IS_GEN7(dev)) {
7943 gen6_enable_rps(dev_priv); 8270 gen6_enable_rps(dev_priv);
8271 gen6_update_ring_freq(dev_priv);
8272 }
7944 8273
7945 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 8274 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7946 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 8275 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -7976,12 +8305,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
7976 intel_increase_pllclock(crtc); 8305 intel_increase_pllclock(crtc);
7977 } 8306 }
7978 8307
7979 if (dev_priv->display.disable_fbc) 8308 intel_disable_fbc(dev);
7980 dev_priv->display.disable_fbc(dev);
7981 8309
7982 if (IS_IRONLAKE_M(dev)) 8310 if (IS_IRONLAKE_M(dev))
7983 ironlake_disable_drps(dev); 8311 ironlake_disable_drps(dev);
7984 if (IS_GEN6(dev)) 8312 if (IS_GEN6(dev) || IS_GEN7(dev))
7985 gen6_disable_rps(dev); 8313 gen6_disable_rps(dev);
7986 8314
7987 if (IS_IRONLAKE_M(dev)) 8315 if (IS_IRONLAKE_M(dev))
@@ -7994,6 +8322,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
7994 drm_irq_uninstall(dev); 8322 drm_irq_uninstall(dev);
7995 cancel_work_sync(&dev_priv->hotplug_work); 8323 cancel_work_sync(&dev_priv->hotplug_work);
7996 8324
8325 /* flush any delayed tasks or pending work */
8326 flush_scheduled_work();
8327
7997 /* Shut off idle work before the crtcs get freed. */ 8328 /* Shut off idle work before the crtcs get freed. */
7998 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8329 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7999 intel_crtc = to_intel_crtc(crtc); 8330 intel_crtc = to_intel_crtc(crtc);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index e2aced6eec4c..f797fb58ba9c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -178,12 +178,14 @@ intel_dp_link_clock(uint8_t link_bw)
178static int 178static int
179intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) 179intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
180{ 180{
181 struct drm_i915_private *dev_priv = dev->dev_private; 181 struct drm_crtc *crtc = intel_dp->base.base.crtc;
182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
183 int bpp = 24;
182 184
183 if (is_edp(intel_dp)) 185 if (intel_crtc)
184 return (pixel_clock * dev_priv->edp.bpp + 7) / 8; 186 bpp = intel_crtc->bpp;
185 else 187
186 return pixel_clock * 3; 188 return (pixel_clock * bpp + 7) / 8;
187} 189}
188 190
189static int 191static int
@@ -681,7 +683,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
681 struct drm_encoder *encoder; 683 struct drm_encoder *encoder;
682 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct drm_i915_private *dev_priv = dev->dev_private;
683 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
684 int lane_count = 4, bpp = 24; 686 int lane_count = 4;
685 struct intel_dp_m_n m_n; 687 struct intel_dp_m_n m_n;
686 int pipe = intel_crtc->pipe; 688 int pipe = intel_crtc->pipe;
687 689
@@ -700,7 +702,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
700 break; 702 break;
701 } else if (is_edp(intel_dp)) { 703 } else if (is_edp(intel_dp)) {
702 lane_count = dev_priv->edp.lanes; 704 lane_count = dev_priv->edp.lanes;
703 bpp = dev_priv->edp.bpp;
704 break; 705 break;
705 } 706 }
706 } 707 }
@@ -710,7 +711,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
710 * the number of bytes_per_pixel post-LUT, which we always 711 * the number of bytes_per_pixel post-LUT, which we always
711 * set up for 8-bits of R/G/B, or 3 bytes total. 712 * set up for 8-bits of R/G/B, or 3 bytes total.
712 */ 713 */
713 intel_dp_compute_m_n(bpp, lane_count, 714 intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
714 mode->clock, adjusted_mode->clock, &m_n); 715 mode->clock, adjusted_mode->clock, &m_n);
715 716
716 if (HAS_PCH_SPLIT(dev)) { 717 if (HAS_PCH_SPLIT(dev)) {
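A one-function sketch of the new bandwidth check: the required link rate is pixel clock (kHz) times bits per pixel, rounded up to whole bytes, now taken from the per-crtc bpp rather than a fixed 24 or the eDP VBT value:

#include <stdio.h>

static int dp_link_required(int pixel_clock, int bpp)
{
    return (pixel_clock * bpp + 7) / 8;   /* round up to whole bytes */
}

int main(void)
{
    printf("1080p at 8bpc: %d\n", dp_link_required(148500, 24));
    printf("1080p at 6bpc: %d\n", dp_link_required(148500, 18));
    return 0;
}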
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9ffa61eb4d7e..6e990f9760ef 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -170,6 +170,7 @@ struct intel_crtc {
170 int16_t cursor_x, cursor_y; 170 int16_t cursor_x, cursor_y;
171 int16_t cursor_width, cursor_height; 171 int16_t cursor_width, cursor_height;
172 bool cursor_visible; 172 bool cursor_visible;
173 unsigned int bpp;
173}; 174};
174 175
175#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 176#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -233,6 +234,13 @@ struct intel_unpin_work {
233 bool enable_stall_check; 234 bool enable_stall_check;
234}; 235};
235 236
237struct intel_fbc_work {
238 struct delayed_work work;
239 struct drm_crtc *crtc;
240 struct drm_framebuffer *fb;
241 int interval;
242};
243
236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 244int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); 245extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
238 246
@@ -317,6 +325,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
317extern void ironlake_enable_drps(struct drm_device *dev); 325extern void ironlake_enable_drps(struct drm_device *dev);
318extern void ironlake_disable_drps(struct drm_device *dev); 326extern void ironlake_disable_drps(struct drm_device *dev);
319extern void gen6_enable_rps(struct drm_i915_private *dev_priv); 327extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
328extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
320extern void gen6_disable_rps(struct drm_device *dev); 329extern void gen6_disable_rps(struct drm_device *dev);
321extern void intel_init_emon(struct drm_device *dev); 330extern void intel_init_emon(struct drm_device *dev);
322 331
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index aa0a8e83142e..1ed8e6903915 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -124,12 +124,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
124 u32 sdvox; 124 u32 sdvox;
125 125
126 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; 126 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
127 sdvox |= intel_hdmi->color_range; 127 if (!HAS_PCH_SPLIT(dev))
128 sdvox |= intel_hdmi->color_range;
128 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 129 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
129 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 130 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
130 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 131 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
131 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 132 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
132 133
134 if (intel_crtc->bpp > 24)
135 sdvox |= COLOR_FORMAT_12bpc;
136 else
137 sdvox |= COLOR_FORMAT_8bpc;
138
133 /* Required on CPT */ 139 /* Required on CPT */
134 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) 140 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
135 sdvox |= HDMI_MODE_SELECT; 141 sdvox |= HDMI_MODE_SELECT;
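A tiny sketch of the deep-color selection above; the two COLOR_FORMAT bit encodings here are placeholders, only the bpp > 24 test comes from the hunk:

#include <stdio.h>

#define COLOR_FORMAT_8bpc  (0u << 26)   /* placeholder encoding */
#define COLOR_FORMAT_12bpc (3u << 26)   /* placeholder encoding */

int main(void)
{
    unsigned int bpp = 36;   /* intel_crtc->bpp for a 12bpc pipe */
    unsigned int sdvox = 0;

    sdvox |= bpp > 24 ? COLOR_FORMAT_12bpc : COLOR_FORMAT_8bpc;
    printf("sdvox=%#x\n", sdvox);
    return 0;
}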
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d2c710422908..b7c5ddb564d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -297,19 +297,26 @@ static int intel_opregion_video_event(struct notifier_block *nb,
297 /* The only video events relevant to opregion are 0x80. These indicate 297 /* The only video events relevant to opregion are 0x80. These indicate
298 either a docking event, lid switch or display switch request. In 298 either a docking event, lid switch or display switch request. In
299 Linux, these are handled by the dock, button and video drivers. 299 Linux, these are handled by the dock, button and video drivers.
300 We might want to fix the video driver to be opregion-aware in 300 */
301 future, but right now we just indicate to the firmware that the
302 request has been handled */
303 301
304 struct opregion_acpi *acpi; 302 struct opregion_acpi *acpi;
303 struct acpi_bus_event *event = data;
304 int ret = NOTIFY_OK;
305
306 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
307 return NOTIFY_DONE;
305 308
306 if (!system_opregion) 309 if (!system_opregion)
307 return NOTIFY_DONE; 310 return NOTIFY_DONE;
308 311
309 acpi = system_opregion->acpi; 312 acpi = system_opregion->acpi;
313
314 if (event->type == 0x80 && !(acpi->cevt & 0x1))
315 ret = NOTIFY_BAD;
316
310 acpi->csts = 0; 317 acpi->csts = 0;
311 318
312 return NOTIFY_OK; 319 return ret;
313} 320}
314 321
315static struct notifier_block intel_opregion_notifier = { 322static struct notifier_block intel_opregion_notifier = {
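The reworked notifier now makes three distinct decisions; a plain-C sketch (the NOTIFY_* values are stand-ins, and ACPI_VIDEO_CLASS is assumed to be "video"): ignore events from other device classes, refuse a 0x80 event the firmware's cevt mask does not advertise, acknowledge the rest:

#include <stdio.h>
#include <string.h>

enum { NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD };   /* stand-in values */

static int video_event(const char *device_class, int type,
                       unsigned int cevt)
{
    if (strcmp(device_class, "video") != 0)
        return NOTIFY_DONE;                /* not for us */
    if (type == 0x80 && !(cevt & 0x1))
        return NOTIFY_BAD;                 /* firmware can't take it */
    return NOTIFY_OK;
}

int main(void)
{
    printf("%d %d %d\n",
           video_event("battery", 0x80, 0),   /* DONE */
           video_event("video",   0x80, 0),   /* BAD */
           video_event("video",   0x80, 1));  /* OK */
    return 0;
}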
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9e2959bc91cd..d36038086826 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
-	if (ret != 0)
-		goto out_unpin;
-
 	ret = i915_gem_object_put_fence(new_bo);
 	if (ret)
 		goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 95c4b1429935..e9615685a39c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->cache_level = I915_CACHE_LLC;
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret)
@@ -776,7 +777,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->cache_level = I915_CACHE_LLC;
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
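Both call sites switch from poking obj->cache_level directly to the setter. The point of the helper, sketched here as a contrast rather than quoted from the implementation, is that it can validate the request and fix up any existing GTT binding instead of silently leaving stale PTEs behind:

	obj->cache_level = I915_CACHE_LLC;	/* old: raw field write, no PTE fixup */
	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
						/* new: helper may rebind/update mappings */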
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 113e4e7264cd..210d570fd516 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1236,6 +1236,8 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
 		     struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = &intel_tv->base.base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
@@ -1258,6 +1260,10 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
 	/* Poll for TV detection */
 	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
 	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+	if (intel_crtc->pipe == 1)
+		tv_ctl |= TV_ENC_PIPEB_SELECT;
+	else
+		tv_ctl &= ~TV_ENC_PIPEB_SELECT;
 
 	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
 	tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1277,26 +1283,26 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
 		      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
 	type = -1;
-	if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+	tv_dac = I915_READ(TV_DAC);
 	DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
 	/*
 	 *  A     B     C
 	 *  0     1     1    Composite
 	 *  1     0     X    svideo
 	 *  0     0     0    Component
 	 */
 	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
 		DRM_DEBUG_KMS("Detected Composite TV connection\n");
 		type = DRM_MODE_CONNECTOR_Composite;
 	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
 		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
 		type = DRM_MODE_CONNECTOR_SVIDEO;
 	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
 		DRM_DEBUG_KMS("Detected Component TV connection\n");
 		type = DRM_MODE_CONNECTOR_Component;
 	} else {
 		DRM_DEBUG_KMS("Unrecognised TV connection\n");
-	}
+		type = -1;
 	}
 
 	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
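The A/B/C sense table in the comment maps one-to-one onto the conditionals; as a compact restatement (a hypothetical helper, not part of the patch):

	/* Hypothetical helper summarising the decode table above. */
	static int intel_tv_type_from_sense(u32 tv_dac)
	{
		u32 sense = tv_dac & TVDAC_SENSE_MASK;

		if (sense == (TVDAC_B_SENSE | TVDAC_C_SENSE))
			return DRM_MODE_CONNECTOR_Composite;	/* 0 1 1 */
		if ((tv_dac & (TVDAC_A_SENSE | TVDAC_B_SENSE)) == TVDAC_A_SENSE)
			return DRM_MODE_CONNECTOR_SVIDEO;	/* 1 0 X */
		if (sense == 0)
			return DRM_MODE_CONNECTOR_Component;	/* 0 0 0 */
		return -1;					/* unrecognised */
	}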
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 729d5fd7c88d..b311faba34f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -135,13 +135,14 @@ static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
 	int i;
 
 	if (dev_priv->card_type >= NV_50) {
-		uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
-
-		if (!vbios_vram)
-			vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
+		u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
+		if (!addr) {
+			addr  = (u64)nv_rd32(dev, 0x1700) << 16;
+			addr += 0xf0000;
+		}
 
 		old_bar0_pramin = nv_rd32(dev, 0x1700);
-		nv_wr32(dev, 0x1700, vbios_vram >> 16);
+		nv_wr32(dev, 0x1700, addr >> 16);
 	}
 
 	/* bail if no rom signature */
@@ -5186,7 +5187,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	load_table_ptr = ROM16(bios->data[bitentry->offset]);
 
 	if (load_table_ptr == 0x0) {
-		NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
+		NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
 		return -EINVAL;
 	}
 
@@ -5965,6 +5966,12 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
 		if (cte->type == DCB_CONNECTOR_HDMI_1)
 			cte->type = DCB_CONNECTOR_DVI_I;
 	}
+
+	/* Gigabyte GV-NX86T512H */
+	if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+		if (cte->type == DCB_CONNECTOR_HDMI_1)
+			cte->type = DCB_CONNECTOR_DVI_I;
+	}
 }
 
 static const u8 hpd_gpio[16] = {
@@ -6377,6 +6384,37 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 		}
 	}
 
+	/* Some other twisted XFX board (rhbz#694914)
+	 *
+	 * The DVI/VGA encoder combo that's supposed to represent the
+	 * DVI-I connector actually points at two different ones, and
+	 * the HDMI connector ends up paired with the VGA instead.
+	 *
+	 * The connector table has no entry at all for VGA, pointing it
+	 * at an invalid conntab entry 2, so we figure it out ourselves.
+	 */
+	if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
+		if (idx == 0) {
+			*conn = 0x02002300; /* VGA, connector 2 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 1) {
+			*conn = 0x01010312; /* DVI, connector 0 */
+			*conf = 0x00020030;
+		} else
+		if (idx == 2) {
+			*conn = 0x04020310; /* VGA, connector 0 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 3) {
+			*conn = 0x02021322; /* HDMI, connector 1 */
+			*conf = 0x00020010;
+		} else {
+			*conn = 0x0000000e; /* EOL */
+			*conf = 0x00000000;
+		}
+	}
+
 	return true;
 }
 
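The load_vbios_pramin change is a 32-bit overflow fix: the old expression did its shift in 32 bits, truncating the VBIOS image address whenever the image sits above 4GiB of VRAM. A sketch of the difference (register semantics inferred from the code above):

	u32 reg = nv_rd32(dev, 0x619f04);

	/* old: evaluated in 32 bits, high address bits are lost */
	u32 bad  = (reg & ~0xff) << 8;

	/* new: widen to 64 bits first, then shift */
	u64 good = (u64)(reg & 0xffffff00) << 8;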
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2ad49cbf7c8b..890d50e4d682 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,16 +49,12 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	if (nvbo->vma.node) {
-		nouveau_vm_unmap(&nvbo->vma);
-		nouveau_vm_put(&nvbo->vma);
-	}
 	kfree(nvbo);
 }
 
 static void
 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, int *size, int *page_shift)
+		       int *align, int *size)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
@@ -82,67 +78,51 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 			}
 		}
 	} else {
-		if (likely(dev_priv->chan_vm)) {
-			if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
-				*page_shift = dev_priv->chan_vm->lpg_shift;
-			else
-				*page_shift = dev_priv->chan_vm->spg_shift;
-		} else {
-			*page_shift = 12;
-		}
-
-		*size = roundup(*size, (1 << *page_shift));
-		*align = max((1 << *page_shift), *align);
+		*size = roundup(*size, (1 << nvbo->page_shift));
+		*align = max((1 << nvbo->page_shift), *align);
 	}
 
 	*size = roundup(*size, PAGE_SIZE);
 }
 
 int
-nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
-	       int size, int align, uint32_t flags, uint32_t tile_mode,
-	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
+nouveau_bo_new(struct drm_device *dev, int size, int align,
+	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+	       struct nouveau_bo **pnvbo)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0, page_shift = 0;
+	int ret;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
 		return -ENOMEM;
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
+	INIT_LIST_HEAD(&nvbo->vma_list);
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
-	align >>= PAGE_SHIFT;
-
-	if (dev_priv->chan_vm) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
-				     NV_MEM_ACCESS_RW, &nvbo->vma);
-		if (ret) {
-			kfree(nvbo);
-			return ret;
-		}
+	nvbo->page_shift = 12;
+	if (dev_priv->bar1_vm) {
+		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
+			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
 	}
 
+	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
-	nvbo->channel = chan;
 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
-			  ttm_bo_type_device, &nvbo->placement, align, 0,
-			  false, NULL, size, nouveau_bo_del_ttm);
+			  ttm_bo_type_device, &nvbo->placement,
+			  align >> PAGE_SHIFT, 0, false, NULL, size,
+			  nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
-	nvbo->channel = NULL;
 
-	if (nvbo->vma.node)
-		nvbo->bo.offset = nvbo->vma.offset;
 	*pnvbo = nvbo;
 	return 0;
 }
@@ -312,8 +292,6 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 	if (ret)
 		return ret;
 
-	if (nvbo->vma.node)
-		nvbo->bo.offset = nvbo->vma.offset;
 	return 0;
 }
 
@@ -440,7 +418,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			     TTM_MEMTYPE_FLAG_CMA;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
-		man->gpu_offset = dev_priv->gart_info.aper_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unknown GART type: %d\n",
@@ -501,19 +478,12 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *old_node = old_mem->mm_node;
-	struct nouveau_mem *new_node = new_mem->mm_node;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
 	u32 page_count = new_mem->num_pages;
-	u64 src_offset, dst_offset;
 	int ret;
 
-	src_offset = old_node->tmp_vma.offset;
-	if (new_node->tmp_vma.node)
-		dst_offset = new_node->tmp_vma.offset;
-	else
-		dst_offset = nvbo->vma.offset;
-
 	page_count = new_mem->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -547,19 +517,13 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *old_node = old_mem->mm_node;
-	struct nouveau_mem *new_node = new_mem->mm_node;
+	struct nouveau_mem *node = old_mem->mm_node;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
-	u64 src_offset, dst_offset;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
 	int ret;
 
-	src_offset = old_node->tmp_vma.offset;
-	if (new_node->tmp_vma.node)
-		dst_offset = new_node->tmp_vma.offset;
-	else
-		dst_offset = nvbo->vma.offset;
-
 	while (length) {
 		u32 amount, stride, height;
 
@@ -695,6 +659,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+{
+	struct nouveau_mem *node = mem->mm_node;
+	int ret;
+
+	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
+			     node->page_shift, NV_MEM_ACCESS_RO, vma);
+	if (ret)
+		return ret;
+
+	if (mem->mem_type == TTM_PL_VRAM)
+		nouveau_vm_map(vma, node);
+	else
+		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
+				  node, node->pages);
+
+	return 0;
+}
+
+static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     bool no_wait_reserve, bool no_wait_gpu,
 		     struct ttm_mem_reg *new_mem)
@@ -711,31 +696,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
 
-	/* create temporary vma for old memory, this will get cleaned
-	 * up after ttm destroys the ttm_mem_reg
+	/* create temporary vmas for the transfer and attach them to the
+	 * old nouveau_mem node, these will get cleaned up after ttm has
+	 * destroyed the ttm_mem_reg
 	 */
 	if (dev_priv->card_type >= NV_50) {
 		struct nouveau_mem *node = old_mem->mm_node;
-		if (!node->tmp_vma.node) {
-			u32 page_shift = nvbo->vma.node->type;
-			if (old_mem->mem_type == TTM_PL_TT)
-				page_shift = nvbo->vma.vm->spg_shift;
-
-			ret = nouveau_vm_get(chan->vm,
-					     old_mem->num_pages << PAGE_SHIFT,
-					     page_shift, NV_MEM_ACCESS_RO,
-					     &node->tmp_vma);
-			if (ret)
-				goto out;
-		}
 
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			nouveau_vm_map(&node->tmp_vma, node);
-		else {
-			nouveau_vm_map_sg(&node->tmp_vma, 0,
-					  old_mem->num_pages << PAGE_SHIFT,
-					  node, node->pages);
-		}
+		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+		if (ret)
+			goto out;
+
+		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+		if (ret)
+			goto out;
 	}
 
 	if (dev_priv->card_type < NV_50)
@@ -762,7 +736,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_reserve, bool no_wait_gpu,
 		      struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
@@ -782,23 +755,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	if (dev_priv->card_type >= NV_50) {
-		struct nouveau_bo *nvbo = nouveau_bo(bo);
-		struct nouveau_mem *node = tmp_mem.mm_node;
-		struct nouveau_vma *vma = &nvbo->vma;
-		if (vma->node->type != vma->vm->spg_shift)
-			vma = &node->tmp_vma;
-		nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
-				  node, node->pages);
-	}
-
 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
-
-	if (dev_priv->card_type >= NV_50) {
-		struct nouveau_bo *nvbo = nouveau_bo(bo);
-		nouveau_vm_unmap(&nvbo->vma);
-	}
-
 	if (ret)
 		goto out;
 
@@ -844,30 +801,22 @@ out:
 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *node = new_mem->mm_node;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_vma *vma = &nvbo->vma;
-	struct nouveau_vm *vm = vma->vm;
-
-	if (dev_priv->card_type < NV_50)
-		return;
-
-	switch (new_mem->mem_type) {
-	case TTM_PL_VRAM:
-		nouveau_vm_map(vma, node);
-		break;
-	case TTM_PL_TT:
-		if (vma->node->type != vm->spg_shift) {
-			nouveau_vm_unmap(vma);
-			vma = &node->tmp_vma;
-		}
-		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
-				  node, node->pages);
-		break;
-	default:
-		nouveau_vm_unmap(&nvbo->vma);
-		break;
+	struct nouveau_vma *vma;
+
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (new_mem->mem_type == TTM_PL_VRAM) {
+			nouveau_vm_map(vma, new_mem->mm_node);
+		} else
+		if (new_mem->mem_type == TTM_PL_TT &&
+		    nvbo->page_shift == vma->vm->spg_shift) {
+			nouveau_vm_map_sg(vma, 0,
+					  new_mem->num_pages << PAGE_SHIFT,
+					  node, node->pages);
+		} else {
+			nouveau_vm_unmap(vma);
+		}
 	}
 }
 
@@ -1113,3 +1062,54 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
 
+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+{
+	struct nouveau_vma *vma;
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (vma->vm == vm)
+			return vma;
+	}
+
+	return NULL;
+}
+
+int
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
+		   struct nouveau_vma *vma)
+{
+	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
+	int ret;
+
+	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+			     NV_MEM_ACCESS_RW, vma);
+	if (ret)
+		return ret;
+
+	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+	else
+	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+		nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+
+	list_add_tail(&vma->head, &nvbo->vma_list);
+	vma->refcount = 1;
+	return 0;
+}
+
+void
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	if (vma->node) {
+		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
+			spin_lock(&nvbo->bo.bdev->fence_lock);
+			ttm_bo_wait(&nvbo->bo, false, false, false);
+			spin_unlock(&nvbo->bo.bdev->fence_lock);
+			nouveau_vm_unmap(vma);
+		}
+
+		nouveau_vm_put(vma);
+		list_del(&vma->head);
+	}
+}
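With the switch from a single embedded nvbo->vma to a vma_list, one buffer can be mapped into several address spaces at once, one nouveau_vma per VM. A hypothetical open-handle path (sketch only; the allocation and refcounting policy shown here are assumptions, not quoted code) would look like:

	struct nouveau_vma *vma;
	int ret;

	vma = nouveau_bo_vma_find(nvbo, vm);
	if (vma) {
		vma->refcount++;		/* BO already mapped in this VM */
	} else {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma)
			return -ENOMEM;

		/* reserves address space in vm and maps the BO's pages */
		ret = nouveau_bo_vma_add(nvbo, vm, vma);
		if (ret) {
			kfree(vma);
			return ret;
		}
	}
	/* vma->offset is the BO's virtual address within this VM */

Teardown is symmetric: once the last reference is dropped, nouveau_bo_vma_del() waits for the GPU, unmaps the pages, and returns the address space to the VM.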
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a7583a8ddb01..b0d753f45bbd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,40 +27,63 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_ramht.h"
 
 static int
-nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
 {
+	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_bo *pb = chan->pushbuf_bo;
-	struct nouveau_gpuobj *pushbuf = NULL;
-	int ret = 0;
+	int ret;
+
+	/* allocate buffer object */
+	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_map(chan->pushbuf_bo);
+	if (ret)
+		goto out;
 
+	/* create DMA object covering the entire memtype where the push
+	 * buffer resides, userspace can submit its own push buffers from
+	 * anywhere within the same memtype.
+	 */
+	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
 	if (dev_priv->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
+					 &chan->pushbuf_vma);
+		if (ret)
+			goto out;
+
 		if (dev_priv->card_type < NV_C0) {
 			ret = nouveau_gpuobj_dma_new(chan,
 						     NV_CLASS_DMA_IN_MEMORY, 0,
 						     (1ULL << 40),
 						     NV_MEM_ACCESS_RO,
 						     NV_MEM_TARGET_VM,
-						     &pushbuf);
+						     &chan->pushbuf);
 		}
-		chan->pushbuf_base = pb->bo.offset;
+		chan->pushbuf_base = chan->pushbuf_vma.offset;
 	} else
-	if (pb->bo.mem.mem_type == TTM_PL_TT) {
+	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->gart_info.aper_size,
 					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_GART, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+					     NV_MEM_TARGET_GART,
+					     &chan->pushbuf);
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
 					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_VRAM, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+					     NV_MEM_TARGET_VRAM,
+					     &chan->pushbuf);
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of its
 		 * exact reason for existing :) PCI access to cmdbuf in
@@ -70,47 +93,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     pci_resource_start(dev->pdev, 1),
 					     dev_priv->fb_available_size,
 					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_PCI, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+					     NV_MEM_TARGET_PCI,
+					     &chan->pushbuf);
 	}
 
-	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
-	nouveau_gpuobj_ref(NULL, &pushbuf);
-	return ret;
-}
-
-static struct nouveau_bo *
-nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
-{
-	struct nouveau_bo *pushbuf = NULL;
-	int location, ret;
-
-	if (nouveau_vram_pushbuf)
-		location = TTM_PL_FLAG_VRAM;
-	else
-		location = TTM_PL_FLAG_TT;
-
-	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
-	if (ret) {
-		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
-		return NULL;
-	}
-
-	ret = nouveau_bo_pin(pushbuf, location);
-	if (ret) {
-		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
-		nouveau_bo_ref(NULL, &pushbuf);
-		return NULL;
-	}
-
-	ret = nouveau_bo_map(pushbuf);
+out:
 	if (ret) {
-		nouveau_bo_unpin(pushbuf);
-		nouveau_bo_ref(NULL, &pushbuf);
-		return NULL;
+		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
+		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
+		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
+		if (chan->pushbuf_bo) {
+			nouveau_bo_unmap(chan->pushbuf_bo);
+			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+		}
 	}
 
-	return pushbuf;
+	return 0;
 }
 
 /* allocates and initializes a fifo for user space consumption */
@@ -121,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 	struct nouveau_channel *chan;
 	unsigned long flags;
 	int ret;
@@ -160,19 +159,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	INIT_LIST_HEAD(&chan->nvsw.flip);
 	INIT_LIST_HEAD(&chan->fence.pending);
 
-	/* Allocate DMA push buffer */
-	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
-	if (!chan->pushbuf_bo) {
-		ret = -ENOMEM;
-		NV_ERROR(dev, "pushbuf %d\n", ret);
+	/* setup channel's memory and vm */
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+	if (ret) {
+		NV_ERROR(dev, "gpuobj %d\n", ret);
 		nouveau_channel_put(&chan);
 		return ret;
 	}
 
-	nouveau_dma_pre_init(chan);
-	chan->user_put = 0x40;
-	chan->user_get = 0x44;
-
 	/* Allocate space for per-channel fixed notifier memory */
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
@@ -181,21 +175,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		return ret;
 	}
 
-	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+	/* Allocate DMA push buffer */
+	ret = nouveau_channel_pushbuf_init(chan);
 	if (ret) {
-		NV_ERROR(dev, "gpuobj %d\n", ret);
+		NV_ERROR(dev, "pushbuf %d\n", ret);
 		nouveau_channel_put(&chan);
 		return ret;
 	}
 
-	/* Create a dma object for the push buffer */
-	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
-	if (ret) {
-		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_put(&chan);
-		return ret;
-	}
+	nouveau_dma_pre_init(chan);
+	chan->user_put = 0x40;
+	chan->user_get = 0x44;
 
 	/* disable the fifo caches */
 	pfifo->reassign(dev, false);
@@ -220,6 +210,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	nouveau_debugfs_channel_init(chan);
 
 	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
+	if (fpriv) {
+		spin_lock(&fpriv->lock);
+		list_add(&chan->list, &fpriv->channels);
+		spin_unlock(&fpriv->lock);
+	}
 	*chan_ret = chan;
 	return 0;
 }
@@ -236,29 +231,23 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref)
 }
 
 struct nouveau_channel *
-nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+nouveau_channel_get(struct drm_file *file_priv, int id)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 	struct nouveau_channel *chan;
-	unsigned long flags;
-
-	if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
-		return ERR_PTR(-EINVAL);
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
-	if (unlikely(!chan))
-		return ERR_PTR(-EINVAL);
 
-	if (unlikely(file_priv && chan->file_priv != file_priv)) {
-		nouveau_channel_put_unlocked(&chan);
-		return ERR_PTR(-EINVAL);
+	spin_lock(&fpriv->lock);
+	list_for_each_entry(chan, &fpriv->channels, list) {
+		if (chan->id == id) {
+			chan = nouveau_channel_get_unlocked(chan);
+			spin_unlock(&fpriv->lock);
+			mutex_lock(&chan->mutex);
+			return chan;
+		}
 	}
+	spin_unlock(&fpriv->lock);
 
-	mutex_lock(&chan->mutex);
-	return chan;
+	return ERR_PTR(-EINVAL);
 }
 
 void
@@ -312,12 +301,14 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
+		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
 		nouveau_bo_unmap(chan->pushbuf_bo);
 		nouveau_bo_unpin(chan->pushbuf_bo);
 		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
 	}
-	nouveau_gpuobj_channel_takedown(chan);
+	nouveau_ramht_ref(NULL, &chan->ramht, chan);
 	nouveau_notifier_takedown_channel(chan);
+	nouveau_gpuobj_channel_takedown(chan);
 
 	nouveau_channel_ref(NULL, pchan);
 }
@@ -383,10 +374,11 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		chan = nouveau_channel_get(dev, file_priv, i);
+		chan = nouveau_channel_get(file_priv, i);
 		if (IS_ERR(chan))
 			continue;
 
+		list_del(&chan->list);
 		atomic_dec(&chan->users);
 		nouveau_channel_put(&chan);
 	}
@@ -459,10 +451,11 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	chan = nouveau_channel_get(file_priv, req->channel);
 	if (IS_ERR(chan))
 		return PTR_ERR(chan);
 
+	list_del(&chan->list);
 	atomic_dec(&chan->users);
 	nouveau_channel_put(&chan);
 	return 0;
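nouveau_channel_get() now walks a per-file channel list instead of the global table, so a client can only ever look up channels it created itself. The counterpart setup on file open (a sketch; the real implementation belongs to nouveau_state.c, which is not part of this excerpt) would initialise the new nouveau_fpriv fields roughly like this:

	struct nouveau_fpriv *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	spin_lock_init(&fpriv->lock);		/* guards the channel list */
	INIT_LIST_HEAD(&fpriv->channels);	/* channels owned by this fd */
	file_priv->driver_priv = fpriv;		/* retrieved via nouveau_fpriv() */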
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1595d0b6e815..939d4df07777 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -40,7 +40,7 @@
 static void nouveau_connector_hotplug(void *, int);
 
 static struct nouveau_encoder *
-find_encoder_by_type(struct drm_connector *connector, int type)
+find_encoder(struct drm_connector *connector, int type)
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_encoder *nv_encoder;
@@ -170,8 +170,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
 	struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
 
 	if (!dn ||
-	    !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
-	      (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+	    !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) ||
+	      (nv_encoder = find_encoder(connector, OUTPUT_ANALOG))))
 		return NULL;
 
 	for_each_child_of_node(dn, cn) {
@@ -233,6 +233,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 	struct drm_device *dev = connector->dev;
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = NULL;
+	struct nouveau_encoder *nv_partner;
 	struct nouveau_i2c_chan *i2c;
 	int type;
 
@@ -266,19 +267,22 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		 * same i2c channel so the value returned from ddc_detect
 		 * isn't necessarily correct.
 		 */
-		if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+		nv_partner = NULL;
+		if (nv_encoder->dcb->type == OUTPUT_TMDS)
+			nv_partner = find_encoder(connector, OUTPUT_ANALOG);
+		if (nv_encoder->dcb->type == OUTPUT_ANALOG)
+			nv_partner = find_encoder(connector, OUTPUT_TMDS);
+
+		if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG &&
+				    nv_partner->dcb->type == OUTPUT_TMDS) ||
+				   (nv_encoder->dcb->type == OUTPUT_TMDS &&
+				    nv_partner->dcb->type == OUTPUT_ANALOG))) {
 			if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
 				type = OUTPUT_TMDS;
 			else
 				type = OUTPUT_ANALOG;
 
-			nv_encoder = find_encoder_by_type(connector, type);
-			if (!nv_encoder) {
-				NV_ERROR(dev, "Detected %d encoder on %s, "
-					      "but no object!\n", type,
-					      drm_get_connector_name(connector));
-				return connector_status_disconnected;
-			}
+			nv_encoder = find_encoder(connector, type);
 		}
 
 		nouveau_connector_set_encoder(connector, nv_encoder);
@@ -292,9 +296,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 	}
 
 detect_analog:
-	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
+	nv_encoder = find_encoder(connector, OUTPUT_ANALOG);
 	if (!nv_encoder && !nouveau_tv_disable)
-		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
+		nv_encoder = find_encoder(connector, OUTPUT_TV);
 	if (nv_encoder && force) {
 		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
 		struct drm_encoder_helper_funcs *helper =
@@ -327,7 +331,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 		nv_connector->edid = NULL;
 	}
 
-	nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+	nv_encoder = find_encoder(connector, OUTPUT_LVDS);
 	if (!nv_encoder)
 		return connector_status_disconnected;
 
@@ -405,7 +409,7 @@ nouveau_connector_force(struct drm_connector *connector)
 	} else
 		type = OUTPUT_ANY;
 
-	nv_encoder = find_encoder_by_type(connector, type);
+	nv_encoder = find_encoder(connector, type);
 	if (!nv_encoder) {
 		NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
 			 drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 568caedd7216..00bc6eaad558 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -167,8 +167,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	     int delta, int length)
 {
 	struct nouveau_bo *pb = chan->pushbuf_bo;
-	uint64_t offset = bo->bo.offset + delta;
+	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+	u64 offset;
+
+	vma = nouveau_bo_vma_find(bo, chan->vm);
+	BUG_ON(!vma);
+	offset = vma->offset + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
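This is the consumer side of the per-VM mapping work: a buffer no longer has a single global GPU offset, so the indirect-buffer entry must be built from the buffer's mapping in this channel's VM. In sketch form:

	/* Illustration: the address written to the IB is only meaningful
	 * inside chan->vm; a missing mapping is a driver bug, hence the
	 * BUG_ON rather than an error path. */
	struct nouveau_vma *vma = nouveau_bo_vma_find(bo, chan->vm);
	u64 gpuaddr = vma->offset + delta;	/* virtual address in chan->vm */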
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 02c6f37d8bd7..b30ddd8d2e2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -73,7 +73,7 @@ int nouveau_ignorelid = 0;
 module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
 
 MODULE_PARM_DESC(noaccel, "Disable all acceleration");
-int nouveau_noaccel = 0;
+int nouveau_noaccel = -1;
 module_param_named(noaccel, nouveau_noaccel, int, 0400);
 
 MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
@@ -119,6 +119,10 @@ MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
 int nouveau_msi;
 module_param_named(msi, nouveau_msi, int, 0400);
 
+MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
+int nouveau_ctxfw;
+module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -210,10 +214,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	pfifo->unload_context(dev);
 
 	for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
-		if (dev_priv->eng[e]) {
-			ret = dev_priv->eng[e]->fini(dev, e);
-			if (ret)
-				goto out_abort;
+		if (!dev_priv->eng[e])
+			continue;
+
+		ret = dev_priv->eng[e]->fini(dev, e, true);
+		if (ret) {
+			NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
+			goto out_abort;
 		}
 	}
 
@@ -354,7 +361,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
+		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
 
 		nv_crtc->cursor.set_offset(nv_crtc, offset);
 		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
@@ -389,7 +396,9 @@ static struct drm_driver driver = {
 	.firstopen = nouveau_firstopen,
 	.lastclose = nouveau_lastclose,
 	.unload = nouveau_unload,
+	.open = nouveau_open,
 	.preclose = nouveau_preclose,
+	.postclose = nouveau_postclose,
 #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
 	.debugfs_init = nouveau_debugfs_init,
 	.debugfs_cleanup = nouveau_debugfs_takedown,
@@ -420,6 +429,8 @@ static struct drm_driver driver = {
 
 	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
+	.gem_open_object = nouveau_gem_object_open,
+	.gem_close_object = nouveau_gem_object_close,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
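Moving nouveau_noaccel from 0 to -1 turns the option into a tri-state: 0 and 1 remain explicit user overrides, while -1 lets the driver choose, presumably feeding the new drm_nouveau_private.noaccel flag at load time. A sketch of that resolution, with the default policy as an assumption:

	/* Sketch only: -1 means "driver decides"; anything else is an
	 * explicit user request. The per-chipset default shown here is
	 * an assumption, not quoted from the driver. */
	if (nouveau_noaccel == -1)
		dev_priv->noaccel = false;	/* assumed default: keep accel on */
	else
		dev_priv->noaccel = !!nouveau_noaccel;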
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9c56331941e2..d7d51deb34b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,9 +46,17 @@
 #include "ttm/ttm_module.h"
 
 struct nouveau_fpriv {
-	struct ttm_object_file *tfile;
+	spinlock_t lock;
+	struct list_head channels;
+	struct nouveau_vm *vm;
 };
 
+static inline struct nouveau_fpriv *
+nouveau_fpriv(struct drm_file *file_priv)
+{
+	return file_priv ? file_priv->driver_priv : NULL;
+}
+
 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
 #include "nouveau_drm.h"
@@ -69,7 +77,7 @@ struct nouveau_mem {
 	struct drm_device *dev;
 
 	struct nouveau_vma bar_vma;
-	struct nouveau_vma tmp_vma;
+	struct nouveau_vma vma[2];
 	u8 page_shift;
 
 	struct drm_mm_node *tag;
@@ -107,7 +115,8 @@ struct nouveau_bo {
 
 	struct nouveau_channel *channel;
 
-	struct nouveau_vma vma;
+	struct list_head vma_list;
+	unsigned page_shift;
 
 	uint32_t tile_mode;
 	uint32_t tile_flags;
@@ -176,9 +185,10 @@ struct nouveau_gpuobj {
 	uint32_t flags;
 
 	u32 size;
-	u32 pinst;
-	u32 cinst;
-	u64 vinst;
+	u32 pinst;	/* PRAMIN BAR offset */
+	u32 cinst;	/* Channel offset */
+	u64 vinst;	/* VRAM address */
+	u64 linst;	/* VM address */
 
 	uint32_t engine;
 	uint32_t class;
@@ -201,6 +211,7 @@ enum nouveau_channel_mutex_class {
 
 struct nouveau_channel {
 	struct drm_device *dev;
+	struct list_head list;
 	int id;
 
 	/* references to the channel data structure */
@@ -228,15 +239,18 @@ struct nouveau_channel {
 		uint32_t sequence;
 		uint32_t sequence_ack;
 		atomic_t last_sequence_irq;
+		struct nouveau_vma vma;
 	} fence;
 
 	/* DMA push buffer */
 	struct nouveau_gpuobj *pushbuf;
 	struct nouveau_bo     *pushbuf_bo;
+	struct nouveau_vma     pushbuf_vma;
 	uint32_t               pushbuf_base;
 
 	/* Notifier memory */
 	struct nouveau_bo *notifier_bo;
+	struct nouveau_vma notifier_vma;
 	struct drm_mm notifier_heap;
 
 	/* PFIFO context */
@@ -278,6 +292,7 @@ struct nouveau_channel {
 
 	uint32_t sw_subchannel[8];
 
+	struct nouveau_vma dispc_vma[2];
 	struct {
 		struct nouveau_gpuobj *vblsem;
 		uint32_t vblsem_head;
@@ -297,7 +312,7 @@ struct nouveau_channel {
 struct nouveau_exec_engine {
 	void (*destroy)(struct drm_device *, int engine);
 	int  (*init)(struct drm_device *, int engine);
-	int  (*fini)(struct drm_device *, int engine);
+	int  (*fini)(struct drm_device *, int engine, bool suspend);
 	int  (*context_new)(struct nouveau_channel *, int engine);
 	void (*context_del)(struct nouveau_channel *, int engine);
 	int  (*object_new)(struct nouveau_channel *, int engine,
@@ -314,7 +329,8 @@ struct nouveau_instmem_engine {
 	int  (*suspend)(struct drm_device *dev);
 	void (*resume)(struct drm_device *dev);
 
-	int  (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+	int  (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
+		    u32 size, u32 align);
 	void (*put)(struct nouveau_gpuobj *);
 	int  (*map)(struct nouveau_gpuobj *);
 	void (*unmap)(struct nouveau_gpuobj *);
@@ -445,9 +461,9 @@ struct nouveau_pm_level {
 struct nouveau_pm_temp_sensor_constants {
 	u16 offset_constant;
 	s16 offset_mult;
-	u16 offset_div;
-	u16 slope_mult;
-	u16 slope_div;
+	s16 offset_div;
+	s16 slope_mult;
+	s16 slope_div;
 };
 
 struct nouveau_pm_threshold_temp {
@@ -488,7 +504,10 @@ struct nouveau_pm_engine {
 };
 
 struct nouveau_vram_engine {
+	struct nouveau_mm *mm;
+
 	int  (*init)(struct drm_device *);
+	void (*takedown)(struct drm_device *dev);
 	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
 		    u32 type, struct nouveau_mem **);
 	void (*put)(struct drm_device *, struct nouveau_mem **);
@@ -608,6 +627,7 @@ enum nouveau_card_type {
 
 struct drm_nouveau_private {
 	struct drm_device *dev;
+	bool noaccel;
 
 	/* the card type, takes NV_* as values */
 	enum nouveau_card_type card_type;
@@ -700,7 +720,6 @@ struct drm_nouveau_private {
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
 	uint64_t vram_sys_base;
-	u32 vram_rblock_size;
 
 	uint64_t fb_phys;
 	uint64_t fb_available_size;
@@ -784,12 +803,15 @@ extern int nouveau_override_conntype;
 extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
 extern int nouveau_msi;
+extern int nouveau_ctxfw;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
 
 /* nouveau_state.c */
+extern int  nouveau_open(struct drm_device *, struct drm_file *);
 extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern void nouveau_postclose(struct drm_device *, struct drm_file *);
 extern int  nouveau_load(struct drm_device *, unsigned long flags);
 extern int  nouveau_firstopen(struct drm_device *);
 extern void nouveau_lastclose(struct drm_device *);
@@ -847,7 +869,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
 extern struct nouveau_channel *
 nouveau_channel_get_unlocked(struct nouveau_channel *);
 extern struct nouveau_channel *
-nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+nouveau_channel_get(struct drm_file *, int id);
 extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
 extern void nouveau_channel_put(struct nouveau_channel **);
 extern void nouveau_channel_ref(struct nouveau_channel *chan,
@@ -1120,7 +1142,6 @@ extern int nvc0_fifo_unload_context(struct drm_device *);
 
 /* nv04_graph.c */
 extern int  nv04_graph_create(struct drm_device *);
-extern void nv04_graph_fifo_access(struct drm_device *, bool);
 extern int  nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
 extern int  nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
 				      u32 class, u32 mthd, u32 data);
@@ -1169,7 +1190,8 @@ extern int nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int  nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int  nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int  nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+			     u32 size, u32 align);
 extern void nv04_instmem_put(struct nouveau_gpuobj *);
 extern int  nv04_instmem_map(struct nouveau_gpuobj *);
 extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
@@ -1180,7 +1202,8 @@ extern int nv50_instmem_init(struct drm_device *);
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int  nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int  nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int  nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+			     u32 size, u32 align);
 extern void nv50_instmem_put(struct nouveau_gpuobj *);
 extern int  nv50_instmem_map(struct nouveau_gpuobj *);
 extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
@@ -1247,10 +1270,9 @@ extern int nv04_crtc_create(struct drm_device *, int index);
 
 /* nouveau_bo.c */
 extern struct ttm_bo_driver nouveau_bo_driver;
-extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
-			  int size, int align, uint32_t flags,
-			  uint32_t tile_mode, uint32_t tile_flags,
-			  struct nouveau_bo **);
+extern int nouveau_bo_new(struct drm_device *, int size, int align,
+			  uint32_t flags, uint32_t tile_mode,
+			  uint32_t tile_flags, struct nouveau_bo **);
 extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
 extern int nouveau_bo_unpin(struct nouveau_bo *);
 extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1265,6 +1287,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
1265extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 1287extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
1266 bool no_wait_reserve, bool no_wait_gpu); 1288 bool no_wait_reserve, bool no_wait_gpu);
1267 1289
1290extern struct nouveau_vma *
1291nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
1292extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
1293 struct nouveau_vma *);
1294extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
1295
1268/* nouveau_fence.c */ 1296/* nouveau_fence.c */
1269struct nouveau_fence; 1297struct nouveau_fence;
1270extern int nouveau_fence_init(struct drm_device *); 1298extern int nouveau_fence_init(struct drm_device *);
@@ -1310,12 +1338,14 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
1310} 1338}
1311 1339
1312/* nouveau_gem.c */ 1340/* nouveau_gem.c */
1313extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, 1341extern int nouveau_gem_new(struct drm_device *, int size, int align,
1314 int size, int align, uint32_t domain, 1342 uint32_t domain, uint32_t tile_mode,
1315 uint32_t tile_mode, uint32_t tile_flags, 1343 uint32_t tile_flags, struct nouveau_bo **);
1316 struct nouveau_bo **);
1317extern int nouveau_gem_object_new(struct drm_gem_object *); 1344extern int nouveau_gem_object_new(struct drm_gem_object *);
1318extern void nouveau_gem_object_del(struct drm_gem_object *); 1345extern void nouveau_gem_object_del(struct drm_gem_object *);
1346extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
1347extern void nouveau_gem_object_close(struct drm_gem_object *,
1348 struct drm_file *);
1319extern int nouveau_gem_ioctl_new(struct drm_device *, void *, 1349extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1320 struct drm_file *); 1350 struct drm_file *);
1321extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, 1351extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
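The nouveau_drv.h hunks above replace the old single per-BO mapping with a list of per-VM mappings, looked up through nouveau_bo_vma_find()/nouveau_bo_vma_add()/nouveau_bo_vma_del(); that is why nouveau_bo_new() and nouveau_gem_new() no longer take a channel argument. Below is a minimal, compilable userspace model of that lookup; the struct layouts are illustrative stand-ins, not the driver's real types, and the address assignment is faked.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the driver structures. */
struct vm  { int id; };
struct vma { struct vm *vm; unsigned long offset; struct vma *next; };
struct bo  { struct vma *vma_list; };

/* Like nouveau_bo_vma_find(): at most one mapping per (bo, vm) pair. */
static struct vma *bo_vma_find(struct bo *bo, struct vm *vm)
{
	struct vma *v;
	for (v = bo->vma_list; v; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

/* Like nouveau_bo_vma_add(): carve out address space in 'vm' and
 * record the mapping on the BO.  The offsets here are invented. */
static struct vma *bo_vma_add(struct bo *bo, struct vm *vm)
{
	static unsigned long next_addr = 0x100000;
	struct vma *v = malloc(sizeof(*v));
	if (!v)
		return NULL;
	v->vm = vm;
	v->offset = next_addr; next_addr += 0x10000;
	v->next = bo->vma_list;
	bo->vma_list = v;
	return v;
}

int main(void)
{
	struct vm vm_a = { 0 }, vm_b = { 1 };
	struct bo bo = { NULL };

	bo_vma_add(&bo, &vm_a);
	bo_vma_add(&bo, &vm_b);

	/* The same BO now has a different virtual address in each VM. */
	printf("vm_a: 0x%lx\n", bo_vma_find(&bo, &vm_a)->offset);
	printf("vm_b: 0x%lx\n", bo_vma_find(&bo, &vm_b)->offset);
	return 0;
}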
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index a3a88ad00f86..95c843e684bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,7 @@
30struct nouveau_framebuffer { 30struct nouveau_framebuffer {
31 struct drm_framebuffer base; 31 struct drm_framebuffer base;
32 struct nouveau_bo *nvbo; 32 struct nouveau_bo *nvbo;
33 struct nouveau_vma vma;
33 u32 r_dma; 34 u32 r_dma;
34 u32 r_format; 35 u32 r_format;
35 u32 r_pitch; 36 u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 39aee6d4daf8..14a8627efe4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -279,6 +279,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
279 struct fb_info *info; 279 struct fb_info *info;
280 struct drm_framebuffer *fb; 280 struct drm_framebuffer *fb;
281 struct nouveau_framebuffer *nouveau_fb; 281 struct nouveau_framebuffer *nouveau_fb;
282 struct nouveau_channel *chan;
282 struct nouveau_bo *nvbo; 283 struct nouveau_bo *nvbo;
283 struct drm_mode_fb_cmd mode_cmd; 284 struct drm_mode_fb_cmd mode_cmd;
284 struct pci_dev *pdev = dev->pdev; 285 struct pci_dev *pdev = dev->pdev;
@@ -296,8 +297,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
296 size = mode_cmd.pitch * mode_cmd.height; 297 size = mode_cmd.pitch * mode_cmd.height;
297 size = roundup(size, PAGE_SIZE); 298 size = roundup(size, PAGE_SIZE);
298 299
299 ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, 300 ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
300 NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo); 301 0, 0x0000, &nvbo);
301 if (ret) { 302 if (ret) {
302 NV_ERROR(dev, "failed to allocate framebuffer\n"); 303 NV_ERROR(dev, "failed to allocate framebuffer\n");
303 goto out; 304 goto out;
@@ -318,6 +319,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
318 goto out; 319 goto out;
319 } 320 }
320 321
322 chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
323 if (chan && dev_priv->card_type >= NV_50) {
324 ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
325 if (ret) {
326 NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
327 chan = NULL;
328 }
329 }
330
321 mutex_lock(&dev->struct_mutex); 331 mutex_lock(&dev->struct_mutex);
322 332
323 info = framebuffer_alloc(0, device); 333 info = framebuffer_alloc(0, device);
@@ -448,6 +458,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
448 458
449 if (nouveau_fb->nvbo) { 459 if (nouveau_fb->nvbo) {
450 nouveau_bo_unmap(nouveau_fb->nvbo); 460 nouveau_bo_unmap(nouveau_fb->nvbo);
461 nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
451 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 462 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
452 nouveau_fb->nvbo = NULL; 463 nouveau_fb->nvbo = NULL;
453 } 464 }
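The fbcon hunks above map the framebuffer into the drawing channel's VM on NV50+ and, if that mapping fails, simply NULL the channel so the console falls back to unaccelerated drawing rather than failing outright. A small sketch of that degrade-gracefully decision, with hypothetical names (fb_map_into_vm() stands in for the nouveau_bo_vma_add() call):

#include <stdio.h>

struct channel { const char *name; };

/* Hypothetical stand-in for mapping the fb BO into the channel's VM;
 * 'fail' simulates the error path. */
static int fb_map_into_vm(struct channel *chan, int fail)
{
	(void)chan;
	return fail ? -12 /* -ENOMEM */ : 0;
}

static struct channel *pick_fbcon_channel(struct channel *hw_chan,
					  int nofbaccel, int nv50_plus,
					  int map_fails)
{
	struct channel *chan = nofbaccel ? NULL : hw_chan;

	if (chan && nv50_plus && fb_map_into_vm(chan, map_fails) != 0)
		chan = NULL;	/* degrade to unaccelerated fbcon */
	return chan;
}

int main(void)
{
	struct channel hw = { "chan0" };
	printf("mapped ok -> %s\n",
	       pick_fbcon_channel(&hw, 0, 1, 0) ? "accel" : "soft");
	printf("map fail -> %s\n",
	       pick_fbcon_channel(&hw, 0, 1, 1) ? "accel" : "soft");
	return 0;
}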
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7347075ca5b8..8d02d875376d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
336{ 336{
337 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 337 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
338 struct nouveau_fence *fence = NULL; 338 struct nouveau_fence *fence = NULL;
339 u64 offset = chan->fence.vma.offset + sema->mem->start;
339 int ret; 340 int ret;
340 341
341 if (dev_priv->chipset < 0x84) { 342 if (dev_priv->chipset < 0x84) {
@@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
345 346
346 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3); 347 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
347 OUT_RING (chan, NvSema); 348 OUT_RING (chan, NvSema);
348 OUT_RING (chan, sema->mem->start); 349 OUT_RING (chan, offset);
349 OUT_RING (chan, 1); 350 OUT_RING (chan, 1);
350 } else 351 } else
351 if (dev_priv->chipset < 0xc0) { 352 if (dev_priv->chipset < 0xc0) {
352 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
353 u64 offset = vma->offset + sema->mem->start;
354
355 ret = RING_SPACE(chan, 7); 353 ret = RING_SPACE(chan, 7);
356 if (ret) 354 if (ret)
357 return ret; 355 return ret;
@@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
364 OUT_RING (chan, 1); 362 OUT_RING (chan, 1);
365 OUT_RING (chan, 1); /* ACQUIRE_EQ */ 363 OUT_RING (chan, 1); /* ACQUIRE_EQ */
366 } else { 364 } else {
367 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
368 u64 offset = vma->offset + sema->mem->start;
369
370 ret = RING_SPACE(chan, 5); 365 ret = RING_SPACE(chan, 5);
371 if (ret) 366 if (ret)
372 return ret; 367 return ret;
@@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
394{ 389{
395 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 390 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
396 struct nouveau_fence *fence = NULL; 391 struct nouveau_fence *fence = NULL;
392 u64 offset = chan->fence.vma.offset + sema->mem->start;
397 int ret; 393 int ret;
398 394
399 if (dev_priv->chipset < 0x84) { 395 if (dev_priv->chipset < 0x84) {
@@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
403 399
404 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2); 400 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
405 OUT_RING (chan, NvSema); 401 OUT_RING (chan, NvSema);
406 OUT_RING (chan, sema->mem->start); 402 OUT_RING (chan, offset);
407 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1); 403 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
408 OUT_RING (chan, 1); 404 OUT_RING (chan, 1);
409 } else 405 } else
410 if (dev_priv->chipset < 0xc0) { 406 if (dev_priv->chipset < 0xc0) {
411 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
412 u64 offset = vma->offset + sema->mem->start;
413
414 ret = RING_SPACE(chan, 7); 407 ret = RING_SPACE(chan, 7);
415 if (ret) 408 if (ret)
416 return ret; 409 return ret;
@@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
423 OUT_RING (chan, 1); 416 OUT_RING (chan, 1);
424 OUT_RING (chan, 2); /* RELEASE */ 417 OUT_RING (chan, 2); /* RELEASE */
425 } else { 418 } else {
426 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
427 u64 offset = vma->offset + sema->mem->start;
428
429 ret = RING_SPACE(chan, 5); 419 ret = RING_SPACE(chan, 5);
430 if (ret) 420 if (ret)
431 return ret; 421 return ret;
@@ -540,6 +530,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
540 nouveau_gpuobj_ref(NULL, &obj); 530 nouveau_gpuobj_ref(NULL, &obj);
541 if (ret) 531 if (ret)
542 return ret; 532 return ret;
533 } else {
534 /* map fence bo into channel's vm */
535 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
536 &chan->fence.vma);
537 if (ret)
538 return ret;
543 } 539 }
544 540
545 INIT_LIST_HEAD(&chan->fence.pending); 541 INIT_LIST_HEAD(&chan->fence.pending);
@@ -551,10 +547,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
551void 547void
552nouveau_fence_channel_fini(struct nouveau_channel *chan) 548nouveau_fence_channel_fini(struct nouveau_channel *chan)
553{ 549{
550 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
554 struct nouveau_fence *tmp, *fence; 551 struct nouveau_fence *tmp, *fence;
555 552
556 spin_lock(&chan->fence.lock); 553 spin_lock(&chan->fence.lock);
557
558 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { 554 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
559 fence->signalled = true; 555 fence->signalled = true;
560 list_del(&fence->entry); 556 list_del(&fence->entry);
@@ -564,8 +560,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
564 560
565 kref_put(&fence->refcount, nouveau_fence_del); 561 kref_put(&fence->refcount, nouveau_fence_del);
566 } 562 }
567
568 spin_unlock(&chan->fence.lock); 563 spin_unlock(&chan->fence.lock);
564
565 nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
569} 566}
570 567
571int 568int
@@ -577,7 +574,7 @@ nouveau_fence_init(struct drm_device *dev)
577 574
578 /* Create a shared VRAM heap for cross-channel sync. */ 575 /* Create a shared VRAM heap for cross-channel sync. */
579 if (USE_SEMA(dev)) { 576 if (USE_SEMA(dev)) {
580 ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM, 577 ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
581 0, 0, &dev_priv->fence.bo); 578 0, 0, &dev_priv->fence.bo);
582 if (ret) 579 if (ret)
583 return ret; 580 return ret;
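With the shared fence BO now mapped separately into every channel's VM, both semaphore_acquire() and semaphore_release() above compute the GPU address once, up front, as the channel's own mapping plus the semaphore's offset inside the heap. A standalone illustration of that computation (field names are simplified from the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct vma     { uint64_t offset; };          /* per-channel mapping */
struct channel { struct vma fence_vma; };
struct sema    { uint32_t heap_start; };      /* offset inside fence BO */

/* Mirrors 'offset = chan->fence.vma.offset + sema->mem->start'. */
static uint64_t sema_gpu_addr(struct channel *chan, struct sema *sema)
{
	return chan->fence_vma.offset + sema->heap_start;
}

int main(void)
{
	struct channel a = { { 0x20000000ull } };
	struct channel b = { { 0x30000000ull } };
	struct sema s = { 0x40 };

	/* One semaphore, two channels, two virtual addresses. */
	printf("chan a sees 0x%llx\n",
	       (unsigned long long)sema_gpu_addr(&a, &s));
	printf("chan b sees 0x%llx\n",
	       (unsigned long long)sema_gpu_addr(&b, &s));
	assert(sema_gpu_addr(&a, &s) != sema_gpu_addr(&b, &s));
	return 0;
}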
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b52e46018245..5f0bc57fdaab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -60,9 +60,71 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
60} 60}
61 61
62int 62int
63nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, 63nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
64 int size, int align, uint32_t domain, uint32_t tile_mode, 64{
65 uint32_t tile_flags, struct nouveau_bo **pnvbo) 65 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
66 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
67 struct nouveau_vma *vma;
68 int ret;
69
70 if (!fpriv->vm)
71 return 0;
72
73 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
74 if (ret)
75 return ret;
76
77 vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
78 if (!vma) {
79 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
80 if (!vma) {
81 ret = -ENOMEM;
82 goto out;
83 }
84
85 ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
86 if (ret) {
87 kfree(vma);
88 goto out;
89 }
90 } else {
91 vma->refcount++;
92 }
93
94out:
95 ttm_bo_unreserve(&nvbo->bo);
96 return ret;
97}
98
99void
100nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
101{
102 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
103 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
104 struct nouveau_vma *vma;
105 int ret;
106
107 if (!fpriv->vm)
108 return;
109
110 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
111 if (ret)
112 return;
113
114 vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
115 if (vma) {
116 if (--vma->refcount == 0) {
117 nouveau_bo_vma_del(nvbo, vma);
118 kfree(vma);
119 }
120 }
121 ttm_bo_unreserve(&nvbo->bo);
122}
123
124int
125nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
126 uint32_t tile_mode, uint32_t tile_flags,
127 struct nouveau_bo **pnvbo)
66{ 128{
67 struct drm_nouveau_private *dev_priv = dev->dev_private; 129 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_bo *nvbo; 130 struct nouveau_bo *nvbo;
@@ -76,7 +138,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
76 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) 138 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
77 flags |= TTM_PL_FLAG_SYSTEM; 139 flags |= TTM_PL_FLAG_SYSTEM;
78 140
79 ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode, 141 ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
80 tile_flags, pnvbo); 142 tile_flags, pnvbo);
81 if (ret) 143 if (ret)
82 return ret; 144 return ret;
@@ -103,17 +165,28 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
103} 165}
104 166
105static int 167static int
106nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) 168nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
169 struct drm_nouveau_gem_info *rep)
107{ 170{
171 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
108 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 172 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
173 struct nouveau_vma *vma;
109 174
110 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 175 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
111 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 176 rep->domain = NOUVEAU_GEM_DOMAIN_GART;
112 else 177 else
113 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 178 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
114 179
115 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
116 rep->offset = nvbo->bo.offset; 180 rep->offset = nvbo->bo.offset;
181 if (fpriv->vm) {
182 vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
183 if (!vma)
184 return -EINVAL;
185
186 rep->offset = vma->offset;
187 }
188
189 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
117 rep->map_handle = nvbo->bo.addr_space_offset; 190 rep->map_handle = nvbo->bo.addr_space_offset;
118 rep->tile_mode = nvbo->tile_mode; 191 rep->tile_mode = nvbo->tile_mode;
119 rep->tile_flags = nvbo->tile_flags; 192 rep->tile_flags = nvbo->tile_flags;
@@ -127,7 +200,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
127 struct drm_nouveau_private *dev_priv = dev->dev_private; 200 struct drm_nouveau_private *dev_priv = dev->dev_private;
128 struct drm_nouveau_gem_new *req = data; 201 struct drm_nouveau_gem_new *req = data;
129 struct nouveau_bo *nvbo = NULL; 202 struct nouveau_bo *nvbo = NULL;
130 struct nouveau_channel *chan = NULL;
131 int ret = 0; 203 int ret = 0;
132 204
133 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) 205 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
@@ -138,28 +210,21 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
138 return -EINVAL; 210 return -EINVAL;
139 } 211 }
140 212
141 if (req->channel_hint) { 213 ret = nouveau_gem_new(dev, req->info.size, req->align,
142 chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
143 if (IS_ERR(chan))
144 return PTR_ERR(chan);
145 }
146
147 ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
148 req->info.domain, req->info.tile_mode, 214 req->info.domain, req->info.tile_mode,
149 req->info.tile_flags, &nvbo); 215 req->info.tile_flags, &nvbo);
150 if (chan)
151 nouveau_channel_put(&chan);
152 if (ret) 216 if (ret)
153 return ret; 217 return ret;
154 218
155 ret = nouveau_gem_info(nvbo->gem, &req->info);
156 if (ret)
157 goto out;
158
159 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 219 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
220 if (ret == 0) {
221 ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
222 if (ret)
223 drm_gem_handle_delete(file_priv, req->info.handle);
224 }
225
160 /* drop reference from allocate - handle holds it now */ 226 /* drop reference from allocate - handle holds it now */
161 drm_gem_object_unreference_unlocked(nvbo->gem); 227 drm_gem_object_unreference_unlocked(nvbo->gem);
162out:
163 return ret; 228 return ret;
164} 229}
165 230
@@ -318,6 +383,7 @@ static int
318validate_list(struct nouveau_channel *chan, struct list_head *list, 383validate_list(struct nouveau_channel *chan, struct list_head *list,
319 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) 384 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
320{ 385{
386 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
321 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 387 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
322 (void __force __user *)(uintptr_t)user_pbbo_ptr; 388 (void __force __user *)(uintptr_t)user_pbbo_ptr;
323 struct drm_device *dev = chan->dev; 389 struct drm_device *dev = chan->dev;
@@ -356,24 +422,26 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
356 return ret; 422 return ret;
357 } 423 }
358 424
359 if (nvbo->bo.offset == b->presumed.offset && 425 if (dev_priv->card_type < NV_50) {
360 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 426 if (nvbo->bo.offset == b->presumed.offset &&
361 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 427 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
362 (nvbo->bo.mem.mem_type == TTM_PL_TT && 428 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
363 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) 429 (nvbo->bo.mem.mem_type == TTM_PL_TT &&
364 continue; 430 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
431 continue;
365 432
366 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 433 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
367 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; 434 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
368 else 435 else
369 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; 436 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
370 b->presumed.offset = nvbo->bo.offset; 437 b->presumed.offset = nvbo->bo.offset;
371 b->presumed.valid = 0; 438 b->presumed.valid = 0;
372 relocs++; 439 relocs++;
373 440
374 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, 441 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
375 &b->presumed, sizeof(b->presumed))) 442 &b->presumed, sizeof(b->presumed)))
376 return -EFAULT; 443 return -EFAULT;
444 }
377 } 445 }
378 446
379 return relocs; 447 return relocs;
@@ -548,7 +616,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
548 struct nouveau_fence *fence = NULL; 616 struct nouveau_fence *fence = NULL;
549 int i, j, ret = 0, do_reloc = 0; 617 int i, j, ret = 0, do_reloc = 0;
550 618
551 chan = nouveau_channel_get(dev, file_priv, req->channel); 619 chan = nouveau_channel_get(file_priv, req->channel);
552 if (IS_ERR(chan)) 620 if (IS_ERR(chan))
553 return PTR_ERR(chan); 621 return PTR_ERR(chan);
554 622
@@ -782,7 +850,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
782 if (!gem) 850 if (!gem)
783 return -ENOENT; 851 return -ENOENT;
784 852
785 ret = nouveau_gem_info(gem, req); 853 ret = nouveau_gem_info(file_priv, gem, req);
786 drm_gem_object_unreference_unlocked(gem); 854 drm_gem_object_unreference_unlocked(gem);
787 return ret; 855 return ret;
788} 856}
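nouveau_gem_object_open()/close() above keep a single vma per (object, client VM) and refcount it, so a client that obtains the same handle twice only unmaps on the last close. The toy model below captures just the refcounting shape; reservation locking and the real allocation calls are omitted, and the types are stand-ins.

#include <stdio.h>
#include <stdlib.h>

struct vm;	/* opaque: only compared by pointer here */
struct vma { struct vm *vm; int refcount; struct vma *next; };
struct obj { struct vma *vmas; };

static struct vma *vma_find(struct obj *o, struct vm *vm)
{
	struct vma *v;
	for (v = o->vmas; v; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

/* Modeled on nouveau_gem_object_open(). */
static int obj_open(struct obj *o, struct vm *vm)
{
	struct vma *v = vma_find(o, vm);
	if (v) {
		v->refcount++;
		return 0;
	}
	v = calloc(1, sizeof(*v));
	if (!v)
		return -1;
	v->vm = vm;
	v->refcount = 1;	/* first reference */
	v->next = o->vmas;
	o->vmas = v;
	return 0;
}

/* Modeled on nouveau_gem_object_close(): unmap on the last reference. */
static void obj_close(struct obj *o, struct vm *vm)
{
	struct vma **pv, *v;
	for (pv = &o->vmas; (v = *pv); pv = &v->next) {
		if (v->vm != vm)
			continue;
		if (--v->refcount == 0) {
			*pv = v->next;	/* nouveau_bo_vma_del() analogue */
			free(v);
		}
		return;
	}
}

int main(void)
{
	struct obj o = { NULL };
	struct vm *vm = (struct vm *)&o;	/* any distinct pointer */

	obj_open(&o, vm);
	obj_open(&o, vm);	/* second open of the same handle */
	obj_close(&o, vm);
	printf("after 1st close: %s\n", vma_find(&o, vm) ? "mapped" : "gone");
	obj_close(&o, vm);
	printf("after 2nd close: %s\n", vma_find(&o, vm) ? "mapped" : "gone");
	return 0;
}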
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 2ba7265bc967..868c7fd74854 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -79,7 +79,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
79 int i; 79 int i;
80 80
81 stat = nv_rd32(dev, NV03_PMC_INTR_0); 81 stat = nv_rd32(dev, NV03_PMC_INTR_0);
82 if (!stat) 82 if (stat == 0 || stat == ~0)
83 return IRQ_NONE; 83 return IRQ_NONE;
84 84
85 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 85 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
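The extra 'stat == ~0' case above treats an all-ones readback as "not our interrupt": a PCI device that has been unplugged or has fallen off the bus returns 0xffffffff for every MMIO read, and claiming such an interrupt would wedge the handler in a loop. A standalone version of the check:

#include <assert.h>
#include <stdint.h>

/* Returns 1 if 'stat' looks like a genuine pending-interrupt mask.
 * 0x00000000 means nothing pending; 0xffffffff is what a dead or
 * removed PCI device returns for any register read. */
static int irq_stat_valid(uint32_t stat)
{
	return stat != 0 && stat != ~0u;
}

int main(void)
{
	assert(!irq_stat_valid(0x00000000));	/* not ours */
	assert(!irq_stat_valid(0xffffffff));	/* device gone */
	assert(irq_stat_valid(0x00000100));	/* a real status bit */
	return 0;
}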
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5ee14d216ce8..f9ae2fc3d6f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
398 dma_bits = 40; 398 dma_bits = 40;
399 } else 399 } else
400 if (0 && drm_pci_device_is_pcie(dev) && 400 if (0 && pci_is_pcie(dev->pdev) &&
401 dev_priv->chipset > 0x40 && 401 dev_priv->chipset > 0x40 &&
402 dev_priv->chipset != 0x45) { 402 dev_priv->chipset != 0x45) {
403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) 403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -423,38 +423,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
423 return ret; 423 return ret;
424 } 424 }
425 425
426 /* reserve space at end of VRAM for PRAMIN */
427 if (dev_priv->card_type >= NV_50) {
428 dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
429 } else
430 if (dev_priv->card_type >= NV_40) {
431 u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
432 u32 rsvd;
433
434 /* estimate grctx size, the magics come from nv40_grctx.c */
435 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
436 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
437 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
438 else rsvd = 0x4a40 * vs;
439 rsvd += 16 * 1024;
440 rsvd *= dev_priv->engine.fifo.channels;
441
442 /* pciegart table */
443 if (drm_pci_device_is_pcie(dev))
444 rsvd += 512 * 1024;
445
446 /* object storage */
447 rsvd += 512 * 1024;
448
449 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
450 } else {
451 dev_priv->ramin_rsvd_vram = 512 * 1024;
452 }
453
454 ret = dev_priv->engine.vram.init(dev);
455 if (ret)
456 return ret;
457
458 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); 426 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
459 if (dev_priv->vram_sys_base) { 427 if (dev_priv->vram_sys_base) {
460 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", 428 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
@@ -479,7 +447,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
479 } 447 }
480 448
481 if (dev_priv->card_type < NV_50) { 449 if (dev_priv->card_type < NV_50) {
482 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, 450 ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
483 0, 0, &dev_priv->vga_ram); 451 0, 0, &dev_priv->vga_ram);
484 if (ret == 0) 452 if (ret == 0)
485 ret = nouveau_bo_pin(dev_priv->vga_ram, 453 ret = nouveau_bo_pin(dev_priv->vga_ram,
@@ -729,37 +697,31 @@ nouveau_mem_timing_fini(struct drm_device *dev)
729} 697}
730 698
731static int 699static int
732nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) 700nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
733{ 701{
734 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 702 /* nothing to do */
735 struct nouveau_mm *mm;
736 u64 size, block, rsvd;
737 int ret;
738
739 rsvd = (256 * 1024); /* vga memory */
740 size = (p_size << PAGE_SHIFT) - rsvd;
741 block = dev_priv->vram_rblock_size;
742
743 ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
744 if (ret)
745 return ret;
746
747 man->priv = mm;
748 return 0; 703 return 0;
749} 704}
750 705
751static int 706static int
752nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) 707nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
753{ 708{
754 struct nouveau_mm *mm = man->priv; 709 /* nothing to do */
755 int ret; 710 return 0;
711}
756 712
757 ret = nouveau_mm_fini(&mm); 713static inline void
758 if (ret) 714nouveau_mem_node_cleanup(struct nouveau_mem *node)
759 return ret; 715{
716 if (node->vma[0].node) {
717 nouveau_vm_unmap(&node->vma[0]);
718 nouveau_vm_put(&node->vma[0]);
719 }
760 720
761 man->priv = NULL; 721 if (node->vma[1].node) {
762 return 0; 722 nouveau_vm_unmap(&node->vma[1]);
723 nouveau_vm_put(&node->vma[1]);
724 }
763} 725}
764 726
765static void 727static void
@@ -768,14 +730,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
768{ 730{
769 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); 731 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
770 struct nouveau_vram_engine *vram = &dev_priv->engine.vram; 732 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
771 struct nouveau_mem *node = mem->mm_node;
772 struct drm_device *dev = dev_priv->dev; 733 struct drm_device *dev = dev_priv->dev;
773 734
774 if (node->tmp_vma.node) { 735 nouveau_mem_node_cleanup(mem->mm_node);
775 nouveau_vm_unmap(&node->tmp_vma);
776 nouveau_vm_put(&node->tmp_vma);
777 }
778
779 vram->put(dev, (struct nouveau_mem **)&mem->mm_node); 736 vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
780} 737}
781 738
@@ -794,7 +751,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
794 int ret; 751 int ret;
795 752
796 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) 753 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
797 size_nc = 1 << nvbo->vma.node->type; 754 size_nc = 1 << nvbo->page_shift;
798 755
799 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 756 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
800 mem->page_alignment << PAGE_SHIFT, size_nc, 757 mem->page_alignment << PAGE_SHIFT, size_nc,
@@ -804,9 +761,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
804 return (ret == -ENOSPC) ? 0 : ret; 761 return (ret == -ENOSPC) ? 0 : ret;
805 } 762 }
806 763
807 node->page_shift = 12; 764 node->page_shift = nvbo->page_shift;
808 if (nvbo->vma.node)
809 node->page_shift = nvbo->vma.node->type;
810 765
811 mem->mm_node = node; 766 mem->mm_node = node;
812 mem->start = node->offset >> PAGE_SHIFT; 767 mem->start = node->offset >> PAGE_SHIFT;
@@ -862,15 +817,9 @@ static void
862nouveau_gart_manager_del(struct ttm_mem_type_manager *man, 817nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
863 struct ttm_mem_reg *mem) 818 struct ttm_mem_reg *mem)
864{ 819{
865 struct nouveau_mem *node = mem->mm_node; 820 nouveau_mem_node_cleanup(mem->mm_node);
866 821 kfree(mem->mm_node);
867 if (node->tmp_vma.node) {
868 nouveau_vm_unmap(&node->tmp_vma);
869 nouveau_vm_put(&node->tmp_vma);
870 }
871
872 mem->mm_node = NULL; 822 mem->mm_node = NULL;
873 kfree(node);
874} 823}
875 824
876static int 825static int
@@ -880,11 +829,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
880 struct ttm_mem_reg *mem) 829 struct ttm_mem_reg *mem)
881{ 830{
882 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 831 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
883 struct nouveau_bo *nvbo = nouveau_bo(bo);
884 struct nouveau_vma *vma = &nvbo->vma;
885 struct nouveau_vm *vm = vma->vm;
886 struct nouveau_mem *node; 832 struct nouveau_mem *node;
887 int ret;
888 833
889 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 834 if (unlikely((mem->num_pages << PAGE_SHIFT) >=
890 dev_priv->gart_info.aper_size)) 835 dev_priv->gart_info.aper_size))
@@ -893,24 +838,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
893 node = kzalloc(sizeof(*node), GFP_KERNEL); 838 node = kzalloc(sizeof(*node), GFP_KERNEL);
894 if (!node) 839 if (!node)
895 return -ENOMEM; 840 return -ENOMEM;
841 node->page_shift = 12;
896 842
897 /* This node must be for evicting large-paged VRAM
898 * to system memory. Due to a nv50 limitation of
899 * not being able to mix large/small pages within
900 * the same PDE, we need to create a temporary
901 * small-paged VMA for the eviction.
902 */
903 if (vma->node->type != vm->spg_shift) {
904 ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
905 vm->spg_shift, NV_MEM_ACCESS_RW,
906 &node->tmp_vma);
907 if (ret) {
908 kfree(node);
909 return ret;
910 }
911 }
912
913 node->page_shift = nvbo->vma.node->type;
914 mem->mm_node = node; 843 mem->mm_node = node;
915 mem->start = 0; 844 mem->start = 0;
916 return 0; 845 return 0;
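The new nouveau_mem_node_cleanup() above folds the duplicated teardown from the VRAM and GART managers into one helper that unmaps and releases whichever of the node's two VMA slots are in use. A compilable sketch of that shape; the structs are stand-ins (the real code tests vma->node rather than a flag):

#include <stdio.h>

struct vma { int mapped; };	/* stand-in for vma->node != NULL */
struct mem_node { struct vma vma[2]; };

static void vm_unmap(struct vma *v) { v->mapped = 0; }
static void vm_put(struct vma *v)   { (void)v; printf("released a vma\n"); }

/* Mirrors nouveau_mem_node_cleanup(): tear down only what was set up. */
static void mem_node_cleanup(struct mem_node *node)
{
	int i;
	for (i = 0; i < 2; i++) {
		if (node->vma[i].mapped) {
			vm_unmap(&node->vma[i]);
			vm_put(&node->vma[i]);
		}
	}
}

int main(void)
{
	struct mem_node n = { { { 1 }, { 0 } } };	/* only slot 0 in use */
	mem_node_cleanup(&n);				/* prints once */
	return 0;
}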
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 7609756b6faf..1640dec3b823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -158,11 +158,18 @@ int
158nouveau_mm_fini(struct nouveau_mm **prmm) 158nouveau_mm_fini(struct nouveau_mm **prmm)
159{ 159{
160 struct nouveau_mm *rmm = *prmm; 160 struct nouveau_mm *rmm = *prmm;
161 struct nouveau_mm_node *heap = 161 struct nouveau_mm_node *node, *heap =
162 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); 162 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
163 163
164 if (!list_is_singular(&rmm->nodes)) 164 if (!list_is_singular(&rmm->nodes)) {
165 printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
166 list_for_each_entry(node, &rmm->nodes, nl_entry) {
167 printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
168 node->type, node->offset, node->length);
169 }
170 WARN_ON(1);
165 return -EBUSY; 171 return -EBUSY;
172 }
166 173
167 kfree(heap); 174 kfree(heap);
168 kfree(rmm); 175 kfree(rmm);
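nouveau_mm_fini() above now refuses to tear down a non-empty allocator, dumping the surviving nodes before WARNing, which turns a silent leak into an actionable log line. A toy version of the same check over a plain linked list (the kernel code walks a list_head instead):

#include <stdio.h>

struct mm_node { unsigned type, offset, length; struct mm_node *next; };

/* Returns -1 (like -EBUSY) and lists survivors if anything beyond the
 * initial heap node is still allocated; 0 if the allocator is clean. */
static int mm_fini_check(struct mm_node *nodes)
{
	struct mm_node *n;

	if (!nodes || !nodes->next)	/* only the heap node left: clean */
		return 0;

	fprintf(stderr, "mm not empty at destroy time!\n");
	for (n = nodes; n; n = n->next)
		fprintf(stderr, "0x%02x: 0x%08x 0x%08x\n",
			n->type, n->offset, n->length);
	return -1;
}

int main(void)
{
	struct mm_node leak = { 1, 0x1000, 0x2000, NULL };
	struct mm_node heap = { 0, 0x0000, 0x8000, &leak };

	mm_fini_check(&heap);	/* dumps both nodes, returns -1 */
	return 0;
}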
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 1f7483aae9a4..b9c016d21553 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -52,6 +52,7 @@ int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
52void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); 52void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
53 53
54int nv50_vram_init(struct drm_device *); 54int nv50_vram_init(struct drm_device *);
55void nv50_vram_fini(struct drm_device *);
55int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, 56int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
56 u32 memtype, struct nouveau_mem **); 57 u32 memtype, struct nouveau_mem **);
57void nv50_vram_del(struct drm_device *, struct nouveau_mem **); 58void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 5b39718ae1f8..6abdbe6530a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,6 +34,7 @@ int
34nouveau_notifier_init_channel(struct nouveau_channel *chan) 34nouveau_notifier_init_channel(struct nouveau_channel *chan)
35{ 35{
36 struct drm_device *dev = chan->dev; 36 struct drm_device *dev = chan->dev;
37 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_bo *ntfy = NULL; 38 struct nouveau_bo *ntfy = NULL;
38 uint32_t flags, ttmpl; 39 uint32_t flags, ttmpl;
39 int ret; 40 int ret;
@@ -46,7 +47,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
46 ttmpl = TTM_PL_FLAG_TT; 47 ttmpl = TTM_PL_FLAG_TT;
47 } 48 }
48 49
49 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); 50 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
50 if (ret) 51 if (ret)
51 return ret; 52 return ret;
52 53
@@ -58,14 +59,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
58 if (ret) 59 if (ret)
59 goto out_err; 60 goto out_err;
60 61
62 if (dev_priv->card_type >= NV_50) {
63 ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
64 if (ret)
65 goto out_err;
66 }
67
61 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); 68 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
62 if (ret) 69 if (ret)
63 goto out_err; 70 goto out_err;
64 71
65 chan->notifier_bo = ntfy; 72 chan->notifier_bo = ntfy;
66out_err: 73out_err:
67 if (ret) 74 if (ret) {
75 nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
68 drm_gem_object_unreference_unlocked(ntfy->gem); 76 drm_gem_object_unreference_unlocked(ntfy->gem);
77 }
69 78
70 return ret; 79 return ret;
71} 80}
@@ -78,6 +87,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
78 if (!chan->notifier_bo) 87 if (!chan->notifier_bo)
79 return; 88 return;
80 89
90 nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
81 nouveau_bo_unmap(chan->notifier_bo); 91 nouveau_bo_unmap(chan->notifier_bo);
82 mutex_lock(&dev->struct_mutex); 92 mutex_lock(&dev->struct_mutex);
83 nouveau_bo_unpin(chan->notifier_bo); 93 nouveau_bo_unpin(chan->notifier_bo);
@@ -122,10 +132,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
122 target = NV_MEM_TARGET_VRAM; 132 target = NV_MEM_TARGET_VRAM;
123 else 133 else
124 target = NV_MEM_TARGET_GART; 134 target = NV_MEM_TARGET_GART;
125 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; 135 offset = chan->notifier_bo->bo.offset;
126 } else { 136 } else {
127 target = NV_MEM_TARGET_VM; 137 target = NV_MEM_TARGET_VM;
128 offset = chan->notifier_bo->vma.offset; 138 offset = chan->notifier_vma.offset;
129 } 139 }
130 offset += mem->start; 140 offset += mem->start;
131 141
@@ -183,7 +193,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
183 if (unlikely(dev_priv->card_type >= NV_C0)) 193 if (unlikely(dev_priv->card_type >= NV_C0))
184 return -EINVAL; 194 return -EINVAL;
185 195
186 chan = nouveau_channel_get(dev, file_priv, na->channel); 196 chan = nouveau_channel_get(file_priv, na->channel);
187 if (IS_ERR(chan)) 197 if (IS_ERR(chan))
188 return PTR_ERR(chan); 198 return PTR_ERR(chan);
189 199
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8f97016f5b26..159b7c437d3f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
125 int ret = -EINVAL; 125 int ret = -EINVAL;
126 126
127 spin_lock_irqsave(&dev_priv->channels.lock, flags); 127 spin_lock_irqsave(&dev_priv->channels.lock, flags);
128 if (chid > 0 && chid < dev_priv->engine.fifo.channels) 128 if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
129 chan = dev_priv->channels.ptr[chid]; 129 chan = dev_priv->channels.ptr[chid];
130 if (chan) 130 if (chan)
131 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); 131 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
191 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); 191 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
192 spin_unlock(&dev_priv->ramin_lock); 192 spin_unlock(&dev_priv->ramin_lock);
193 193
194 if (chan) { 194 if (!(flags & NVOBJ_FLAG_VM) && chan) {
195 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); 195 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
196 if (ramin) 196 if (ramin)
197 ramin = drm_mm_get_block(ramin, size, align); 197 ramin = drm_mm_get_block(ramin, size, align);
@@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
208 gpuobj->vinst = ramin->start + chan->ramin->vinst; 208 gpuobj->vinst = ramin->start + chan->ramin->vinst;
209 gpuobj->node = ramin; 209 gpuobj->node = ramin;
210 } else { 210 } else {
211 ret = instmem->get(gpuobj, size, align); 211 ret = instmem->get(gpuobj, chan, size, align);
212 if (ret) { 212 if (ret) {
213 nouveau_gpuobj_ref(NULL, &gpuobj); 213 nouveau_gpuobj_ref(NULL, &gpuobj);
214 return ret; 214 return ret;
@@ -690,35 +690,64 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
690 return 0; 690 return 0;
691} 691}
692 692
693static int
694nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
695{
696 struct drm_device *dev = chan->dev;
697 struct nouveau_gpuobj *pgd = NULL;
698 struct nouveau_vm_pgd *vpgd;
699 int ret, i;
700
701 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
702 if (ret)
703 return ret;
704
705 /* create page directory for this vm if none currently exists,
706 * will be destroyed automagically when last reference to the
707 * vm is removed
708 */
709 if (list_empty(&vm->pgd_list)) {
710 ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
711 if (ret)
712 return ret;
713 }
714 nouveau_vm_ref(vm, &chan->vm, pgd);
715 nouveau_gpuobj_ref(NULL, &pgd);
716
717 /* point channel at vm's page directory */
718 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
719 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
720 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
721 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
722 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
723
724 /* map display semaphore buffers into channel's vm */
725 for (i = 0; i < 2; i++) {
726 struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
727
728 ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
729 &chan->dispc_vma[i]);
730 if (ret)
731 return ret;
732 }
733
734 return 0;
735}
736
693int 737int
694nouveau_gpuobj_channel_init(struct nouveau_channel *chan, 738nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
695 uint32_t vram_h, uint32_t tt_h) 739 uint32_t vram_h, uint32_t tt_h)
696{ 740{
697 struct drm_device *dev = chan->dev; 741 struct drm_device *dev = chan->dev;
698 struct drm_nouveau_private *dev_priv = dev->dev_private; 742 struct drm_nouveau_private *dev_priv = dev->dev_private;
743 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
744 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
699 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 745 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
700 int ret, i; 746 int ret, i;
701 747
702 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 748 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
703 749 if (dev_priv->card_type == NV_C0)
704 if (dev_priv->card_type == NV_C0) { 750 return nvc0_gpuobj_channel_init(chan, vm);
705 struct nouveau_vm *vm = dev_priv->chan_vm;
706 struct nouveau_vm_pgd *vpgd;
707
708 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
709 &chan->ramin);
710 if (ret)
711 return ret;
712
713 nouveau_vm_ref(vm, &chan->vm, NULL);
714
715 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
716 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
717 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
718 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
719 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
720 return 0;
721 }
722 751
723 /* Allocate a chunk of memory for per-channel object storage */ 752 /* Allocate a chunk of memory for per-channel object storage */
724 ret = nouveau_gpuobj_channel_init_pramin(chan); 753 ret = nouveau_gpuobj_channel_init_pramin(chan);
@@ -731,7 +760,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
731 * - Allocate per-channel page-directory 760 * - Allocate per-channel page-directory
732 * - Link with shared channel VM 761 * - Link with shared channel VM
733 */ 762 */
734 if (dev_priv->chan_vm) { 763 if (vm) {
735 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; 764 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
736 u64 vm_vinst = chan->ramin->vinst + pgd_offs; 765 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
737 u32 vm_pinst = chan->ramin->pinst; 766 u32 vm_pinst = chan->ramin->pinst;
@@ -744,7 +773,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
744 if (ret) 773 if (ret)
745 return ret; 774 return ret;
746 775
747 nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); 776 nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
748 } 777 }
749 778
750 /* RAMHT */ 779 /* RAMHT */
@@ -768,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
768 struct nouveau_gpuobj *sem = NULL; 797 struct nouveau_gpuobj *sem = NULL;
769 struct nv50_display_crtc *dispc = 798 struct nv50_display_crtc *dispc =
770 &nv50_display(dev)->crtc[i]; 799 &nv50_display(dev)->crtc[i];
771 u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT; 800 u64 offset = dispc->sem.bo->bo.offset;
772 801
773 ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff, 802 ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
774 NV_MEM_ACCESS_RW, 803 NV_MEM_ACCESS_RW,
@@ -841,13 +870,22 @@ void
841nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) 870nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
842{ 871{
843 struct drm_device *dev = chan->dev; 872 struct drm_device *dev = chan->dev;
873 struct drm_nouveau_private *dev_priv = dev->dev_private;
874 int i;
844 875
845 NV_DEBUG(dev, "ch%d\n", chan->id); 876 NV_DEBUG(dev, "ch%d\n", chan->id);
846 877
847 nouveau_ramht_ref(NULL, &chan->ramht, chan); 878 if (dev_priv->card_type >= NV_50) {
879 struct nv50_display *disp = nv50_display(dev);
880
881 for (i = 0; i < 2; i++) {
882 struct nv50_display_crtc *dispc = &disp->crtc[i];
883 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
884 }
848 885
849 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); 886 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
850 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 887 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
888 }
851 889
852 if (drm_mm_initialized(&chan->ramin_heap)) 890 if (drm_mm_initialized(&chan->ramin_heap))
853 drm_mm_takedown(&chan->ramin_heap); 891 drm_mm_takedown(&chan->ramin_heap);
@@ -909,7 +947,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
909 if (init->handle == ~0) 947 if (init->handle == ~0)
910 return -EINVAL; 948 return -EINVAL;
911 949
912 chan = nouveau_channel_get(dev, file_priv, init->channel); 950 chan = nouveau_channel_get(file_priv, init->channel);
913 if (IS_ERR(chan)) 951 if (IS_ERR(chan))
914 return PTR_ERR(chan); 952 return PTR_ERR(chan);
915 953
@@ -936,7 +974,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
936 struct nouveau_channel *chan; 974 struct nouveau_channel *chan;
937 int ret; 975 int ret;
938 976
939 chan = nouveau_channel_get(dev, file_priv, objfree->channel); 977 chan = nouveau_channel_get(file_priv, objfree->channel);
940 if (IS_ERR(chan)) 978 if (IS_ERR(chan))
941 return PTR_ERR(chan); 979 return PTR_ERR(chan);
942 980
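In the nouveau_object.c changes above, channel init now prefers the client's VM when one exists (vm = fpriv ? fpriv->vm : dev_priv->chan_vm), and nvc0_gpuobj_channel_init() creates the VM's page directory only for the first channel to attach, the comment noting it is destroyed with the last reference. A minimal model of that lazy-create/refcounted-destroy step, with invented types:

#include <stdio.h>
#include <stdlib.h>

struct pgd { int dummy; };
struct vm  { struct pgd *pgd; int refs; };

/* First channel joining the VM allocates the page directory; later
 * channels just take a reference (a nouveau_vm_ref() analogue). */
static struct pgd *vm_attach_channel(struct vm *vm)
{
	if (!vm->pgd) {
		vm->pgd = calloc(1, sizeof(*vm->pgd));
		if (!vm->pgd)
			return NULL;
		printf("allocated page directory\n");
	}
	vm->refs++;
	return vm->pgd;
}

static void vm_detach_channel(struct vm *vm)
{
	if (--vm->refs == 0) {		/* last reference: tear down */
		free(vm->pgd);
		vm->pgd = NULL;
		printf("freed page directory\n");
	}
}

int main(void)
{
	struct vm vm = { NULL, 0 };
	vm_attach_channel(&vm);	/* allocates */
	vm_attach_channel(&vm);	/* reuses */
	vm_detach_channel(&vm);
	vm_detach_channel(&vm);	/* frees */
	return 0;
}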
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 82fad914e648..c444cadbf849 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -429,7 +429,7 @@ nouveau_sgdma_init(struct drm_device *dev)
429 u32 aper_size, align; 429 u32 aper_size, align;
430 int ret; 430 int ret;
431 431
432 if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev)) 432 if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
433 aper_size = 512 * 1024 * 1024; 433 aper_size = 512 * 1024 * 1024;
434 else 434 else
435 aper_size = 64 * 1024 * 1024; 435 aper_size = 64 * 1024 * 1024;
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
458 dev_priv->gart_info.type = NOUVEAU_GART_HW; 458 dev_priv->gart_info.type = NOUVEAU_GART_HW;
459 dev_priv->gart_info.func = &nv50_sgdma_backend; 459 dev_priv->gart_info.func = &nv50_sgdma_backend;
460 } else 460 } else
461 if (0 && drm_pci_device_is_pcie(dev) && 461 if (0 && pci_is_pcie(dev->pdev) &&
462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { 462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
463 if (nv44_graph_class(dev)) { 463 if (nv44_graph_class(dev)) {
464 dev_priv->gart_info.func = &nv44_sgdma_backend; 464 dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 731acea865b5..10656e430b44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -91,6 +91,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
91 engine->pm.clock_pre = nv04_pm_clock_pre; 91 engine->pm.clock_pre = nv04_pm_clock_pre;
92 engine->pm.clock_set = nv04_pm_clock_set; 92 engine->pm.clock_set = nv04_pm_clock_set;
93 engine->vram.init = nouveau_mem_detect; 93 engine->vram.init = nouveau_mem_detect;
94 engine->vram.takedown = nouveau_stub_takedown;
94 engine->vram.flags_valid = nouveau_mem_flags_valid; 95 engine->vram.flags_valid = nouveau_mem_flags_valid;
95 break; 96 break;
96 case 0x10: 97 case 0x10:
@@ -139,6 +140,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
139 engine->pm.clock_pre = nv04_pm_clock_pre; 140 engine->pm.clock_pre = nv04_pm_clock_pre;
140 engine->pm.clock_set = nv04_pm_clock_set; 141 engine->pm.clock_set = nv04_pm_clock_set;
141 engine->vram.init = nouveau_mem_detect; 142 engine->vram.init = nouveau_mem_detect;
143 engine->vram.takedown = nouveau_stub_takedown;
142 engine->vram.flags_valid = nouveau_mem_flags_valid; 144 engine->vram.flags_valid = nouveau_mem_flags_valid;
143 break; 145 break;
144 case 0x20: 146 case 0x20:
@@ -187,6 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
187 engine->pm.clock_pre = nv04_pm_clock_pre; 189 engine->pm.clock_pre = nv04_pm_clock_pre;
188 engine->pm.clock_set = nv04_pm_clock_set; 190 engine->pm.clock_set = nv04_pm_clock_set;
189 engine->vram.init = nouveau_mem_detect; 191 engine->vram.init = nouveau_mem_detect;
192 engine->vram.takedown = nouveau_stub_takedown;
190 engine->vram.flags_valid = nouveau_mem_flags_valid; 193 engine->vram.flags_valid = nouveau_mem_flags_valid;
191 break; 194 break;
192 case 0x30: 195 case 0x30:
@@ -237,6 +240,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
237 engine->pm.voltage_get = nouveau_voltage_gpio_get; 240 engine->pm.voltage_get = nouveau_voltage_gpio_get;
238 engine->pm.voltage_set = nouveau_voltage_gpio_set; 241 engine->pm.voltage_set = nouveau_voltage_gpio_set;
239 engine->vram.init = nouveau_mem_detect; 242 engine->vram.init = nouveau_mem_detect;
243 engine->vram.takedown = nouveau_stub_takedown;
240 engine->vram.flags_valid = nouveau_mem_flags_valid; 244 engine->vram.flags_valid = nouveau_mem_flags_valid;
241 break; 245 break;
242 case 0x40: 246 case 0x40:
@@ -289,6 +293,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
289 engine->pm.voltage_set = nouveau_voltage_gpio_set; 293 engine->pm.voltage_set = nouveau_voltage_gpio_set;
290 engine->pm.temp_get = nv40_temp_get; 294 engine->pm.temp_get = nv40_temp_get;
291 engine->vram.init = nouveau_mem_detect; 295 engine->vram.init = nouveau_mem_detect;
296 engine->vram.takedown = nouveau_stub_takedown;
292 engine->vram.flags_valid = nouveau_mem_flags_valid; 297 engine->vram.flags_valid = nouveau_mem_flags_valid;
293 break; 298 break;
294 case 0x50: 299 case 0x50:
@@ -366,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
366 else 371 else
367 engine->pm.temp_get = nv40_temp_get; 372 engine->pm.temp_get = nv40_temp_get;
368 engine->vram.init = nv50_vram_init; 373 engine->vram.init = nv50_vram_init;
374 engine->vram.takedown = nv50_vram_fini;
369 engine->vram.get = nv50_vram_new; 375 engine->vram.get = nv50_vram_new;
370 engine->vram.put = nv50_vram_del; 376 engine->vram.put = nv50_vram_del;
371 engine->vram.flags_valid = nv50_vram_flags_valid; 377 engine->vram.flags_valid = nv50_vram_flags_valid;
@@ -411,9 +417,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
411 engine->gpio.irq_unregister = nv50_gpio_irq_unregister; 417 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
412 engine->gpio.irq_enable = nv50_gpio_irq_enable; 418 engine->gpio.irq_enable = nv50_gpio_irq_enable;
413 engine->vram.init = nvc0_vram_init; 419 engine->vram.init = nvc0_vram_init;
420 engine->vram.takedown = nv50_vram_fini;
414 engine->vram.get = nvc0_vram_new; 421 engine->vram.get = nvc0_vram_new;
415 engine->vram.put = nv50_vram_del; 422 engine->vram.put = nv50_vram_del;
416 engine->vram.flags_valid = nvc0_vram_flags_valid; 423 engine->vram.flags_valid = nvc0_vram_flags_valid;
424 engine->pm.temp_get = nv84_temp_get;
417 break; 425 break;
418 default: 426 default:
419 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); 427 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -447,8 +455,8 @@ nouveau_card_init_channel(struct drm_device *dev)
447 struct drm_nouveau_private *dev_priv = dev->dev_private; 455 struct drm_nouveau_private *dev_priv = dev->dev_private;
448 int ret; 456 int ret;
449 457
450 ret = nouveau_channel_alloc(dev, &dev_priv->channel, 458 ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
451 (struct drm_file *)-2, NvDmaFB, NvDmaTT); 459 NvDmaFB, NvDmaTT);
452 if (ret) 460 if (ret)
453 return ret; 461 return ret;
454 462
@@ -527,7 +535,7 @@ nouveau_card_init(struct drm_device *dev)
527 535
528 nouveau_pm_init(dev); 536 nouveau_pm_init(dev);
529 537
530 ret = nouveau_mem_vram_init(dev); 538 ret = engine->vram.init(dev);
531 if (ret) 539 if (ret)
532 goto out_bios; 540 goto out_bios;
533 541
@@ -539,10 +547,14 @@ nouveau_card_init(struct drm_device *dev)
539 if (ret) 547 if (ret)
540 goto out_gpuobj; 548 goto out_gpuobj;
541 549
542 ret = nouveau_mem_gart_init(dev); 550 ret = nouveau_mem_vram_init(dev);
543 if (ret) 551 if (ret)
544 goto out_instmem; 552 goto out_instmem;
545 553
554 ret = nouveau_mem_gart_init(dev);
555 if (ret)
556 goto out_ttmvram;
557
546 /* PMC */ 558 /* PMC */
547 ret = engine->mc.init(dev); 559 ret = engine->mc.init(dev);
548 if (ret) 560 if (ret)
@@ -563,7 +575,7 @@ nouveau_card_init(struct drm_device *dev)
563 if (ret) 575 if (ret)
564 goto out_timer; 576 goto out_timer;
565 577
566 if (!nouveau_noaccel) { 578 if (!dev_priv->noaccel) {
567 switch (dev_priv->card_type) { 579 switch (dev_priv->card_type) {
568 case NV_04: 580 case NV_04:
569 nv04_graph_create(dev); 581 nv04_graph_create(dev);
@@ -675,14 +687,14 @@ out_vblank:
675 drm_vblank_cleanup(dev); 687 drm_vblank_cleanup(dev);
676 engine->display.destroy(dev); 688 engine->display.destroy(dev);
677out_fifo: 689out_fifo:
678 if (!nouveau_noaccel) 690 if (!dev_priv->noaccel)
679 engine->fifo.takedown(dev); 691 engine->fifo.takedown(dev);
680out_engine: 692out_engine:
681 if (!nouveau_noaccel) { 693 if (!dev_priv->noaccel) {
682 for (e = e - 1; e >= 0; e--) { 694 for (e = e - 1; e >= 0; e--) {
683 if (!dev_priv->eng[e]) 695 if (!dev_priv->eng[e])
684 continue; 696 continue;
685 dev_priv->eng[e]->fini(dev, e); 697 dev_priv->eng[e]->fini(dev, e, false);
686 dev_priv->eng[e]->destroy(dev,e ); 698 dev_priv->eng[e]->destroy(dev,e );
687 } 699 }
688 } 700 }
@@ -696,12 +708,14 @@ out_mc:
696 engine->mc.takedown(dev); 708 engine->mc.takedown(dev);
697out_gart: 709out_gart:
698 nouveau_mem_gart_fini(dev); 710 nouveau_mem_gart_fini(dev);
711out_ttmvram:
712 nouveau_mem_vram_fini(dev);
699out_instmem: 713out_instmem:
700 engine->instmem.takedown(dev); 714 engine->instmem.takedown(dev);
701out_gpuobj: 715out_gpuobj:
702 nouveau_gpuobj_takedown(dev); 716 nouveau_gpuobj_takedown(dev);
703out_vram: 717out_vram:
704 nouveau_mem_vram_fini(dev); 718 engine->vram.takedown(dev);
705out_bios: 719out_bios:
706 nouveau_pm_fini(dev); 720 nouveau_pm_fini(dev);
707 nouveau_bios_takedown(dev); 721 nouveau_bios_takedown(dev);
@@ -718,16 +732,21 @@ static void nouveau_card_takedown(struct drm_device *dev)
718 struct nouveau_engine *engine = &dev_priv->engine; 732 struct nouveau_engine *engine = &dev_priv->engine;
719 int e; 733 int e;
720 734
735 drm_kms_helper_poll_fini(dev);
736 nouveau_fbcon_fini(dev);
737
721 if (dev_priv->channel) { 738 if (dev_priv->channel) {
722 nouveau_fence_fini(dev);
723 nouveau_channel_put_unlocked(&dev_priv->channel); 739 nouveau_channel_put_unlocked(&dev_priv->channel);
740 nouveau_fence_fini(dev);
724 } 741 }
725 742
726 if (!nouveau_noaccel) { 743 engine->display.destroy(dev);
744
745 if (!dev_priv->noaccel) {
727 engine->fifo.takedown(dev); 746 engine->fifo.takedown(dev);
728 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 747 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
729 if (dev_priv->eng[e]) { 748 if (dev_priv->eng[e]) {
730 dev_priv->eng[e]->fini(dev, e); 749 dev_priv->eng[e]->fini(dev, e, false);
731 dev_priv->eng[e]->destroy(dev,e ); 750 dev_priv->eng[e]->destroy(dev,e );
732 } 751 }
733 } 752 }
@@ -748,10 +767,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
748 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 767 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
749 mutex_unlock(&dev->struct_mutex); 768 mutex_unlock(&dev->struct_mutex);
750 nouveau_mem_gart_fini(dev); 769 nouveau_mem_gart_fini(dev);
770 nouveau_mem_vram_fini(dev);
751 771
752 engine->instmem.takedown(dev); 772 engine->instmem.takedown(dev);
753 nouveau_gpuobj_takedown(dev); 773 nouveau_gpuobj_takedown(dev);
754 nouveau_mem_vram_fini(dev); 774 engine->vram.takedown(dev);
755 775
756 nouveau_irq_fini(dev); 776 nouveau_irq_fini(dev);
757 drm_vblank_cleanup(dev); 777 drm_vblank_cleanup(dev);
@@ -762,6 +782,41 @@ static void nouveau_card_takedown(struct drm_device *dev)
762 vga_client_register(dev->pdev, NULL, NULL, NULL); 782 vga_client_register(dev->pdev, NULL, NULL, NULL);
763} 783}
764 784
785int
786nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
787{
788 struct drm_nouveau_private *dev_priv = dev->dev_private;
789 struct nouveau_fpriv *fpriv;
790 int ret;
791
792 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
793 if (unlikely(!fpriv))
794 return -ENOMEM;
795
796 spin_lock_init(&fpriv->lock);
797 INIT_LIST_HEAD(&fpriv->channels);
798
799 if (dev_priv->card_type == NV_50) {
800 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
801 &fpriv->vm);
802 if (ret) {
803 kfree(fpriv);
804 return ret;
805 }
806 } else
807 if (dev_priv->card_type >= NV_C0) {
808 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
809 &fpriv->vm);
810 if (ret) {
811 kfree(fpriv);
812 return ret;
813 }
814 }
815
816 file_priv->driver_priv = fpriv;
817 return 0;
818}
819
765/* here a client dies, release the stuff that was allocated for its 820/* here a client dies, release the stuff that was allocated for its
766 * file_priv */ 821 * file_priv */
767void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) 822void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -769,6 +824,14 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
769 nouveau_channel_cleanup(dev, file_priv); 824 nouveau_channel_cleanup(dev, file_priv);
770} 825}
771 826
827void
828nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
829{
830 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
831 nouveau_vm_ref(NULL, &fpriv->vm, NULL);
832 kfree(fpriv);
833}
834
772/* first module load, setup the mmio/fb mapping */ 835/* first module load, setup the mmio/fb mapping */
773/* KMS: we need mmio at load time, not when the first drm client opens. */ 836/* KMS: we need mmio at load time, not when the first drm client opens. */
774int nouveau_firstopen(struct drm_device *dev) 837int nouveau_firstopen(struct drm_device *dev)
@@ -933,6 +996,25 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
933 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", 996 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
934 dev_priv->card_type, reg0); 997 dev_priv->card_type, reg0);
935 998
999 /* Determine whether we'll attempt acceleration or not, some
1000 * cards are disabled by default here due to them being known
1001 * non-functional, or never been tested due to lack of hw.
1002 */
1003 dev_priv->noaccel = !!nouveau_noaccel;
1004 if (nouveau_noaccel == -1) {
1005 switch (dev_priv->chipset) {
1006 case 0xc1: /* known broken */
1007 case 0xc8: /* never tested */
1008 NV_INFO(dev, "acceleration disabled by default, pass "
1009 "noaccel=0 to force enable\n");
1010 dev_priv->noaccel = true;
1011 break;
1012 default:
1013 dev_priv->noaccel = false;
1014 break;
1015 }
1016 }
1017
936 ret = nouveau_remove_conflicting_drivers(dev); 1018 ret = nouveau_remove_conflicting_drivers(dev);
937 if (ret) 1019 if (ret)
938 goto err_mmio; 1020 goto err_mmio;
@@ -997,11 +1079,7 @@ void nouveau_lastclose(struct drm_device *dev)
997int nouveau_unload(struct drm_device *dev) 1079int nouveau_unload(struct drm_device *dev)
998{ 1080{
999 struct drm_nouveau_private *dev_priv = dev->dev_private; 1081 struct drm_nouveau_private *dev_priv = dev->dev_private;
1000 struct nouveau_engine *engine = &dev_priv->engine;
1001 1082
1002 drm_kms_helper_poll_fini(dev);
1003 nouveau_fbcon_fini(dev);
1004 engine->display.destroy(dev);
1005 nouveau_card_takedown(dev); 1083 nouveau_card_takedown(dev);
1006 1084
1007 iounmap(dev_priv->mmio); 1085 iounmap(dev_priv->mmio);
@@ -1031,7 +1109,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1031 case NOUVEAU_GETPARAM_BUS_TYPE: 1109 case NOUVEAU_GETPARAM_BUS_TYPE:
1032 if (drm_pci_device_is_agp(dev)) 1110 if (drm_pci_device_is_agp(dev))
1033 getparam->value = NV_AGP; 1111 getparam->value = NV_AGP;
1034 else if (drm_pci_device_is_pcie(dev)) 1112 else if (pci_is_pcie(dev->pdev))
1035 getparam->value = NV_PCIE; 1113 getparam->value = NV_PCIE;
1036 else 1114 else
1037 getparam->value = NV_PCI; 1115 getparam->value = NV_PCI;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 649b0413b09f..081ca7b03e8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -43,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
43 43
 44 /* Set the default sensor's constants */ 44 /* Set the default sensor's constants */
45 sensor->offset_constant = 0; 45 sensor->offset_constant = 0;
46 sensor->offset_mult = 1; 46 sensor->offset_mult = 0;
47 sensor->offset_div = 1; 47 sensor->offset_div = 1;
48 sensor->slope_mult = 1; 48 sensor->slope_mult = 1;
49 sensor->slope_div = 1; 49 sensor->slope_div = 1;
@@ -99,6 +99,13 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
99 sensor->slope_mult = 431; 99 sensor->slope_mult = 431;
100 sensor->slope_div = 10000; 100 sensor->slope_div = 10000;
101 break; 101 break;
102
103 case 0x67:
104 sensor->offset_mult = -26149;
105 sensor->offset_div = 100;
106 sensor->slope_mult = 484;
107 sensor->slope_div = 10000;
108 break;
102 } 109 }
103 } 110 }
104 111
@@ -109,7 +116,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
109 116
110 /* Read the entries from the table */ 117 /* Read the entries from the table */
111 for (i = 0; i < entries; i++) { 118 for (i = 0; i < entries; i++) {
112 u16 value = ROM16(temp[1]); 119 s16 value = ROM16(temp[1]);
113 120
114 switch (temp[0]) { 121 switch (temp[0]) {
115 case 0x01: 122 case 0x01:
@@ -160,8 +167,8 @@ nv40_sensor_setup(struct drm_device *dev)
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 167 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 168 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
162 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; 169 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
163 u32 offset = sensor->offset_mult / sensor->offset_div; 170 s32 offset = sensor->offset_mult / sensor->offset_div;
164 u32 sensor_calibration; 171 s32 sensor_calibration;
165 172
166 /* set up the sensors */ 173 /* set up the sensors */
167 sensor_calibration = 120 - offset - sensor->offset_constant; 174 sensor_calibration = 120 - offset - sensor->offset_constant;
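
The switch from u16/u32 to s16/s32 above matters because the new 0x67 entry carries a negative offset (offset_mult = -26149): with unsigned types, both the ROM16() table values and the offset_mult / offset_div division would wrap instead of going negative. A standalone sketch of the arithmetic, assuming a linear calibration of the form raw * slope_mult/slope_div + offset_mult/offset_div + offset_constant (the exact evaluation lives elsewhere in nouveau_temp.c, and the raw reading here is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* constants from the 0x67 case added above */
	int32_t offset_mult = -26149, offset_div = 100;
	int32_t slope_mult = 484, slope_div = 10000;
	int32_t offset_constant = 0;
	int32_t raw = 6633;	/* hypothetical raw sensor reading */

	/* signed division behaves as intended... */
	int32_t temp = raw * slope_mult / slope_div
		     + offset_mult / offset_div + offset_constant;
	printf("signed:   %d\n", temp);

	/* ...whereas the old unsigned math wraps the negative offset */
	uint32_t bad = (uint32_t)raw * slope_mult / slope_div
		     + (uint32_t)offset_mult / offset_div;
	printf("unsigned: %u (nonsense)\n", bad);
	return 0;
}
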
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 519a6b4bba46..244fd38fdb84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -369,23 +369,26 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
369} 369}
370 370
371static void 371static void
372nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) 372nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
373{ 373{
374 struct nouveau_vm_pgd *vpgd, *tmp; 374 struct nouveau_vm_pgd *vpgd, *tmp;
375 struct nouveau_gpuobj *pgd = NULL;
375 376
376 if (!pgd) 377 if (!mpgd)
377 return; 378 return;
378 379
379 mutex_lock(&vm->mm->mutex); 380 mutex_lock(&vm->mm->mutex);
380 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 381 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
381 if (vpgd->obj != pgd) 382 if (vpgd->obj == mpgd) {
382 continue; 383 pgd = vpgd->obj;
383 384 list_del(&vpgd->head);
384 list_del(&vpgd->head); 385 kfree(vpgd);
385 nouveau_gpuobj_ref(NULL, &vpgd->obj); 386 break;
386 kfree(vpgd); 387 }
387 } 388 }
388 mutex_unlock(&vm->mm->mutex); 389 mutex_unlock(&vm->mm->mutex);
390
391 nouveau_gpuobj_ref(NULL, &pgd);
389} 392}
390 393
391static void 394static void
@@ -396,8 +399,8 @@ nouveau_vm_del(struct nouveau_vm *vm)
396 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 399 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
397 nouveau_vm_unlink(vm, vpgd->obj); 400 nouveau_vm_unlink(vm, vpgd->obj);
398 } 401 }
399 WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
400 402
403 nouveau_mm_fini(&vm->mm);
401 kfree(vm->pgt); 404 kfree(vm->pgt);
402 kfree(vm); 405 kfree(vm);
403} 406}
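
The rework of nouveau_vm_unlink() above also changes where the reference is dropped: the matching pgd is stashed while the list is walked under vm->mm->mutex, and nouveau_gpuobj_ref(NULL, &pgd) runs only after the unlock, so the final-release path never executes inside the lock. A standalone sketch of the pattern with a pthread mutex and a hypothetical refcounted object:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void obj_unref(struct obj **po)
{
	if (*po && --(*po)->refs == 0)
		free(*po);	/* in the kernel this may sleep */
	*po = NULL;
}

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *the_node;	/* stands in for a vm->pgd_list entry */

static void unlink_node(struct obj *match)
{
	struct obj *victim = NULL;

	pthread_mutex_lock(&list_lock);
	if (the_node == match) {
		victim = the_node;	/* unlink under the lock... */
		the_node = NULL;
	}
	pthread_mutex_unlock(&list_lock);

	obj_unref(&victim);		/* ...drop the ref outside it */
}

int main(void)
{
	the_node = calloc(1, sizeof(*the_node));
	the_node->refs = 1;
	unlink_node(the_node);
	puts("unlinked and released outside the lock");
	return 0;
}
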
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index c48a9fc2b47b..579ca8cc223c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -41,6 +41,8 @@ struct nouveau_vm_pgd {
41}; 41};
42 42
43struct nouveau_vma { 43struct nouveau_vma {
44 struct list_head head;
45 int refcount;
44 struct nouveau_vm *vm; 46 struct nouveau_vm *vm;
45 struct nouveau_mm_node *node; 47 struct nouveau_mm_node *node;
46 u64 offset; 48 u64 offset;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index f1a3ae491995..118261d4927a 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1035,7 +1035,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1035 drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); 1035 drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
1036 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 1036 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
1037 1037
1038 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 1038 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
1039 0, 0x0000, &nv_crtc->cursor.nvbo); 1039 0, 0x0000, &nv_crtc->cursor.nvbo);
1040 if (!ret) { 1040 if (!ret) {
1041 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 1041 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 3626ee7db3ba..dbdea8ed3925 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -450,13 +450,13 @@ nv04_graph_context_del(struct nouveau_channel *chan, int engine)
450 unsigned long flags; 450 unsigned long flags;
451 451
452 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 452 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
453 nv04_graph_fifo_access(dev, false); 453 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
454 454
455 /* Unload the context if it's the currently active one */ 455 /* Unload the context if it's the currently active one */
456 if (nv04_graph_channel(dev) == chan) 456 if (nv04_graph_channel(dev) == chan)
457 nv04_graph_unload_context(dev); 457 nv04_graph_unload_context(dev);
458 458
459 nv04_graph_fifo_access(dev, true); 459 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
460 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 460 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
461 461
462 /* Free the context resources */ 462 /* Free the context resources */
@@ -538,24 +538,18 @@ nv04_graph_init(struct drm_device *dev, int engine)
538} 538}
539 539
540static int 540static int
541nv04_graph_fini(struct drm_device *dev, int engine) 541nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
542{ 542{
543 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
544 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
545 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
546 return -EBUSY;
547 }
543 nv04_graph_unload_context(dev); 548 nv04_graph_unload_context(dev);
544 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 549 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
545 return 0; 550 return 0;
546} 551}
547 552
548void
549nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
550{
551 if (enabled)
552 nv_wr32(dev, NV04_PGRAPH_FIFO,
553 nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
554 else
555 nv_wr32(dev, NV04_PGRAPH_FIFO,
556 nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
557}
558
559static int 553static int
560nv04_graph_mthd_set_ref(struct nouveau_channel *chan, 554nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
561 u32 class, u32 mthd, u32 data) 555 u32 class, u32 mthd, u32 data)
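
The new fini signature threads a suspend flag down so each engine can refuse to power off while busy: PGRAPH FIFO access is masked off, the engine is polled for idle, and if it never settles during a suspend the mask is restored and -EBUSY propagates up to abort the suspend. A standalone sketch of that poll-or-bail shape, with a stubbed register read standing in for nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* stub: pretend the engine drains after a few polls */
static unsigned status_reads;
static unsigned read_pgraph_status(void)
{
	return status_reads++ < 3 ? 0xdeadbeef : 0;
}

static bool wait_idle(int tries)
{
	while (tries--)
		if (read_pgraph_status() == 0)
			return true;
	return false;
}

static int graph_fini(bool suspend)
{
	/* fifo access off: nv_mask(..., 0x00000001, 0x00000000) */
	if (!wait_idle(100) && suspend) {
		/* fifo access back on, refuse to suspend */
		return -EBUSY;
	}
	/* unload context, mask interrupts */
	return 0;
}

int main(void)
{
	printf("fini(suspend=true) -> %d\n", graph_fini(true));
	return 0;
}
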
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index b8611b955313..c1248e0740a3 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -28,6 +28,31 @@ int nv04_instmem_init(struct drm_device *dev)
28 /* RAMIN always available */ 28 /* RAMIN always available */
29 dev_priv->ramin_available = true; 29 dev_priv->ramin_available = true;
30 30
31 /* Reserve space at end of VRAM for PRAMIN */
32 if (dev_priv->card_type >= NV_40) {
33 u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
34 u32 rsvd;
35
36 /* estimate grctx size, the magics come from nv40_grctx.c */
37 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
38 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
39 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
40 else rsvd = 0x4a40 * vs;
41 rsvd += 16 * 1024;
42 rsvd *= dev_priv->engine.fifo.channels;
43
44 /* pciegart table */
45 if (pci_is_pcie(dev->pdev))
46 rsvd += 512 * 1024;
47
48 /* object storage */
49 rsvd += 512 * 1024;
50
51 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
52 } else {
53 dev_priv->ramin_rsvd_vram = 512 * 1024;
54 }
55
31 /* Setup shared RAMHT */ 56 /* Setup shared RAMHT */
32 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, 57 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
33 NVOBJ_FLAG_ZERO_ALLOC, &ramht); 58 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
@@ -112,7 +137,8 @@ nv04_instmem_resume(struct drm_device *dev)
112} 137}
113 138
114int 139int
115nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) 140nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
141 u32 size, u32 align)
116{ 142{
117 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 143 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
118 struct drm_mm_node *ramin = NULL; 144 struct drm_mm_node *ramin = NULL;
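
The PRAMIN reservation above is a worst-case estimate rather than an exact figure: a per-channel context size (scaled by the shader-unit count read from register 0x1540) plus 16 KiB, multiplied by the FIFO channel count, then fixed slabs for the PCIe GART table and object storage, rounded up to a page. A standalone sketch of the same arithmetic using the magic numbers from the hunk (register value and channel count here are made up; __builtin_popcount stands in for hweight8()):

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_4k(uint32_t v) { return (v + 4095) & ~4095u; }

int main(void)
{
	uint32_t reg_1540 = 0x00003f00;	/* hypothetical register value */
	uint32_t vs = __builtin_popcount((reg_1540 & 0x0000ff00) >> 8);
	int chipset = 0x43, channels = 32, is_pcie = 1;
	int nv44_class = 0;	/* would come from nv44_graph_class() */
	uint32_t rsvd;

	/* grctx estimate, magics from nv40_grctx.c (as in the hunk) */
	if (chipset == 0x40)       rsvd = 0x6aa0 * vs;
	else if (chipset < 0x43)   rsvd = 0x4f00 * vs;
	else if (nv44_class)       rsvd = 0x4980 * vs;
	else                       rsvd = 0x4a40 * vs;
	rsvd += 16 * 1024;
	rsvd *= channels;

	if (is_pcie)
		rsvd += 512 * 1024;	/* pciegart table */
	rsvd += 512 * 1024;		/* object storage */

	printf("ramin_rsvd_vram = %u KiB\n", round_up_4k(rsvd) >> 10);
	return 0;
}
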
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 0930c6cb88e0..7255e4a4d3f3 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -708,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
708 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c); 708 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
709 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst); 709 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
710 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); 710 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
711 nv04_graph_fifo_access(dev, true); 711 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
712 nv04_graph_fifo_access(dev, false); 712 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
713 713
714 /* Restore the FIFO state */ 714 /* Restore the FIFO state */
715 for (i = 0; i < ARRAY_SIZE(fifo); i++) 715 for (i = 0; i < ARRAY_SIZE(fifo); i++)
@@ -879,13 +879,13 @@ nv10_graph_context_del(struct nouveau_channel *chan, int engine)
879 unsigned long flags; 879 unsigned long flags;
880 880
881 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 881 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
882 nv04_graph_fifo_access(dev, false); 882 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
883 883
884 /* Unload the context if it's the currently active one */ 884 /* Unload the context if it's the currently active one */
885 if (nv10_graph_channel(dev) == chan) 885 if (nv10_graph_channel(dev) == chan)
886 nv10_graph_unload_context(dev); 886 nv10_graph_unload_context(dev);
887 887
888 nv04_graph_fifo_access(dev, true); 888 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
889 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 889 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
890 890
891 /* Free the context resources */ 891 /* Free the context resources */
@@ -957,8 +957,13 @@ nv10_graph_init(struct drm_device *dev, int engine)
957} 957}
958 958
959static int 959static int
960nv10_graph_fini(struct drm_device *dev, int engine) 960nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
961{ 961{
962 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
963 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
964 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
965 return -EBUSY;
966 }
962 nv10_graph_unload_context(dev); 967 nv10_graph_unload_context(dev);
963 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 968 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
964 return 0; 969 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index affc7d7dd029..183e37512ef9 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -454,13 +454,13 @@ nv20_graph_context_del(struct nouveau_channel *chan, int engine)
454 unsigned long flags; 454 unsigned long flags;
455 455
456 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 456 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
457 nv04_graph_fifo_access(dev, false); 457 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
458 458
459 /* Unload the context if it's the currently active one */ 459 /* Unload the context if it's the currently active one */
460 if (nv10_graph_channel(dev) == chan) 460 if (nv10_graph_channel(dev) == chan)
461 nv20_graph_unload_context(dev); 461 nv20_graph_unload_context(dev);
462 462
463 nv04_graph_fifo_access(dev, true); 463 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
464 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 464 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
465 465
466 /* Free the context resources */ 466 /* Free the context resources */
@@ -654,8 +654,13 @@ nv30_graph_init(struct drm_device *dev, int engine)
654} 654}
655 655
656int 656int
657nv20_graph_fini(struct drm_device *dev, int engine) 657nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
658{ 658{
659 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
660 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
661 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
662 return -EBUSY;
663 }
659 nv20_graph_unload_context(dev); 664 nv20_graph_unload_context(dev);
660 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); 665 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
661 return 0; 666 return 0;
@@ -753,6 +758,7 @@ nv20_graph_create(struct drm_device *dev)
753 break; 758 break;
754 default: 759 default:
755 NV_ERROR(dev, "PGRAPH: unknown chipset\n"); 760 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
761 kfree(pgraph);
756 return 0; 762 return 0;
757 } 763 }
758 } else { 764 } else {
@@ -774,6 +780,7 @@ nv20_graph_create(struct drm_device *dev)
774 break; 780 break;
775 default: 781 default:
776 NV_ERROR(dev, "PGRAPH: unknown chipset\n"); 782 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
783 kfree(pgraph);
777 return 0; 784 return 0;
778 } 785 }
779 } 786 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 5beb01b8ace1..ba14a93d8afa 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -35,89 +35,6 @@ struct nv40_graph_engine {
35 u32 grctx_size; 35 u32 grctx_size;
36}; 36};
37 37
38static struct nouveau_channel *
39nv40_graph_channel(struct drm_device *dev)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *grctx;
43 uint32_t inst;
44 int i;
45
46 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
47 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
48 return NULL;
49 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
50
51 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
52 if (!dev_priv->channels.ptr[i])
53 continue;
54
55 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
56 if (grctx && grctx->pinst == inst)
57 return dev_priv->channels.ptr[i];
58 }
59
60 return NULL;
61}
62
63static int
64nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
65{
66 uint32_t old_cp, tv = 1000, tmp;
67 int i;
68
69 old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
70 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
71
72 tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
73 tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
74 NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
75 nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
76
77 tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
78 tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
79 nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
80
81 nouveau_wait_for_idle(dev);
82
83 for (i = 0; i < tv; i++) {
84 if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
85 break;
86 }
87
88 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
89
90 if (i == tv) {
91 uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
92 NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
93 NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
94 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
95 ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
96 NV_ERROR(dev, "0x40030C = 0x%08x\n",
97 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
98 return -EBUSY;
99 }
100
101 return 0;
102}
103
104static int
105nv40_graph_unload_context(struct drm_device *dev)
106{
107 uint32_t inst;
108 int ret;
109
110 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
111 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
112 return 0;
113 inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
114
115 ret = nv40_graph_transfer_context(dev, inst, 1);
116
117 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
118 return ret;
119}
120
121static int 38static int
122nv40_graph_context_new(struct nouveau_channel *chan, int engine) 39nv40_graph_context_new(struct nouveau_channel *chan, int engine)
123{ 40{
@@ -163,16 +80,16 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
163 struct nouveau_gpuobj *grctx = chan->engctx[engine]; 80 struct nouveau_gpuobj *grctx = chan->engctx[engine];
164 struct drm_device *dev = chan->dev; 81 struct drm_device *dev = chan->dev;
165 struct drm_nouveau_private *dev_priv = dev->dev_private; 82 struct drm_nouveau_private *dev_priv = dev->dev_private;
83 u32 inst = 0x01000000 | (grctx->pinst >> 4);
166 unsigned long flags; 84 unsigned long flags;
167 85
168 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 86 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
169 nv04_graph_fifo_access(dev, false); 87 nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
170 88 if (nv_rd32(dev, 0x40032c) == inst)
171 /* Unload the context if it's the currently active one */ 89 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
172 if (nv40_graph_channel(dev) == chan) 90 if (nv_rd32(dev, 0x400330) == inst)
173 nv40_graph_unload_context(dev); 91 nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
174 92 nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
175 nv04_graph_fifo_access(dev, true);
176 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 93 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
177 94
178 /* Free the context resources */ 95 /* Free the context resources */
@@ -429,9 +346,20 @@ nv40_graph_init(struct drm_device *dev, int engine)
429} 346}
430 347
431static int 348static int
432nv40_graph_fini(struct drm_device *dev, int engine) 349nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
433{ 350{
434 nv40_graph_unload_context(dev); 351 u32 inst = nv_rd32(dev, 0x40032c);
352 if (inst & 0x01000000) {
353 nv_wr32(dev, 0x400720, 0x00000000);
354 nv_wr32(dev, 0x400784, inst);
355 nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
356 nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
357 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
358 u32 insn = nv_rd32(dev, 0x400308);
359 NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
360 }
361 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
362 }
435 return 0; 363 return 0;
436} 364}
437 365
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c
index 6d2af292a2e3..ad03a0e1fc7d 100644
--- a/drivers/gpu/drm/nouveau/nv40_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c
@@ -137,7 +137,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
137} 137}
138 138
139static int 139static int
140nv40_mpeg_fini(struct drm_device *dev, int engine) 140nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
141{ 141{
142 /*XXX: context save? */ 142 /*XXX: context save? */
143 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 143 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index ebabacf38da9..46ad59ea2185 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
104 OUT_RING(evo, nv_crtc->lut.depth == 8 ? 104 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
105 NV50_EVO_CRTC_CLUT_MODE_OFF : 105 NV50_EVO_CRTC_CLUT_MODE_OFF :
106 NV50_EVO_CRTC_CLUT_MODE_ON); 106 NV50_EVO_CRTC_CLUT_MODE_ON);
107 OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8); 107 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
108 if (dev_priv->chipset != 0x50) { 108 if (dev_priv->chipset != 0x50) {
109 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); 109 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
110 OUT_RING(evo, NvEvoVRAM); 110 OUT_RING(evo, NvEvoVRAM);
@@ -372,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
372 372
373 nouveau_bo_unmap(cursor); 373 nouveau_bo_unmap(cursor);
374 374
375 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT); 375 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
376 nv_crtc->cursor.show(nv_crtc, true); 376 nv_crtc->cursor.show(nv_crtc, true);
377 377
378out: 378out:
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
546 } 546 }
547 } 547 }
548 548
549 nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT; 549 nv_crtc->fb.offset = fb->nvbo->bo.offset;
550 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); 550 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
551 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 551 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
552 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 552 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -747,7 +747,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
747 } 747 }
748 nv_crtc->lut.depth = 0; 748 nv_crtc->lut.depth = 0;
749 749
750 ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM, 750 ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
751 0, 0x0000, &nv_crtc->lut.nvbo); 751 0, 0x0000, &nv_crtc->lut.nvbo);
752 if (!ret) { 752 if (!ret) {
753 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); 753 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
@@ -773,7 +773,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
773 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); 773 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
774 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); 774 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
775 775
776 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 776 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
777 0, 0x0000, &nv_crtc->cursor.nvbo); 777 0, 0x0000, &nv_crtc->cursor.nvbo);
778 if (!ret) { 778 if (!ret) {
779 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 779 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 08da478ba544..db1a5f4b711d 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -415,8 +415,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
415 415
416 /* synchronise with the rendering channel, if necessary */ 416 /* synchronise with the rendering channel, if necessary */
417 if (likely(chan)) { 417 if (likely(chan)) {
418 u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
419
420 ret = RING_SPACE(chan, 10); 418 ret = RING_SPACE(chan, 10);
421 if (ret) { 419 if (ret) {
422 WIND_RING(evo); 420 WIND_RING(evo);
@@ -438,6 +436,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
438 else 436 else
439 OUT_RING (chan, chan->vram_handle); 437 OUT_RING (chan, chan->vram_handle);
440 } else { 438 } else {
439 u64 offset = chan->dispc_vma[nv_crtc->index].offset;
440 offset += dispc->sem.offset;
441 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4); 441 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
442 OUT_RING (chan, upper_32_bits(offset)); 442 OUT_RING (chan, upper_32_bits(offset));
443 OUT_RING (chan, lower_32_bits(offset)); 443 OUT_RING (chan, lower_32_bits(offset));
@@ -484,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
484 OUT_RING (evo, 0x00000000); 484 OUT_RING (evo, 0x00000000);
485 OUT_RING (evo, 0x00000000); 485 OUT_RING (evo, 0x00000000);
486 BEGIN_RING(evo, 0, 0x0800, 5); 486 BEGIN_RING(evo, 0, 0x0800, 5);
487 OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8); 487 OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
488 OUT_RING (evo, 0); 488 OUT_RING (evo, 0);
489 OUT_RING (evo, (fb->height << 16) | fb->width); 489 OUT_RING (evo, (fb->height << 16) | fb->width);
490 OUT_RING (evo, nv_fb->r_pitch); 490 OUT_RING (evo, nv_fb->r_pitch);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c8e83c1a4de8..c99d9751880c 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -38,6 +38,7 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
38 return; 38 return;
39 *pevo = NULL; 39 *pevo = NULL;
40 40
41 nouveau_ramht_ref(NULL, &evo->ramht, evo);
41 nouveau_gpuobj_channel_takedown(evo); 42 nouveau_gpuobj_channel_takedown(evo);
42 nouveau_bo_unmap(evo->pushbuf_bo); 43 nouveau_bo_unmap(evo->pushbuf_bo);
43 nouveau_bo_ref(NULL, &evo->pushbuf_bo); 44 nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -116,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
116 evo->user_get = 4; 117 evo->user_get = 4;
117 evo->user_put = 0; 118 evo->user_put = 0;
118 119
119 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, 120 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
120 &evo->pushbuf_bo); 121 &evo->pushbuf_bo);
121 if (ret == 0) 122 if (ret == 0)
122 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); 123 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -153,7 +154,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
153{ 154{
154 struct drm_device *dev = evo->dev; 155 struct drm_device *dev = evo->dev;
155 int id = evo->id, ret, i; 156 int id = evo->id, ret, i;
156 u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; 157 u64 pushbuf = evo->pushbuf_bo->bo.offset;
157 u32 tmp; 158 u32 tmp;
158 159
159 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); 160 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -331,16 +332,15 @@ nv50_evo_create(struct drm_device *dev)
331 if (ret) 332 if (ret)
332 goto err; 333 goto err;
333 334
334 ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM, 335 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
335 0, 0x0000, &dispc->sem.bo); 336 0, 0x0000, &dispc->sem.bo);
336 if (!ret) { 337 if (!ret) {
337 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
338
339 ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); 338 ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
340 if (!ret) 339 if (!ret)
341 ret = nouveau_bo_map(dispc->sem.bo); 340 ret = nouveau_bo_map(dispc->sem.bo);
342 if (ret) 341 if (ret)
343 nouveau_bo_ref(NULL, &dispc->sem.bo); 342 nouveau_bo_ref(NULL, &dispc->sem.bo);
343 offset = dispc->sem.bo->bo.offset;
344 } 344 }
345 345
346 if (ret) 346 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 791ded1c5c6d..dc75a7206524 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
159 struct drm_device *dev = nfbdev->dev; 159 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel; 161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; 162 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
163 int ret, format; 163 int ret, format;
164 164
165 switch (info->var.bits_per_pixel) { 165 switch (info->var.bits_per_pixel) {
@@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
247 OUT_RING(chan, info->fix.line_length); 247 OUT_RING(chan, info->fix.line_length);
248 OUT_RING(chan, info->var.xres_virtual); 248 OUT_RING(chan, info->var.xres_virtual);
249 OUT_RING(chan, info->var.yres_virtual); 249 OUT_RING(chan, info->var.yres_virtual);
250 OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); 250 OUT_RING(chan, upper_32_bits(fb->vma.offset));
251 OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); 251 OUT_RING(chan, lower_32_bits(fb->vma.offset));
252 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 252 BEGIN_RING(chan, NvSub2D, 0x0230, 2);
253 OUT_RING(chan, format); 253 OUT_RING(chan, format);
254 OUT_RING(chan, 1); 254 OUT_RING(chan, 1);
@@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
256 OUT_RING(chan, info->fix.line_length); 256 OUT_RING(chan, info->fix.line_length);
257 OUT_RING(chan, info->var.xres_virtual); 257 OUT_RING(chan, info->var.xres_virtual);
258 OUT_RING(chan, info->var.yres_virtual); 258 OUT_RING(chan, info->var.yres_virtual);
259 OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); 259 OUT_RING(chan, upper_32_bits(fb->vma.offset));
260 OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); 260 OUT_RING(chan, lower_32_bits(fb->vma.offset));
261 261
262 return 0; 262 return 0;
263} 263}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 40680f2b4231..d43c46caa76e 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -124,7 +124,6 @@ static void
124nv50_graph_init_reset(struct drm_device *dev) 124nv50_graph_init_reset(struct drm_device *dev)
125{ 125{
126 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); 126 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
127
128 NV_DEBUG(dev, "\n"); 127 NV_DEBUG(dev, "\n");
129 128
130 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); 129 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
@@ -254,9 +253,13 @@ nv50_graph_init(struct drm_device *dev, int engine)
254} 253}
255 254
256static int 255static int
257nv50_graph_fini(struct drm_device *dev, int engine) 256nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
258{ 257{
259 NV_DEBUG(dev, "\n"); 258 nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
259 if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
260 nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
261 return -EBUSY;
262 }
260 nv50_graph_unload_context(dev); 263 nv50_graph_unload_context(dev);
261 nv_wr32(dev, 0x40013c, 0x00000000); 264 nv_wr32(dev, 0x40013c, 0x00000000);
262 return 0; 265 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 4f95a1e5822e..a7c12c94a5a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -305,9 +305,9 @@ struct nv50_gpuobj_node {
305 u32 align; 305 u32 align;
306}; 306};
307 307
308
309int 308int
310nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) 309nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
310 u32 size, u32 align)
311{ 311{
312 struct drm_device *dev = gpuobj->dev; 312 struct drm_device *dev = gpuobj->dev;
313 struct drm_nouveau_private *dev_priv = dev->dev_private; 313 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -336,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) 336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
337 flags |= NV_MEM_ACCESS_SYS; 337 flags |= NV_MEM_ACCESS_SYS;
338 338
339 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags, 339 ret = nouveau_vm_get(chan->vm, size, 12, flags,
340 &node->chan_vma); 340 &node->chan_vma);
341 if (ret) { 341 if (ret) {
342 vram->put(dev, &node->vram); 342 vram->put(dev, &node->vram);
@@ -345,7 +345,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
345 } 345 }
346 346
347 nouveau_vm_map(&node->chan_vma, node->vram); 347 nouveau_vm_map(&node->chan_vma, node->vram);
348 gpuobj->vinst = node->chan_vma.offset; 348 gpuobj->linst = node->chan_vma.offset;
349 } 349 }
350 350
351 gpuobj->size = size; 351 gpuobj->size = size;
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index 1dc5913f78c5..b57a2d180ad2 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -160,7 +160,7 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
160} 160}
161 161
162static int 162static int
163nv50_mpeg_fini(struct drm_device *dev, int engine) 163nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
164{ 164{
165 /*XXX: context save for s/r */ 165 /*XXX: context save for s/r */
166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000); 166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c25c59386420..ffe8b483b7b0 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -318,6 +318,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
318 uint32_t tmp; 318 uint32_t tmp;
319 319
320 tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); 320 tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
321 if (!tmp)
322 tmp = nv_rd32(dev, 0x610798 + (or * 8));
321 323
322 switch ((tmp & 0x00000f00) >> 8) { 324 switch ((tmp & 0x00000f00) >> 8) {
323 case 8: 325 case 8:
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 1a0dd491a0e4..40b84f22d819 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -156,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
156 pinstmem->flush(vm->dev); 156 pinstmem->flush(vm->dev);
157 157
158 /* BAR */ 158 /* BAR */
159 if (vm != dev_priv->chan_vm) { 159 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
160 nv50_vm_flush_engine(vm->dev, 6); 160 nv50_vm_flush_engine(vm->dev, 6);
161 return; 161 return;
162 } 162 }
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index ffbc3d8cf5be..af32daecd1ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,9 +51,7 @@ void
51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem) 51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
52{ 52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private; 53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 54 struct nouveau_mm *mm = dev_priv->engine.vram.mm;
55 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
56 struct nouveau_mm *mm = man->priv;
57 struct nouveau_mm_node *this; 55 struct nouveau_mm_node *this;
58 struct nouveau_mem *mem; 56 struct nouveau_mem *mem;
59 57
@@ -84,9 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
84 u32 memtype, struct nouveau_mem **pmem) 82 u32 memtype, struct nouveau_mem **pmem)
85{ 83{
86 struct drm_nouveau_private *dev_priv = dev->dev_private; 84 struct drm_nouveau_private *dev_priv = dev->dev_private;
87 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 85 struct nouveau_mm *mm = dev_priv->engine.vram.mm;
88 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
89 struct nouveau_mm *mm = man->priv;
90 struct nouveau_mm_node *r; 86 struct nouveau_mm_node *r;
91 struct nouveau_mem *mem; 87 struct nouveau_mem *mem;
92 int comp = (memtype & 0x300) >> 8; 88 int comp = (memtype & 0x300) >> 8;
@@ -190,22 +186,35 @@ int
190nv50_vram_init(struct drm_device *dev) 186nv50_vram_init(struct drm_device *dev)
191{ 187{
192 struct drm_nouveau_private *dev_priv = dev->dev_private; 188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
190 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
191 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
192 u32 rblock, length;
193 193
194 dev_priv->vram_size = nv_rd32(dev, 0x10020c); 194 dev_priv->vram_size = nv_rd32(dev, 0x10020c);
195 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; 195 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
196 dev_priv->vram_size &= 0xffffffff00ULL; 196 dev_priv->vram_size &= 0xffffffff00ULL;
197 197
 198 switch (dev_priv->chipset) { 198 /* IGPs: no funky reordering happens here, they don't have VRAM */
199 case 0xaa: 199 if (dev_priv->chipset == 0xaa ||
200 case 0xac: 200 dev_priv->chipset == 0xac ||
201 case 0xaf: 201 dev_priv->chipset == 0xaf) {
202 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; 202 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
203 dev_priv->vram_rblock_size = 4096; 203 rblock = 4096 >> 12;
204 break; 204 } else {
205 default: 205 rblock = nv50_vram_rblock(dev) >> 12;
206 dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
207 break;
208 } 206 }
209 207
210 return 0; 208 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
209
210 return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
211}
212
213void
214nv50_vram_fini(struct drm_device *dev)
215{
216 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
218
219 nouveau_mm_fini(&vram->mm);
211} 220}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index 75b809a51748..edece9c616eb 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -138,7 +138,7 @@ nv84_crypt_isr(struct drm_device *dev)
138} 138}
139 139
140static int 140static int
141nv84_crypt_fini(struct drm_device *dev, int engine) 141nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
142{ 142{
143 nv_wr32(dev, 0x102140, 0x00000000); 143 nv_wr32(dev, 0x102140, 0x00000000);
144 return 0; 144 return 0;
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index b86820a61220..8f356d58e409 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -140,7 +140,7 @@ nva3_copy_init(struct drm_device *dev, int engine)
140} 140}
141 141
142static int 142static int
143nva3_copy_fini(struct drm_device *dev, int engine) 143nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
144{ 144{
145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000); 145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
146 146
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
index 208fa7ab3f42..dddf006f6d88 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.c
@@ -48,14 +48,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
48 struct nouveau_gpuobj *ctx = NULL; 48 struct nouveau_gpuobj *ctx = NULL;
49 int ret; 49 int ret;
50 50
51 ret = nouveau_gpuobj_new(dev, NULL, 256, 256, 51 ret = nouveau_gpuobj_new(dev, chan, 256, 256,
52 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER | 52 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
53 NVOBJ_FLAG_ZERO_ALLOC, &ctx); 53 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
54 if (ret) 54 if (ret)
55 return ret; 55 return ret;
56 56
57 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst)); 57 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
58 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst)); 58 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
59 dev_priv->engine.instmem.flush(dev); 59 dev_priv->engine.instmem.flush(dev);
60 60
61 chan->engctx[engine] = ctx; 61 chan->engctx[engine] = ctx;
@@ -127,7 +127,7 @@ nvc0_copy_init(struct drm_device *dev, int engine)
127} 127}
128 128
129static int 129static int
130nvc0_copy_fini(struct drm_device *dev, int engine) 130nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
131{ 131{
132 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine); 132 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
133 133
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 26a996025dd2..08e6b118f021 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Red Hat Inc. 2 * Copyright 2011 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -23,16 +23,80 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26 26#include "drm.h"
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_drm.h"
29
30struct nvc0_fb_priv {
31 struct page *r100c10_page;
32 dma_addr_t r100c10;
33};
34
35static void
36nvc0_fb_destroy(struct drm_device *dev)
37{
38 struct drm_nouveau_private *dev_priv = dev->dev_private;
39 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
40 struct nvc0_fb_priv *priv = pfb->priv;
41
42 if (priv->r100c10_page) {
43 pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
44 PCI_DMA_BIDIRECTIONAL);
45 __free_page(priv->r100c10_page);
46 }
47
48 kfree(priv);
49 pfb->priv = NULL;
50}
51
52static int
53nvc0_fb_create(struct drm_device *dev)
54{
55 struct drm_nouveau_private *dev_priv = dev->dev_private;
56 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
57 struct nvc0_fb_priv *priv;
58
59 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
60 if (!priv)
61 return -ENOMEM;
62 pfb->priv = priv;
63
64 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
65 if (!priv->r100c10_page) {
66 nvc0_fb_destroy(dev);
67 return -ENOMEM;
68 }
69
70 priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
71 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
72 if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
73 nvc0_fb_destroy(dev);
74 return -EFAULT;
75 }
76
77 return 0;
78}
28 79
29int 80int
30nvc0_fb_init(struct drm_device *dev) 81nvc0_fb_init(struct drm_device *dev)
31{ 82{
83 struct drm_nouveau_private *dev_priv = dev->dev_private;
84 struct nvc0_fb_priv *priv;
85 int ret;
86
87 if (!dev_priv->engine.fb.priv) {
88 ret = nvc0_fb_create(dev);
89 if (ret)
90 return ret;
91 }
92 priv = dev_priv->engine.fb.priv;
93
94 nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
32 return 0; 95 return 0;
33} 96}
34 97
35void 98void
36nvc0_fb_takedown(struct drm_device *dev) 99nvc0_fb_takedown(struct drm_device *dev)
37{ 100{
101 nvc0_fb_destroy(dev);
38} 102}
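
nvc0_fb_create() above shows a create/destroy pairing where the destructor doubles as the error-unwind path, so it has to tolerate a half-built object (note the diff checks priv->r100c10_page before unmapping). A standalone sketch of that idiom, with plain malloc/free standing in for the alloc_page()/pci_map_page() resources the real code manages:

#include <stdio.h>
#include <stdlib.h>

struct priv { void *page; void *mapping; };

static void destroy(struct priv **pp)
{
	struct priv *p = *pp;
	if (!p)
		return;
	/* tolerate partial construction, like nvc0_fb_destroy() */
	free(p->mapping);
	free(p->page);
	free(p);
	*pp = NULL;
}

static int create(struct priv **pp)
{
	struct priv *p = calloc(1, sizeof(*p));
	if (!p)
		return -1;
	*pp = p;

	p->page = malloc(4096);		/* stands in for alloc_page() */
	if (!p->page)
		goto fail;
	p->mapping = malloc(64);	/* stands in for pci_map_page() */
	if (!p->mapping)
		goto fail;
	return 0;
fail:
	destroy(pp);			/* single unwind path */
	return -1;
}

int main(void)
{
	struct priv *p = NULL;
	if (create(&p) == 0)
		puts("created");
	destroy(&p);
	return 0;
}
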
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fa5d4c234383..a495e48197ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
159 struct drm_device *dev = nfbdev->dev; 159 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel; 161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; 162 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
163 int ret, format; 163 int ret, format;
164 164
165 ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); 165 ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
203 BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); 203 BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
204 OUT_RING (chan, 0x0000902d); 204 OUT_RING (chan, 0x0000902d);
205 BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); 205 BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
206 OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset)); 206 OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
207 OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset)); 207 OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
208 BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); 208 BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
209 OUT_RING (chan, 0); 209 OUT_RING (chan, 0);
210 BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); 210 BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
249 OUT_RING (chan, info->fix.line_length); 249 OUT_RING (chan, info->fix.line_length);
250 OUT_RING (chan, info->var.xres_virtual); 250 OUT_RING (chan, info->var.xres_virtual);
251 OUT_RING (chan, info->var.yres_virtual); 251 OUT_RING (chan, info->var.yres_virtual);
252 OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); 252 OUT_RING (chan, upper_32_bits(fb->vma.offset));
253 OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); 253 OUT_RING (chan, lower_32_bits(fb->vma.offset));
254 BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); 254 BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
255 OUT_RING (chan, format); 255 OUT_RING (chan, format);
256 OUT_RING (chan, 1); 256 OUT_RING (chan, 1);
@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
260 OUT_RING (chan, info->fix.line_length); 260 OUT_RING (chan, info->fix.line_length);
261 OUT_RING (chan, info->var.xres_virtual); 261 OUT_RING (chan, info->var.xres_virtual);
262 OUT_RING (chan, info->var.yres_virtual); 262 OUT_RING (chan, info->var.yres_virtual);
263 OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); 263 OUT_RING (chan, upper_32_bits(fb->vma.offset));
264 OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); 264 OUT_RING (chan, lower_32_bits(fb->vma.offset));
265 FIRE_RING (chan); 265 FIRE_RING (chan);
266 266
267 return 0; 267 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index fb4f5943e01b..6f9f341c3e86 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev)
210 int i; 210 int i;
211 211
212 for (i = 0; i < 128; i++) { 212 for (i = 0; i < 128; i++) {
213 if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1)) 213 if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
214 continue; 214 continue;
215 215
216 nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000); 216 nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
217 nv_wr32(dev, 0x002634, i); 217 nv_wr32(dev, 0x002634, i);
218 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { 218 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
219 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", 219 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index ca6db204d644..5b2f6f420468 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -28,7 +28,34 @@
28 28
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_mm.h" 30#include "nouveau_mm.h"
31
31#include "nvc0_graph.h" 32#include "nvc0_graph.h"
33#include "nvc0_grhub.fuc.h"
34#include "nvc0_grgpc.fuc.h"
35
36static void
37nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
38{
39 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
40 nv_rd32(dev, base + 0x400));
41 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
42 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
43 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
44 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
45 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
46 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
47}
48
49static void
50nvc0_graph_ctxctl_debug(struct drm_device *dev)
51{
52 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
53 u32 gpc;
54
55 nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
56 for (gpc = 0; gpc < gpcnr; gpc++)
57 nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
58}
32 59
33static int 60static int
34nvc0_graph_load_context(struct nouveau_channel *chan) 61nvc0_graph_load_context(struct nouveau_channel *chan)
@@ -72,24 +99,44 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
72 if (!ctx) 99 if (!ctx)
73 return -ENOMEM; 100 return -ENOMEM;
74 101
75 nvc0_graph_load_context(chan); 102 if (!nouveau_ctxfw) {
76 103 nv_wr32(dev, 0x409840, 0x80000000);
77 nv_wo32(grch->grctx, 0x1c, 1); 104 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
78 nv_wo32(grch->grctx, 0x20, 0); 105 nv_wr32(dev, 0x409504, 0x00000001);
79 nv_wo32(grch->grctx, 0x28, 0); 106 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
80 nv_wo32(grch->grctx, 0x2c, 0); 107 NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
81 dev_priv->engine.instmem.flush(dev); 108 nvc0_graph_ctxctl_debug(dev);
82 109 ret = -EBUSY;
83 ret = nvc0_grctx_generate(chan); 110 goto err;
84 if (ret) { 111 }
85 kfree(ctx); 112 } else {
86 return ret; 113 nvc0_graph_load_context(chan);
114
115 nv_wo32(grch->grctx, 0x1c, 1);
116 nv_wo32(grch->grctx, 0x20, 0);
117 nv_wo32(grch->grctx, 0x28, 0);
118 nv_wo32(grch->grctx, 0x2c, 0);
119 dev_priv->engine.instmem.flush(dev);
87 } 120 }
88 121
89 ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); 122 ret = nvc0_grctx_generate(chan);
90 if (ret) { 123 if (ret)
91 kfree(ctx); 124 goto err;
92 return ret; 125
126 if (!nouveau_ctxfw) {
127 nv_wr32(dev, 0x409840, 0x80000000);
128 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
129 nv_wr32(dev, 0x409504, 0x00000002);
130 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
131 NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
132 nvc0_graph_ctxctl_debug(dev);
133 ret = -EBUSY;
134 goto err;
135 }
136 } else {
137 ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
138 if (ret)
139 goto err;
93 } 140 }
94 141
95 for (i = 0; i < priv->grctx_size; i += 4) 142 for (i = 0; i < priv->grctx_size; i += 4)
@@ -97,6 +144,10 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
97 144
98 priv->grctx_vals = ctx; 145 priv->grctx_vals = ctx;
99 return 0; 146 return 0;
147
148err:
149 kfree(ctx);
150 return ret;
100} 151}
101 152
102static int 153static int
@@ -108,50 +159,50 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
108 int i = 0, gpc, tp, ret; 159 int i = 0, gpc, tp, ret;
109 u32 magic; 160 u32 magic;
110 161
111 ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM, 162 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
112 &grch->unk408004); 163 &grch->unk408004);
113 if (ret) 164 if (ret)
114 return ret; 165 return ret;
115 166
116 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM, 167 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
117 &grch->unk40800c); 168 &grch->unk40800c);
118 if (ret) 169 if (ret)
119 return ret; 170 return ret;
120 171
121 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, 172 ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
122 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, 173 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
123 &grch->unk418810); 174 &grch->unk418810);
124 if (ret) 175 if (ret)
125 return ret; 176 return ret;
126 177
127 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM, 178 ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
128 &grch->mmio); 179 &grch->mmio);
129 if (ret) 180 if (ret)
130 return ret; 181 return ret;
131 182
132 183
133 nv_wo32(grch->mmio, i++ * 4, 0x00408004); 184 nv_wo32(grch->mmio, i++ * 4, 0x00408004);
134 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); 185 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
135 nv_wo32(grch->mmio, i++ * 4, 0x00408008); 186 nv_wo32(grch->mmio, i++ * 4, 0x00408008);
136 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 187 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
137 188
138 nv_wo32(grch->mmio, i++ * 4, 0x0040800c); 189 nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
139 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); 190 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
140 nv_wo32(grch->mmio, i++ * 4, 0x00408010); 191 nv_wo32(grch->mmio, i++ * 4, 0x00408010);
141 nv_wo32(grch->mmio, i++ * 4, 0x80000000); 192 nv_wo32(grch->mmio, i++ * 4, 0x80000000);
142 193
143 nv_wo32(grch->mmio, i++ * 4, 0x00418810); 194 nv_wo32(grch->mmio, i++ * 4, 0x00418810);
144 nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12); 195 nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
145 nv_wo32(grch->mmio, i++ * 4, 0x00419848); 196 nv_wo32(grch->mmio, i++ * 4, 0x00419848);
146 nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12); 197 nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
147 198
148 nv_wo32(grch->mmio, i++ * 4, 0x00419004); 199 nv_wo32(grch->mmio, i++ * 4, 0x00419004);
149 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); 200 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
150 nv_wo32(grch->mmio, i++ * 4, 0x00419008); 201 nv_wo32(grch->mmio, i++ * 4, 0x00419008);
151 nv_wo32(grch->mmio, i++ * 4, 0x00000000); 202 nv_wo32(grch->mmio, i++ * 4, 0x00000000);
152 203
153 nv_wo32(grch->mmio, i++ * 4, 0x00418808); 204 nv_wo32(grch->mmio, i++ * 4, 0x00418808);
154 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); 205 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
155 nv_wo32(grch->mmio, i++ * 4, 0x0041880c); 206 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
156 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 207 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
157 208
@@ -159,7 +210,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
159 nv_wo32(grch->mmio, i++ * 4, 0x00405830); 210 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
160 nv_wo32(grch->mmio, i++ * 4, magic); 211 nv_wo32(grch->mmio, i++ * 4, magic);
161 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 212 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
162 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) { 213 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
163 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); 214 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
164 nv_wo32(grch->mmio, i++ * 4, reg); 215 nv_wo32(grch->mmio, i++ * 4, reg);
165 nv_wo32(grch->mmio, i++ * 4, magic); 216 nv_wo32(grch->mmio, i++ * 4, magic);
@@ -186,7 +237,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
186 return -ENOMEM; 237 return -ENOMEM;
187 chan->engctx[NVOBJ_ENGINE_GR] = grch; 238 chan->engctx[NVOBJ_ENGINE_GR] = grch;
188 239
189 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, 240 ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
190 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, 241 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
191 &grch->grctx); 242 &grch->grctx);
192 if (ret) 243 if (ret)
@@ -197,8 +248,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
197 if (ret) 248 if (ret)
198 goto error; 249 goto error;
199 250
200 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4); 251 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
201 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst)); 252 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
202 pinstmem->flush(dev); 253 pinstmem->flush(dev);
203 254
204 if (!priv->grctx_vals) { 255 if (!priv->grctx_vals) {
@@ -210,15 +261,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
210 for (i = 0; i < priv->grctx_size; i += 4) 261 for (i = 0; i < priv->grctx_size; i += 4)
211 nv_wo32(grctx, i, priv->grctx_vals[i / 4]); 262 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
212 263
213 nv_wo32(grctx, 0xf4, 0); 264 if (!nouveau_ctxfw) {
214 nv_wo32(grctx, 0xf8, 0); 265 nv_wo32(grctx, 0x00, grch->mmio_nr);
215 nv_wo32(grctx, 0x10, grch->mmio_nr); 266 nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
216 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); 267 } else {
217 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); 268 nv_wo32(grctx, 0xf4, 0);
218 nv_wo32(grctx, 0x1c, 1); 269 nv_wo32(grctx, 0xf8, 0);
219 nv_wo32(grctx, 0x20, 0); 270 nv_wo32(grctx, 0x10, grch->mmio_nr);
220 nv_wo32(grctx, 0x28, 0); 271 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
221 nv_wo32(grctx, 0x2c, 0); 272 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
273 nv_wo32(grctx, 0x1c, 1);
274 nv_wo32(grctx, 0x20, 0);
275 nv_wo32(grctx, 0x28, 0);
276 nv_wo32(grctx, 0x2c, 0);
277 }
222 pinstmem->flush(dev); 278 pinstmem->flush(dev);
223 return 0; 279 return 0;
224 280
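/* Editor's note (inference from the two branches above, not a statement
 * from the patch): the in-kernel ucode appears to read its mmio-list
 * descriptor at grctx+0x00 (entry count) and grctx+0x04 (list address
 * >> 8), while the external NVIDIA ctxfw expects the older layout
 * starting at grctx+0x10 -- hence the nouveau_ctxfw split. */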
@@ -248,7 +304,7 @@ nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
248} 304}
249 305
250static int 306static int
251nvc0_graph_fini(struct drm_device *dev, int engine) 307nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
252{ 308{
253 return 0; 309 return 0;
254} 310}
@@ -296,6 +352,7 @@ static void
296nvc0_graph_init_gpc_0(struct drm_device *dev) 352nvc0_graph_init_gpc_0(struct drm_device *dev)
297{ 353{
298 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 354 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
355 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
299 u32 data[TP_MAX / 8]; 356 u32 data[TP_MAX / 8];
300 u8 tpnr[GPC_MAX]; 357 u8 tpnr[GPC_MAX];
301 int i, gpc, tpc; 358 int i, gpc, tpc;
@@ -307,13 +364,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
307 * 465: 3/4/4/0 4 7 364 * 465: 3/4/4/0 4 7
308 * 470: 3/3/4/4 5 5 365 * 470: 3/3/4/4 5 5
309 * 480: 3/4/4/4 6 6 366 * 480: 3/4/4/4 6 6
310 *
311 * magicgpc918
312 * 450: 00200000 00000000001000000000000000000000
313 * 460: 00124925 00000000000100100100100100100101
314 * 465: 000ba2e9 00000000000010111010001011101001
315 * 470: 00092493 00000000000010010010010010010011
316 * 480: 00088889 00000000000010001000100010001001
317 */ 367 */
318 368
319 memset(data, 0x00, sizeof(data)); 369 memset(data, 0x00, sizeof(data));
@@ -336,10 +386,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
336 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | 386 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
337 priv->tp_nr[gpc]); 387 priv->tp_nr[gpc]);
338 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total); 388 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
339 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918); 389 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
340 } 390 }
341 391
342 nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918); 392 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
343 nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); 393 nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
344} 394}
345 395
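Editor's aside, not part of the patch: the DIV_ROUND_UP() added above
replaces the hardcoded magicgpc918 table deleted in this hunk.  A
standalone sketch (DIV_ROUND_UP open-coded; expected values taken from
the deleted comment block) confirms the formula reproduces every old
entry:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* tp_total per board, from the comment table above */
	assert(DIV_ROUND_UP(0x00800000,  4) == 0x00200000); /* 450 */
	assert(DIV_ROUND_UP(0x00800000,  7) == 0x00124925); /* 460 */
	assert(DIV_ROUND_UP(0x00800000, 11) == 0x000ba2e9); /* 465 */
	assert(DIV_ROUND_UP(0x00800000, 14) == 0x00092493); /* 470 */
	assert(DIV_ROUND_UP(0x00800000, 15) == 0x00088889); /* 480 */
	return 0;
}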
@@ -419,8 +469,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
419static int 469static int
420nvc0_graph_init_ctxctl(struct drm_device *dev) 470nvc0_graph_init_ctxctl(struct drm_device *dev)
421{ 471{
472 struct drm_nouveau_private *dev_priv = dev->dev_private;
422 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); 473 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
423 u32 r000260; 474 u32 r000260;
475 int i;
476
477 if (!nouveau_ctxfw) {
478 /* load HUB microcode */
479 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
480 nv_wr32(dev, 0x4091c0, 0x01000000);
481 for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
482 nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
483
484 nv_wr32(dev, 0x409180, 0x01000000);
485 for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
486 if ((i & 0x3f) == 0)
487 nv_wr32(dev, 0x409188, i >> 6);
488 nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
489 }
490
491 /* load GPC microcode */
492 nv_wr32(dev, 0x41a1c0, 0x01000000);
493 for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
494 nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
495
496 nv_wr32(dev, 0x41a180, 0x01000000);
497 for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
498 if ((i & 0x3f) == 0)
499 nv_wr32(dev, 0x41a188, i >> 6);
500 nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
501 }
502 nv_wr32(dev, 0x000260, r000260);
503
504 /* start HUB ucode running, it'll init the GPCs */
505 nv_wr32(dev, 0x409800, dev_priv->chipset);
506 nv_wr32(dev, 0x40910c, 0x00000000);
507 nv_wr32(dev, 0x409100, 0x00000002);
508 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
509 NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
510 nvc0_graph_ctxctl_debug(dev);
511 return -EBUSY;
512 }
513
514 priv->grctx_size = nv_rd32(dev, 0x409804);
515 return 0;
516 }
424 517
425 /* load fuc microcode */ 518 /* load fuc microcode */
426 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); 519 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
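Editor's aside, not part of the patch: the HUB and GPC upload loops
above follow one pattern.  A hypothetical common helper -- the port
offsets (+0x1c0/+0x1c4 for data, +0x180/+0x184/+0x188 for code) are
lifted from the writes above, the helper itself does not exist in the
patch -- might look like:

static void
fuc_upload(struct drm_device *dev, u32 base,
	   const u32 *code, size_t code_words,
	   const u32 *data, size_t data_words)
{
	size_t i;

	/* data segment streams through a single port */
	nv_wr32(dev, base + 0x1c0, 0x01000000);
	for (i = 0; i < data_words; i++)
		nv_wr32(dev, base + 0x1c4, data[i]);

	/* code segment: bump the 256-byte page index every 64 words */
	nv_wr32(dev, base + 0x180, 0x01000000);
	for (i = 0; i < code_words; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, base + 0x188, i >> 6);
		nv_wr32(dev, base + 0x184, code[i]);
	}
}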
@@ -528,6 +621,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
528} 621}
529 622
530static void 623static void
624nvc0_graph_ctxctl_isr(struct drm_device *dev)
625{
626 u32 ustat = nv_rd32(dev, 0x409c18);
627
628 if (ustat & 0x00000001)
629 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
630 if (ustat & 0x00080000)
631 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
632 if (ustat & ~0x00080001)
633 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
634
635 nvc0_graph_ctxctl_debug(dev);
636 nv_wr32(dev, 0x409c20, ustat);
637}
638
639static void
531nvc0_graph_isr(struct drm_device *dev) 640nvc0_graph_isr(struct drm_device *dev)
532{ 641{
533 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12; 642 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
@@ -578,11 +687,7 @@ nvc0_graph_isr(struct drm_device *dev)
578 } 687 }
579 688
580 if (stat & 0x00080000) { 689 if (stat & 0x00080000) {
581 u32 ustat = nv_rd32(dev, 0x409c18); 690 nvc0_graph_ctxctl_isr(dev);
582
583 NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
584
585 nv_wr32(dev, 0x409c20, ustat);
586 nv_wr32(dev, 0x400100, 0x00080000); 691 nv_wr32(dev, 0x400100, 0x00080000);
587 stat &= ~0x00080000; 692 stat &= ~0x00080000;
588 } 693 }
@@ -606,7 +711,7 @@ nvc0_runk140_isr(struct drm_device *dev)
606 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); 711 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
607 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); 712 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
608 713
609 NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); 714 NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
610 units &= ~(1 << unit); 715 units &= ~(1 << unit);
611 } 716 }
612} 717}
@@ -651,10 +756,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
651{ 756{
652 struct nvc0_graph_priv *priv = nv_engine(dev, engine); 757 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
653 758
654 nvc0_graph_destroy_fw(&priv->fuc409c); 759 if (nouveau_ctxfw) {
655 nvc0_graph_destroy_fw(&priv->fuc409d); 760 nvc0_graph_destroy_fw(&priv->fuc409c);
656 nvc0_graph_destroy_fw(&priv->fuc41ac); 761 nvc0_graph_destroy_fw(&priv->fuc409d);
657 nvc0_graph_destroy_fw(&priv->fuc41ad); 762 nvc0_graph_destroy_fw(&priv->fuc41ac);
763 nvc0_graph_destroy_fw(&priv->fuc41ad);
764 }
658 765
659 nouveau_irq_unregister(dev, 12); 766 nouveau_irq_unregister(dev, 12);
660 nouveau_irq_unregister(dev, 25); 767 nouveau_irq_unregister(dev, 25);
@@ -675,13 +782,10 @@ nvc0_graph_create(struct drm_device *dev)
675 struct drm_nouveau_private *dev_priv = dev->dev_private; 782 struct drm_nouveau_private *dev_priv = dev->dev_private;
676 struct nvc0_graph_priv *priv; 783 struct nvc0_graph_priv *priv;
677 int ret, gpc, i; 784 int ret, gpc, i;
785 u32 fermi;
678 786
679 switch (dev_priv->chipset) { 787 fermi = nvc0_graph_class(dev);
680 case 0xc0: 788 if (!fermi) {
681 case 0xc3:
682 case 0xc4:
683 break;
684 default:
685 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); 789 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
686 return 0; 790 return 0;
687 } 791 }
@@ -701,15 +805,17 @@ nvc0_graph_create(struct drm_device *dev)
701 nouveau_irq_register(dev, 12, nvc0_graph_isr); 805 nouveau_irq_register(dev, 12, nvc0_graph_isr);
702 nouveau_irq_register(dev, 25, nvc0_runk140_isr); 806 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
703 807
704 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || 808 if (nouveau_ctxfw) {
705 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || 809 NV_INFO(dev, "PGRAPH: using external firmware\n");
706 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) || 810 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
707 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { 811 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
708 ret = 0; 812 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
709 goto error; 813 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
814 ret = 0;
815 goto error;
816 }
710 } 817 }
711 818
712
713 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); 819 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
714 if (ret) 820 if (ret)
715 goto error; 821 goto error;
@@ -735,25 +841,28 @@ nvc0_graph_create(struct drm_device *dev)
735 case 0xc0: 841 case 0xc0:
736 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ 842 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
737 priv->magic_not_rop_nr = 0x07; 843 priv->magic_not_rop_nr = 0x07;
738 /* filled values up to tp_total, the rest 0 */
739 priv->magicgpc918 = 0x000ba2e9;
740 } else 844 } else
741 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ 845 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
742 priv->magic_not_rop_nr = 0x05; 846 priv->magic_not_rop_nr = 0x05;
743 priv->magicgpc918 = 0x00092493;
744 } else 847 } else
745 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ 848 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
746 priv->magic_not_rop_nr = 0x06; 849 priv->magic_not_rop_nr = 0x06;
747 priv->magicgpc918 = 0x00088889;
748 } 850 }
749 break; 851 break;
750 case 0xc3: /* 450, 4/0/0/0, 2 */ 852 case 0xc3: /* 450, 4/0/0/0, 2 */
751 priv->magic_not_rop_nr = 0x03; 853 priv->magic_not_rop_nr = 0x03;
752 priv->magicgpc918 = 0x00200000;
753 break; 854 break;
754 case 0xc4: /* 460, 3/4/0/0, 4 */ 855 case 0xc4: /* 460, 3/4/0/0, 4 */
755 priv->magic_not_rop_nr = 0x01; 856 priv->magic_not_rop_nr = 0x01;
756 priv->magicgpc918 = 0x00124925; 857 break;
858 case 0xc1: /* 2/0/0/0, 1 */
859 priv->magic_not_rop_nr = 0x01;
860 break;
861 case 0xc8: /* 4/4/3/4, 5 */
862 priv->magic_not_rop_nr = 0x06;
863 break;
864 case 0xce: /* 4/4/0/0, 4 */
865 priv->magic_not_rop_nr = 0x03;
757 break; 866 break;
758 } 867 }
759 868
@@ -763,13 +872,16 @@ nvc0_graph_create(struct drm_device *dev)
763 priv->tp_nr[3], priv->rop_nr); 872 priv->tp_nr[3], priv->rop_nr);
764 /* use 0xc3's values... */ 873 /* use 0xc3's values... */
765 priv->magic_not_rop_nr = 0x03; 874 priv->magic_not_rop_nr = 0x03;
766 priv->magicgpc918 = 0x00200000;
767 } 875 }
768 876
769 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ 877 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
770 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ 878 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
771 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); 879 NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
772 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ 880 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
881 if (fermi >= 0x9197)
882 NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
883 if (fermi >= 0x9297)
884 NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
773 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ 885 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
774 return 0; 886 return 0;
775 887
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
new file mode 100644
index 000000000000..2a4b6dc8f9de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -0,0 +1,400 @@
1/* fuc microcode util functions for nvc0 PGRAPH
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
27define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
28
29ifdef(`include_code', `
30// Error codes
31define(`E_BAD_COMMAND', 0x01)
32define(`E_CMD_OVERFLOW', 0x02)
33
34// Util macros to help with debugging ucode hangs etc
35define(`T_WAIT', 0)
36define(`T_MMCTX', 1)
37define(`T_STRWAIT', 2)
38define(`T_STRINIT', 3)
39define(`T_AUTO', 4)
40define(`T_CHAN', 5)
41define(`T_LOAD', 6)
42define(`T_SAVE', 7)
43define(`T_LCHAN', 8)
44define(`T_LCTXH', 9)
45
46define(`trace_set', `
47 mov $r8 0x83c
48 shl b32 $r8 6
49 clear b32 $r9
50 bset $r9 $1
51 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
52')
53
54define(`trace_clr', `
55 mov $r8 0x85c
56 shl b32 $r8 6
57 clear b32 $r9
58 bset $r9 $1
59 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
60')
61
62// queue_put - add request to queue
63//
64// In : $r13 queue pointer
65// $r14 command
66// $r15 data
67//
68queue_put:
69 // make sure we have space..
70 ld b32 $r8 D[$r13 + 0x0] // GET
71 ld b32 $r9 D[$r13 + 0x4] // PUT
72 xor $r8 8
73 cmpu b32 $r8 $r9
74 bra ne queue_put_next
75 mov $r15 E_CMD_OVERFLOW
76 call error
77 ret
78
79 // store cmd/data on queue
80 queue_put_next:
81 and $r8 $r9 7
82 shl b32 $r8 3
83 add b32 $r8 $r13
84 add b32 $r8 8
85 st b32 D[$r8 + 0x0] $r14
86 st b32 D[$r8 + 0x4] $r15
87
88 // update PUT
89 add b32 $r9 1
90 and $r9 0xf
91 st b32 D[$r13 + 0x4] $r9
92 ret
93
94// queue_get - fetch request from queue
95//
96// In : $r13 queue pointer
97//
98// Out: $p1 clear on success (data available)
99// $r14 command
100// $r15 data
101//
102queue_get:
103 bset $flags $p1
104 ld b32 $r8 D[$r13 + 0x0] // GET
105 ld b32 $r9 D[$r13 + 0x4] // PUT
106 cmpu b32 $r8 $r9
107 bra e queue_get_done
108 // fetch first cmd/data pair
109 and $r9 $r8 7
110 shl b32 $r9 3
111 add b32 $r9 $r13
112 add b32 $r9 8
113 ld b32 $r14 D[$r9 + 0x0]
114 ld b32 $r15 D[$r9 + 0x4]
115
116 // update GET
117 add b32 $r8 1
118 and $r8 0xf
119 st b32 D[$r13 + 0x0] $r8
120 bclr $flags $p1
121queue_get_done:
122 ret
123
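// Editor's note: cmd_queue is an 8-slot ring of (cmd, data) pairs with
// 4-bit GET/PUT counters (queue_init above reserves 2*4 + 8*4*2 = 72
// bytes).  In illustrative C terms, the invariants used here are:
//   empty : get == put
//   full  : (get ^ 8) == put
//   slot  : put & 7           (each slot is 8 bytes: cmd, data)
//   bump  : put = (put + 1) & 0xf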
124// nv_rd32 - read 32-bit value from nv register
125//
126// In : $r14 register
127// Out: $r15 value
128//
129nv_rd32:
130 mov $r11 0x728
131 shl b32 $r11 6
132 mov b32 $r12 $r14
133 bset $r12 31 // MMIO_CTRL_PENDING
134 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
135 nv_rd32_wait:
136 iord $r12 I[$r11 + 0x000]
137 xbit $r12 $r12 31
138 bra ne nv_rd32_wait
139 mov $r10 6 // DONE_MMIO_RD
140 call wait_doneo
141 iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
142 ret
143
144// nv_wr32 - write 32-bit value to nv register
145//
146// In : $r14 register
147// $r15 value
148//
149nv_wr32:
150 mov $r11 0x728
151 shl b32 $r11 6
152 iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
153 mov b32 $r12 $r14
154 bset $r12 31 // MMIO_CTRL_PENDING
155 bset $r12 30 // MMIO_CTRL_WRITE
156 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
157 nv_wr32_wait:
158 iord $r12 I[$r11 + 0x000]
159 xbit $r12 $r12 31
160 bra ne nv_wr32_wait
161 ret
162
163// (re)set watchdog timer
164//
165// In : $r15 timeout
166//
167watchdog_reset:
168 mov $r8 0x430
169 shl b32 $r8 6
170 bset $r15 31
171 iowr I[$r8 + 0x000] $r15
172 ret
173
174// clear watchdog timer
175watchdog_clear:
176 mov $r8 0x430
177 shl b32 $r8 6
178 iowr I[$r8 + 0x000] $r0
179 ret
180
181// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
182//
183// In : $r10 bit to wait on
184//
185define(`wait_done', `
186$1:
187 trace_set(T_WAIT);
188 mov $r8 0x818
189 shl b32 $r8 6
190 iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
191 wait_done_$1:
192 mov $r8 0x400
193 shl b32 $r8 6
194 iord $r8 I[$r8 + 0x000] // DONE
195 xbit $r8 $r8 $r10
196 bra $2 wait_done_$1
197 trace_clr(T_WAIT)
198 ret
199')
200wait_done(wait_donez, ne)
201wait_done(wait_doneo, e)
202
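// Editor's note: the two m4 expansions above yield wait_donez, which
// loops while the selected DONE bit reads non-zero (waits for clear),
// and wait_doneo, which loops while it reads zero (waits for set).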
203// mmctx_size - determine size of a mmio list transfer
204//
205// In : $r14 mmio list head
206// $r15 mmio list tail
207// Out: $r15 transfer size (in bytes)
208//
209mmctx_size:
210 clear b32 $r9
211 nv_mmctx_size_loop:
212 ld b32 $r8 D[$r14]
213 shr b32 $r8 26
214 add b32 $r8 1
215 shl b32 $r8 2
216 add b32 $r9 $r8
217 add b32 $r14 4
218 cmpu b32 $r14 $r15
219 bra ne nv_mmctx_size_loop
220 mov b32 $r15 $r9
221 ret
222
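// Editor's note: this walks the packed mmctx_data() entries defined at
// the top of this file -- bits 25:0 hold the register offset, bits
// 31:26 hold (count - 1) -- so each list word contributes
// ((entry >> 26) + 1) * 4 bytes to the transfer size.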
223// mmctx_xfer - execute a list of mmio transfers
224//
225// In : $r10 flags
226// bit 0: direction (0 = save, 1 = load)
227// bit 1: set if first transfer
228// bit 2: set if last transfer
229// $r11 base
230// $r12 mmio list head
231// $r13 mmio list tail
232// $r14 multi_stride
233// $r15 multi_mask
234//
235mmctx_xfer:
236 trace_set(T_MMCTX)
237 mov $r8 0x710
238 shl b32 $r8 6
239 clear b32 $r9
240 or $r11 $r11
241 bra e mmctx_base_disabled
242 iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
243 bset $r9 0 // BASE_EN
244 mmctx_base_disabled:
245 or $r14 $r14
246 bra e mmctx_multi_disabled
247 iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
248 iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
249 bset $r9 1 // MULTI_EN
250 mmctx_multi_disabled:
251 add b32 $r8 0x100
252
253 xbit $r11 $r10 0
254 shl b32 $r11 16 // DIR
255 bset $r11 12 // QLIMIT = 0x10
256 xbit $r14 $r10 1
257 shl b32 $r14 17
258 or $r11 $r14 // START_TRIGGER
259 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
260
261 // loop over the mmio list, and send requests to the hw
262 mmctx_exec_loop:
263 // wait for space in mmctx queue
264 mmctx_wait_free:
265 iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
266 and $r14 0x1f
267 bra e mmctx_wait_free
268
269 // queue up an entry
270 ld b32 $r14 D[$r12]
271 or $r14 $r9
272 iowr I[$r8 + 0x300] $r14
273 add b32 $r12 4
274 cmpu b32 $r12 $r13
275 bra ne mmctx_exec_loop
276
277 xbit $r11 $r10 2
278 bra ne mmctx_stop
279 // wait for queue to empty
280 mmctx_fini_wait:
281 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
282 and $r11 0x1f
283 cmpu b32 $r11 0x10
284 bra ne mmctx_fini_wait
285 mov $r10 2 // DONE_MMCTX
286 call wait_donez
287 bra mmctx_done
288 mmctx_stop:
289 xbit $r11 $r10 0
290 shl b32 $r11 16 // DIR
291 bset $r11 12 // QLIMIT = 0x10
292 bset $r11 18 // STOP_TRIGGER
293 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
294 mmctx_stop_wait:
295 // wait for STOP_TRIGGER to clear
296 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
297 xbit $r11 $r11 18
298 bra ne mmctx_stop_wait
299 mmctx_done:
300 trace_clr(T_MMCTX)
301 ret
302
303// Wait for DONE_STRAND
304//
305strand_wait:
306 push $r10
307 mov $r10 2
308 call wait_donez
309 pop $r10
310 ret
311
312// unknown - call before issuing strand commands
313//
314strand_pre:
315 mov $r8 0x4afc
316 sethi $r8 0x20000
317 mov $r9 0xc
318 iowr I[$r8] $r9
319 call strand_wait
320 ret
321
322// unknown - call after issuing strand commands
323//
324strand_post:
325 mov $r8 0x4afc
326 sethi $r8 0x20000
327 mov $r9 0xd
328 iowr I[$r8] $r9
329 call strand_wait
330 ret
331
332// Selects strand set?!
333//
334// In: $r14 id
335//
336strand_set:
337 mov $r10 0x4ffc
338 sethi $r10 0x20000
339 sub b32 $r11 $r10 0x500
340 mov $r12 0xf
341 iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
342 mov $r12 0xb
343 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
344 call strand_wait
345 iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
346 mov $r12 0xa
347 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
348 call strand_wait
349 ret
350
351// Initialise strand context data
352//
353// In : $r15 context base
354// Out: $r15 context size (in bytes)
355//
356// Strandset(?) 3 hardcoded currently
357//
358strand_ctx_init:
359 trace_set(T_STRINIT)
360 call strand_pre
361 mov $r14 3
362 call strand_set
363 mov $r10 0x46fc
364 sethi $r10 0x20000
365 add b32 $r11 $r10 0x400
366 iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
367 mov $r12 1
368 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
369 call strand_wait
370 sub b32 $r12 $r0 1
371 iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
372 mov $r12 2
373 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
374 call strand_wait
375 call strand_post
376
377 // read the size of each strand, poke the context offset of
 378 // each into STRAND_{SAVE,LOAD}_SWBASE now, so we don't have
 379 // to worry about it later.
380 mov $r8 0x880
381 shl b32 $r8 6
382 iord $r9 I[$r8 + 0x000] // STRANDS
383 add b32 $r8 0x2200
384 shr b32 $r14 $r15 8
385 ctx_init_strand_loop:
386 iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
387 iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
388 iord $r10 I[$r8 + 0x200] // STRAND_SIZE
389 shr b32 $r10 6
390 add b32 $r10 1
391 add b32 $r14 $r10
392 add b32 $r8 4
393 sub b32 $r9 1
394 bra ne ctx_init_strand_loop
395
396 shl b32 $r14 8
397 sub b32 $r15 $r14 $r15
398 trace_clr(T_STRINIT)
399 ret
400')
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index f5d184e0689d..55689e997286 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -57,8 +57,7 @@ struct nvc0_graph_priv {
57 struct nouveau_gpuobj *unk4188b4; 57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8; 58 struct nouveau_gpuobj *unk4188b8;
59 59
60 u8 magic_not_rop_nr; 60 u8 magic_not_rop_nr;
61 u32 magicgpc918;
62}; 61};
63 62
64struct nvc0_graph_chan { 63struct nvc0_graph_chan {
@@ -72,4 +71,25 @@ struct nvc0_graph_chan {
72 71
73int nvc0_grctx_generate(struct nouveau_channel *); 72int nvc0_grctx_generate(struct nouveau_channel *);
74 73
74/* nvc0_graph.c uses this also to determine supported chipsets */
75static inline u32
76nvc0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xc0:
82 case 0xc3:
83 case 0xc4:
84 case 0xce: /* guess, mmio trace shows only 0x9097 state */
85 return 0x9097;
86 case 0xc1:
87 return 0x9197;
88 case 0xc8:
89 return 0x9297;
90 default:
91 return 0;
92 }
93}
94
75#endif 95#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 6df066114133..31018eaf5279 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
45static void 45static void
46nvc0_grctx_generate_9097(struct drm_device *dev) 46nvc0_grctx_generate_9097(struct drm_device *dev)
47{ 47{
48 u32 fermi = nvc0_graph_class(dev);
49 u32 mthd;
50
48 nv_mthd(dev, 0x9097, 0x0800, 0x00000000); 51 nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
49 nv_mthd(dev, 0x9097, 0x0840, 0x00000000); 52 nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
50 nv_mthd(dev, 0x9097, 0x0880, 0x00000000); 53 nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
@@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
824 nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); 827 nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
825 nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); 828 nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
826 nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); 829 nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
827 nv_mthd(dev, 0x9097, 0x3400, 0x00000000); 830 if (fermi == 0x9097) {
828 nv_mthd(dev, 0x9097, 0x3404, 0x00000000); 831 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
829 nv_mthd(dev, 0x9097, 0x3408, 0x00000000); 832 nv_mthd(dev, 0x9097, mthd, 0x00000000);
830 nv_mthd(dev, 0x9097, 0x340c, 0x00000000); 833 }
831 nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
832 nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
833 nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
834 nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
835 nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
836 nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
837 nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
838 nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
839 nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
840 nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
841 nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
842 nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
843 nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
844 nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
845 nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
846 nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
847 nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
848 nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
849 nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
850 nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
851 nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
852 nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
853 nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
854 nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
855 nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
856 nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
857 nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
858 nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
859 nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
860 nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
861 nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
862 nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
863 nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
864 nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
865 nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
866 nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
867 nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
868 nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
869 nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
870 nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
871 nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
872 nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
873 nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
874 nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
875 nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
876 nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
877 nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
878 nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
879 nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
880 nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
881 nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
882 nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
883 nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
884 nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
885 nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
886 nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
887 nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
888 nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
889 nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
890 nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
891 nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
892 nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
893 nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
894 nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
895 nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
896 nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
897 nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
898 nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
899 nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
900 nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
901 nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
902 nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
903 nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
904 nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
905 nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
906 nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
907 nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
908 nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
909 nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
910 nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
911 nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
912 nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
913 nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
914 nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
915 nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
916 nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
917 nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
918 nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
919 nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
920 nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
921 nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
922 nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
923 nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
924 nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
925 nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
926 nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
927 nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
928 nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
929 nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
930 nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
931 nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
932 nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
933 nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
934 nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
935 nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
936 nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
937 nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
938 nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
939 nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
940 nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
941 nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
942 nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
943 nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
944 nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
945 nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
946 nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
947 nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
948 nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
949 nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
950 nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
951 nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
952 nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
953 nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
954 nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
955 nv_mthd(dev, 0x9097, 0x030c, 0x00000001); 834 nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
956 nv_mthd(dev, 0x9097, 0x1944, 0x00000000); 835 nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
957 nv_mthd(dev, 0x9097, 0x1514, 0x00000000); 836 nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
@@ -1321,6 +1200,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
1321} 1200}
1322 1201
1323static void 1202static void
1203nvc0_grctx_generate_9197(struct drm_device *dev)
1204{
1205 u32 fermi = nvc0_graph_class(dev);
1206 u32 mthd;
1207
1208 if (fermi == 0x9197) {
1209 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1210 nv_mthd(dev, 0x9197, mthd, 0x00000000);
1211 }
1212 nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
1213}
1214
1215static void
1216nvc0_grctx_generate_9297(struct drm_device *dev)
1217{
1218 u32 fermi = nvc0_graph_class(dev);
1219 u32 mthd;
1220
1221 if (fermi == 0x9297) {
1222 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1223 nv_mthd(dev, 0x9297, mthd, 0x00000000);
1224 }
1225 nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
1226 nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
1227 nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
1228 nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
1229 nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
1230 nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
1231}
1232
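/* Editor's note: together with the fermi == 0x9097 guard added above,
 * exactly one of these generators -- the one matching the chipset's
 * newest 3D class as reported by nvc0_graph_class() -- initialises the
 * 0x3400-0x35fc method window, so the loop runs once per chip. */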
1233static void
1324nvc0_grctx_generate_902d(struct drm_device *dev) 1234nvc0_grctx_generate_902d(struct drm_device *dev)
1325{ 1235{
1326 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf); 1236 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
@@ -1559,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev)
1559static void 1469static void
1560nvc0_grctx_generate_shaders(struct drm_device *dev) 1470nvc0_grctx_generate_shaders(struct drm_device *dev)
1561{ 1471{
1562 nv_wr32(dev, 0x405800, 0x078000bf); 1472 struct drm_nouveau_private *dev_priv = dev->dev_private;
1563 nv_wr32(dev, 0x405830, 0x02180000); 1473
1474 if (dev_priv->chipset != 0xc1) {
1475 nv_wr32(dev, 0x405800, 0x078000bf);
1476 nv_wr32(dev, 0x405830, 0x02180000);
1477 } else {
1478 nv_wr32(dev, 0x405800, 0x0f8000bf);
1479 nv_wr32(dev, 0x405830, 0x02180218);
1480 }
1564 nv_wr32(dev, 0x405834, 0x00000000); 1481 nv_wr32(dev, 0x405834, 0x00000000);
1565 nv_wr32(dev, 0x405838, 0x00000000); 1482 nv_wr32(dev, 0x405838, 0x00000000);
1566 nv_wr32(dev, 0x405854, 0x00000000); 1483 nv_wr32(dev, 0x405854, 0x00000000);
@@ -1586,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev)
1586static void 1503static void
1587nvc0_grctx_generate_unk64xx(struct drm_device *dev) 1504nvc0_grctx_generate_unk64xx(struct drm_device *dev)
1588{ 1505{
1506 struct drm_nouveau_private *dev_priv = dev->dev_private;
1507
1589 nv_wr32(dev, 0x4064a8, 0x00000000); 1508 nv_wr32(dev, 0x4064a8, 0x00000000);
1590 nv_wr32(dev, 0x4064ac, 0x00003fff); 1509 nv_wr32(dev, 0x4064ac, 0x00003fff);
1591 nv_wr32(dev, 0x4064b4, 0x00000000); 1510 nv_wr32(dev, 0x4064b4, 0x00000000);
1592 nv_wr32(dev, 0x4064b8, 0x00000000); 1511 nv_wr32(dev, 0x4064b8, 0x00000000);
1512 if (dev_priv->chipset == 0xc1) {
1513 nv_wr32(dev, 0x4064c0, 0x80140078);
1514 nv_wr32(dev, 0x4064c4, 0x0086ffff);
1515 }
1593} 1516}
1594 1517
1595static void 1518static void
@@ -1622,21 +1545,14 @@ static void
1622nvc0_grctx_generate_rop(struct drm_device *dev) 1545nvc0_grctx_generate_rop(struct drm_device *dev)
1623{ 1546{
1624 struct drm_nouveau_private *dev_priv = dev->dev_private; 1547 struct drm_nouveau_private *dev_priv = dev->dev_private;
1548 int chipset = dev_priv->chipset;
1625 1549
1626 /* ROPC_BROADCAST */ 1550 /* ROPC_BROADCAST */
1627 nv_wr32(dev, 0x408800, 0x02802a3c); 1551 nv_wr32(dev, 0x408800, 0x02802a3c);
1628 nv_wr32(dev, 0x408804, 0x00000040); 1552 nv_wr32(dev, 0x408804, 0x00000040);
1629 nv_wr32(dev, 0x408808, 0x0003e00d); 1553 nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
1630 switch (dev_priv->chipset) { 1554 nv_wr32(dev, 0x408900, 0x3080b801);
1631 case 0xc0: 1555 nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
1632 nv_wr32(dev, 0x408900, 0x0080b801);
1633 break;
1634 case 0xc3:
1635 case 0xc4:
1636 nv_wr32(dev, 0x408900, 0x3080b801);
1637 break;
1638 }
1639 nv_wr32(dev, 0x408904, 0x02000001);
1640 nv_wr32(dev, 0x408908, 0x00c80929); 1556 nv_wr32(dev, 0x408908, 0x00c80929);
1641 nv_wr32(dev, 0x40890c, 0x00000000); 1557 nv_wr32(dev, 0x40890c, 0x00000000);
1642 nv_wr32(dev, 0x408980, 0x0000011d); 1558 nv_wr32(dev, 0x408980, 0x0000011d);
@@ -1645,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
1645static void 1561static void
1646nvc0_grctx_generate_gpc(struct drm_device *dev) 1562nvc0_grctx_generate_gpc(struct drm_device *dev)
1647{ 1563{
1564 struct drm_nouveau_private *dev_priv = dev->dev_private;
1565 int chipset = dev_priv->chipset;
1648 int i; 1566 int i;
1649 1567
1650 /* GPC_BROADCAST */ 1568 /* GPC_BROADCAST */
@@ -1676,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1676 nv_wr32(dev, 0x41880c, 0x00000000); 1594 nv_wr32(dev, 0x41880c, 0x00000000);
1677 nv_wr32(dev, 0x418810, 0x00000000); 1595 nv_wr32(dev, 0x418810, 0x00000000);
1678 nv_wr32(dev, 0x418828, 0x00008442); 1596 nv_wr32(dev, 0x418828, 0x00008442);
1679 nv_wr32(dev, 0x418830, 0x00000001); 1597 nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
1680 nv_wr32(dev, 0x4188d8, 0x00000008); 1598 nv_wr32(dev, 0x4188d8, 0x00000008);
1681 nv_wr32(dev, 0x4188e0, 0x01000000); 1599 nv_wr32(dev, 0x4188e0, 0x01000000);
1682 nv_wr32(dev, 0x4188e8, 0x00000000); 1600 nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1684,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1684 nv_wr32(dev, 0x4188f0, 0x00000000); 1602 nv_wr32(dev, 0x4188f0, 0x00000000);
1685 nv_wr32(dev, 0x4188f4, 0x00000000); 1603 nv_wr32(dev, 0x4188f4, 0x00000000);
1686 nv_wr32(dev, 0x4188f8, 0x00000000); 1604 nv_wr32(dev, 0x4188f8, 0x00000000);
1687 nv_wr32(dev, 0x4188fc, 0x00100000); 1605 nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
1688 nv_wr32(dev, 0x41891c, 0x00ff00ff); 1606 nv_wr32(dev, 0x41891c, 0x00ff00ff);
1689 nv_wr32(dev, 0x418924, 0x00000000); 1607 nv_wr32(dev, 0x418924, 0x00000000);
1690 nv_wr32(dev, 0x418928, 0x00ffff00); 1608 nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1715,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
1715 nv_wr32(dev, 0x418c24, 0x00000000); 1633 nv_wr32(dev, 0x418c24, 0x00000000);
1716 nv_wr32(dev, 0x418c28, 0x00000000); 1634 nv_wr32(dev, 0x418c28, 0x00000000);
1717 nv_wr32(dev, 0x418c2c, 0x00000000); 1635 nv_wr32(dev, 0x418c2c, 0x00000000);
1636 if (chipset == 0xc1)
1637 nv_wr32(dev, 0x418c6c, 0x00000001);
1718 nv_wr32(dev, 0x418c80, 0x20200004); 1638 nv_wr32(dev, 0x418c80, 0x20200004);
1719 nv_wr32(dev, 0x418c8c, 0x00000001); 1639 nv_wr32(dev, 0x418c8c, 0x00000001);
1720 nv_wr32(dev, 0x419000, 0x00000780); 1640 nv_wr32(dev, 0x419000, 0x00000780);
@@ -1727,10 +1647,13 @@ static void
1727nvc0_grctx_generate_tp(struct drm_device *dev) 1647nvc0_grctx_generate_tp(struct drm_device *dev)
1728{ 1648{
1729 struct drm_nouveau_private *dev_priv = dev->dev_private; 1649 struct drm_nouveau_private *dev_priv = dev->dev_private;
1650 int chipset = dev_priv->chipset;
1730 1651
1731 /* GPC_BROADCAST.TP_BROADCAST */ 1652 /* GPC_BROADCAST.TP_BROADCAST */
1653 nv_wr32(dev, 0x419818, 0x00000000);
1654 nv_wr32(dev, 0x41983c, 0x00038bc7);
1732 nv_wr32(dev, 0x419848, 0x00000000); 1655 nv_wr32(dev, 0x419848, 0x00000000);
1733 nv_wr32(dev, 0x419864, 0x0000012a); 1656 nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
1734 nv_wr32(dev, 0x419888, 0x00000000); 1657 nv_wr32(dev, 0x419888, 0x00000000);
1735 nv_wr32(dev, 0x419a00, 0x000001f0); 1658 nv_wr32(dev, 0x419a00, 0x000001f0);
1736 nv_wr32(dev, 0x419a04, 0x00000001); 1659 nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1740,8 +1663,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1740 nv_wr32(dev, 0x419a14, 0x00000200); 1663 nv_wr32(dev, 0x419a14, 0x00000200);
1741 nv_wr32(dev, 0x419a1c, 0x00000000); 1664 nv_wr32(dev, 0x419a1c, 0x00000000);
1742 nv_wr32(dev, 0x419a20, 0x00000800); 1665 nv_wr32(dev, 0x419a20, 0x00000800);
1743 if (dev_priv->chipset != 0xc0) 1666 if (chipset != 0xc0 && chipset != 0xc8)
1744 nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */ 1667 nv_wr32(dev, 0x00419ac4, 0x0007f440);
1745 nv_wr32(dev, 0x419b00, 0x0a418820); 1668 nv_wr32(dev, 0x419b00, 0x0a418820);
1746 nv_wr32(dev, 0x419b04, 0x062080e6); 1669 nv_wr32(dev, 0x419b04, 0x062080e6);
1747 nv_wr32(dev, 0x419b08, 0x020398a4); 1670 nv_wr32(dev, 0x419b08, 0x020398a4);
@@ -1749,17 +1672,19 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1749 nv_wr32(dev, 0x419b10, 0x0a418820); 1672 nv_wr32(dev, 0x419b10, 0x0a418820);
1750 nv_wr32(dev, 0x419b14, 0x000000e6); 1673 nv_wr32(dev, 0x419b14, 0x000000e6);
1751 nv_wr32(dev, 0x419bd0, 0x00900103); 1674 nv_wr32(dev, 0x419bd0, 0x00900103);
1752 nv_wr32(dev, 0x419be0, 0x00000001); 1675 nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
1753 nv_wr32(dev, 0x419be4, 0x00000000); 1676 nv_wr32(dev, 0x419be4, 0x00000000);
1754 nv_wr32(dev, 0x419c00, 0x00000002); 1677 nv_wr32(dev, 0x419c00, 0x00000002);
1755 nv_wr32(dev, 0x419c04, 0x00000006); 1678 nv_wr32(dev, 0x419c04, 0x00000006);
1756 nv_wr32(dev, 0x419c08, 0x00000002); 1679 nv_wr32(dev, 0x419c08, 0x00000002);
1757 nv_wr32(dev, 0x419c20, 0x00000000); 1680 nv_wr32(dev, 0x419c20, 0x00000000);
1758 nv_wr32(dev, 0x419cbc, 0x28137606); 1681 nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048
1759 nv_wr32(dev, 0x419ce8, 0x00000000); 1682 nv_wr32(dev, 0x419ce8, 0x00000000);
1760 nv_wr32(dev, 0x419cf4, 0x00000183); 1683 nv_wr32(dev, 0x419cf4, 0x00000183);
1761 nv_wr32(dev, 0x419d20, 0x02180000); 1684 nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
1762 nv_wr32(dev, 0x419d24, 0x00001fff); 1685 nv_wr32(dev, 0x419d24, 0x00001fff);
1686 if (chipset == 0xc1)
1687 nv_wr32(dev, 0x419d44, 0x02180218);
1763 nv_wr32(dev, 0x419e04, 0x00000000); 1688 nv_wr32(dev, 0x419e04, 0x00000000);
1764 nv_wr32(dev, 0x419e08, 0x00000000); 1689 nv_wr32(dev, 0x419e08, 0x00000000);
1765 nv_wr32(dev, 0x419e0c, 0x00000000); 1690 nv_wr32(dev, 0x419e0c, 0x00000000);
@@ -1785,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
1785 nv_wr32(dev, 0x419e8c, 0x00000000); 1710 nv_wr32(dev, 0x419e8c, 0x00000000);
1786 nv_wr32(dev, 0x419e90, 0x00000000); 1711 nv_wr32(dev, 0x419e90, 0x00000000);
1787 nv_wr32(dev, 0x419e98, 0x00000000); 1712 nv_wr32(dev, 0x419e98, 0x00000000);
1788 if (dev_priv->chipset != 0xc0) 1713 if (chipset != 0xc0 && chipset != 0xc8)
1789 nv_wr32(dev, 0x419ee0, 0x00011110); 1714 nv_wr32(dev, 0x419ee0, 0x00011110);
1790 nv_wr32(dev, 0x419f50, 0x00000000); 1715 nv_wr32(dev, 0x419f50, 0x00000000);
1791 nv_wr32(dev, 0x419f54, 0x00000000); 1716 nv_wr32(dev, 0x419f54, 0x00000000);
1792 if (dev_priv->chipset != 0xc0) 1717 if (chipset != 0xc0 && chipset != 0xc8)
1793 nv_wr32(dev, 0x419f58, 0x00000000); 1718 nv_wr32(dev, 0x419f58, 0x00000000);
1794} 1719}
1795 1720
@@ -1801,6 +1726,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1801 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 1726 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
1802 struct drm_device *dev = chan->dev; 1727 struct drm_device *dev = chan->dev;
1803 int i, gpc, tp, id; 1728 int i, gpc, tp, id;
1729 u32 fermi = nvc0_graph_class(dev);
1804 u32 r000260, tmp; 1730 u32 r000260, tmp;
1805 1731
1806 r000260 = nv_rd32(dev, 0x000260); 1732 r000260 = nv_rd32(dev, 0x000260);
@@ -1857,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1857 nv_wr32(dev, 0x40587c, 0x00000000); 1783 nv_wr32(dev, 0x40587c, 0x00000000);
1858 1784
1859 if (1) { 1785 if (1) {
1860 const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 }; 1786 const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0,
1787 16, 0, 0, 0, 0, 0, 8, 0 };
1861 u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; 1788 u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
1862 u8 tpnr[GPC_MAX]; 1789 u8 tpnr[GPC_MAX];
1863 u8 data[32]; 1790 u8 data[TP_MAX];
1864 1791
1865 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1792 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1866 memset(data, 0x1f, sizeof(data)); 1793 memset(data, 0x1f, sizeof(data));
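/* Editor's note: chipset_tp_max[] is indexed by the chipset id's low
 * nibble, so the extended table reads 0xc0 -> 16, 0xc1 -> 4,
 * 0xc3 -> 4, 0xc4 -> 8, 0xc8 -> 16, 0xce -> 8; unlisted nibbles
 * stay 0. */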
@@ -2633,6 +2560,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
2633 nv_icmd(dev, 0x0000053f, 0xffff0000); 2560 nv_icmd(dev, 0x0000053f, 0xffff0000);
2634 nv_icmd(dev, 0x00000585, 0x0000003f); 2561 nv_icmd(dev, 0x00000585, 0x0000003f);
2635 nv_icmd(dev, 0x00000576, 0x00000003); 2562 nv_icmd(dev, 0x00000576, 0x00000003);
2563 if (dev_priv->chipset == 0xc1)
2564 nv_icmd(dev, 0x0000057b, 0x00000059);
2636 nv_icmd(dev, 0x00000586, 0x00000040); 2565 nv_icmd(dev, 0x00000586, 0x00000040);
2637 nv_icmd(dev, 0x00000582, 0x00000080); 2566 nv_icmd(dev, 0x00000582, 0x00000080);
2638 nv_icmd(dev, 0x00000583, 0x00000080); 2567 nv_icmd(dev, 0x00000583, 0x00000080);
@@ -2865,6 +2794,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
2865 nv_wr32(dev, 0x404154, 0x00000400); 2794 nv_wr32(dev, 0x404154, 0x00000400);
2866 2795
2867 nvc0_grctx_generate_9097(dev); 2796 nvc0_grctx_generate_9097(dev);
2797 if (fermi >= 0x9197)
2798 nvc0_grctx_generate_9197(dev);
2799 if (fermi >= 0x9297)
2800 nvc0_grctx_generate_9297(dev);
2868 nvc0_grctx_generate_902d(dev); 2801 nvc0_grctx_generate_902d(dev);
2869 nvc0_grctx_generate_9039(dev); 2802 nvc0_grctx_generate_9039(dev);
2870 nvc0_grctx_generate_90c0(dev); 2803 nvc0_grctx_generate_90c0(dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
new file mode 100644
index 000000000000..0ec2add72a76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -0,0 +1,474 @@
1/* fuc microcode for nvc0 PGRAPH/GPC
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
28 */
29
30/* TODO
31 * - bracket certain functions with scratch writes, useful for debugging
32 * - watchdog timer around ctx operations
33 */
34
35.section nvc0_grgpc_data
36include(`nvc0_graph.fuc')
37gpc_id: .b32 0
38gpc_mmio_list_head: .b32 0
39gpc_mmio_list_tail: .b32 0
40
41tpc_count: .b32 0
42tpc_mask: .b32 0
43tpc_mmio_list_head: .b32 0
44tpc_mmio_list_tail: .b32 0
45
46cmd_queue: queue_init
47
48// chipset descriptions
49chipsets:
50.b8 0xc0 0 0 0
51.b16 nvc0_gpc_mmio_head
52.b16 nvc0_gpc_mmio_tail
53.b16 nvc0_tpc_mmio_head
54.b16 nvc0_tpc_mmio_tail
55.b8 0xc1 0 0 0
56.b16 nvc0_gpc_mmio_head
57.b16 nvc1_gpc_mmio_tail
58.b16 nvc0_tpc_mmio_head
59.b16 nvc1_tpc_mmio_tail
60.b8 0xc3 0 0 0
61.b16 nvc0_gpc_mmio_head
62.b16 nvc0_gpc_mmio_tail
63.b16 nvc0_tpc_mmio_head
64.b16 nvc3_tpc_mmio_tail
65.b8 0xc4 0 0 0
66.b16 nvc0_gpc_mmio_head
67.b16 nvc0_gpc_mmio_tail
68.b16 nvc0_tpc_mmio_head
69.b16 nvc3_tpc_mmio_tail
70.b8 0xc8 0 0 0
71.b16 nvc0_gpc_mmio_head
72.b16 nvc0_gpc_mmio_tail
73.b16 nvc0_tpc_mmio_head
74.b16 nvc0_tpc_mmio_tail
75.b8 0xce 0 0 0
76.b16 nvc0_gpc_mmio_head
77.b16 nvc0_gpc_mmio_tail
78.b16 nvc0_tpc_mmio_head
79.b16 nvc3_tpc_mmio_tail
80.b8 0 0 0 0
81
82// GPC mmio lists
83nvc0_gpc_mmio_head:
84mmctx_data(0x000380, 1)
85mmctx_data(0x000400, 6)
86mmctx_data(0x000450, 9)
87mmctx_data(0x000600, 1)
88mmctx_data(0x000684, 1)
89mmctx_data(0x000700, 5)
90mmctx_data(0x000800, 1)
91mmctx_data(0x000808, 3)
92mmctx_data(0x000828, 1)
93mmctx_data(0x000830, 1)
94mmctx_data(0x0008d8, 1)
95mmctx_data(0x0008e0, 1)
96mmctx_data(0x0008e8, 6)
97mmctx_data(0x00091c, 1)
98mmctx_data(0x000924, 3)
99mmctx_data(0x000b00, 1)
100mmctx_data(0x000b08, 6)
101mmctx_data(0x000bb8, 1)
102mmctx_data(0x000c08, 1)
103mmctx_data(0x000c10, 8)
104mmctx_data(0x000c80, 1)
105mmctx_data(0x000c8c, 1)
106mmctx_data(0x001000, 3)
107mmctx_data(0x001014, 1)
108nvc0_gpc_mmio_tail:
109mmctx_data(0x000c6c, 1)
110nvc1_gpc_mmio_tail:
111
112// TPC mmio lists
113nvc0_tpc_mmio_head:
114mmctx_data(0x000018, 1)
115mmctx_data(0x00003c, 1)
116mmctx_data(0x000048, 1)
117mmctx_data(0x000064, 1)
118mmctx_data(0x000088, 1)
119mmctx_data(0x000200, 6)
120mmctx_data(0x00021c, 2)
121mmctx_data(0x000300, 6)
122mmctx_data(0x0003d0, 1)
123mmctx_data(0x0003e0, 2)
124mmctx_data(0x000400, 3)
125mmctx_data(0x000420, 1)
126mmctx_data(0x0004b0, 1)
127mmctx_data(0x0004e8, 1)
128mmctx_data(0x0004f4, 1)
129mmctx_data(0x000520, 2)
130mmctx_data(0x000604, 4)
131mmctx_data(0x000644, 20)
132mmctx_data(0x000698, 1)
133mmctx_data(0x000750, 2)
134nvc0_tpc_mmio_tail:
135mmctx_data(0x000758, 1)
136mmctx_data(0x0002c4, 1)
137mmctx_data(0x0004bc, 1)
138mmctx_data(0x0006e0, 1)
139nvc3_tpc_mmio_tail:
140mmctx_data(0x000544, 1)
141nvc1_tpc_mmio_tail:
142
143
144.section nvc0_grgpc_code
145bra init
146define(`include_code')
147include(`nvc0_graph.fuc')
148
149// reports an exception to the host
150//
151// In: $r15 error code (see nvc0_graph.fuc)
152//
153error:
154 push $r14
155 mov $r14 -0x67ec // 0x9814
156 sethi $r14 0x400000
157 call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
158 add b32 $r14 0x41c
159 mov $r15 1
160 call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
161 pop $r14
162 ret
163
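// Editor's note (assumption about falcon sethi semantics): the 16-bit
// immediate -0x67ec sign-extends to 0xffff9814, and sethi then
// replaces the upper half with 0x0040, giving 0x00409814 -- i.e. the
// HUB's CC_SCRATCH[5] -- before the +0x41c bump for INTR_UP_SET.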
164// GPC fuc initialisation, executed by triggering ucode start, will
165// fall through to main loop after completion.
166//
167// Input:
168// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
169// CC_SCRATCH[1]: context base
170//
171// Output:
172// CC_SCRATCH[0]:
173// 31:31: set to signal completion
174// CC_SCRATCH[1]:
175// 31:0: GPC context size
176//
177init:
178 clear b32 $r0
179 mov $sp $r0
180
181 // enable fifo access
182 mov $r1 0x1200
183 mov $r2 2
184 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
185
186 // setup i0 handler, and route all interrupts to it
187 mov $r1 ih
188 mov $iv0 $r1
189 mov $r1 0x400
190 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
191
192 // enable fifo interrupt
193 mov $r2 4
194 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
195
196 // enable interrupts
197 bset $flags ie0
198
199 // figure out which GPC we are, and how many TPCs we have
200 mov $r1 0x608
201 shl b32 $r1 6
202 iord $r2 I[$r1 + 0x000] // UNITS
203 mov $r3 1
204 and $r2 0x1f
205 shl b32 $r3 $r2
206 sub b32 $r3 1
207 st b32 D[$r0 + tpc_count] $r2
208 st b32 D[$r0 + tpc_mask] $r3
209 add b32 $r1 0x400
210 iord $r2 I[$r1 + 0x000] // MYINDEX
211 st b32 D[$r0 + gpc_id] $r2
212
213 // find context data for this chipset
214 mov $r2 0x800
215 shl b32 $r2 6
216 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
217 mov $r1 chipsets - 12
218 init_find_chipset:
219 add b32 $r1 12
220 ld b32 $r3 D[$r1 + 0x00]
221 cmpu b32 $r3 $r2
222 bra e init_context
223 cmpu b32 $r3 0
224 bra ne init_find_chipset
225 // unknown chipset
226 ret
227
228 // initialise context base, and size tracking
229 init_context:
230 mov $r2 0x800
231 shl b32 $r2 6
232 iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
233 clear b32 $r3 // track GPC context size here
234
235 // set mmctx base addresses now so we don't have to do it later,
236 // they don't currently ever change
237 mov $r4 0x700
238 shl b32 $r4 6
239 shr b32 $r5 $r2 8
240 iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
241 iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
242
243 // calculate GPC mmio context size, store the chipset-specific
244 // mmio list pointers somewhere we can get at them later without
245 // re-parsing the chipset list
246 clear b32 $r14
247 clear b32 $r15
248 ld b16 $r14 D[$r1 + 4]
249 ld b16 $r15 D[$r1 + 6]
250 st b16 D[$r0 + gpc_mmio_list_head] $r14
251 st b16 D[$r0 + gpc_mmio_list_tail] $r15
252 call mmctx_size
253 add b32 $r2 $r15
254 add b32 $r3 $r15
255
256 // calculate per-TPC mmio context size, store the list pointers
257 ld b16 $r14 D[$r1 + 8]
258 ld b16 $r15 D[$r1 + 10]
259 st b16 D[$r0 + tpc_mmio_list_head] $r14
260 st b16 D[$r0 + tpc_mmio_list_tail] $r15
261 call mmctx_size
262 ld b32 $r14 D[$r0 + tpc_count]
263 mulu $r14 $r15
264 add b32 $r2 $r14
265 add b32 $r3 $r14
266
267 // round up base/size to 256 byte boundary (for strand SWBASE)
268 add b32 $r4 0x1300
269 shr b32 $r3 2
270 iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
271 shr b32 $r2 8
272 shr b32 $r3 6
273 add b32 $r2 1
274 add b32 $r3 1
275 shl b32 $r2 8
276 shl b32 $r3 8
277
278 // calculate size of strand context data
279 mov b32 $r15 $r2
280 call strand_ctx_init
281 add b32 $r3 $r15
282
283 // save context size, and tell HUB we're done
284 mov $r1 0x800
285 shl b32 $r1 6
286 iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
287 add b32 $r1 0x800
288 clear b32 $r2
289 bset $r2 31
290 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
291
292// Main program loop, very simple, sleeps until woken up by the interrupt
293// handler, pulls a command from the queue and executes its handler
294//
295main:
296 bset $flags $p0
297 sleep $p0
298 mov $r13 cmd_queue
299 call queue_get
300 bra $p1 main
301
302 // 0x0000-0x0003 are all context transfers
303 cmpu b32 $r14 0x04
304 bra nc main_not_ctx_xfer
305 // fetch $flags and mask off $p1/$p2
306 mov $r1 $flags
307 mov $r2 0x0006
308 not b32 $r2
309 and $r1 $r2
310 // set $p1/$p2 according to transfer type
311 shl b32 $r14 1
312 or $r1 $r14
313 mov $flags $r1
314 // transfer context data
315 call ctx_xfer
316 bra main
317
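// Editor's note: the shift above moves command bit 0 into $p1
// (0 = save, 1 = load) and bit 1 into $p2 ("opposite direction
// done/will be done"), matching ctx_xfer's input description below.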
318 main_not_ctx_xfer:
319 shl b32 $r15 $r14 16
320 or $r15 E_BAD_COMMAND
321 call error
322 bra main
323
324// interrupt handler
325ih:
326 push $r8
327 mov $r8 $flags
328 push $r8
329 push $r9
330 push $r10
331 push $r11
332 push $r13
333 push $r14
334 push $r15
335
336 // incoming fifo command?
337 iord $r10 I[$r0 + 0x200] // INTR
338 and $r11 $r10 0x00000004
339 bra e ih_no_fifo
340 // queue incoming fifo command for later processing
341 mov $r11 0x1900
342 mov $r13 cmd_queue
343 iord $r14 I[$r11 + 0x100] // FIFO_CMD
344 iord $r15 I[$r11 + 0x000] // FIFO_DATA
345 call queue_put
346 add b32 $r11 0x400
347 mov $r14 1
348 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
349
350 // ack, and wake up main()
351 ih_no_fifo:
352 iowr I[$r0 + 0x100] $r10 // INTR_ACK
353
354 pop $r15
355 pop $r14
356 pop $r13
357 pop $r11
358 pop $r10
359 pop $r9
360 pop $r8
361 mov $flags $r8
362 pop $r8
363 bclr $flags $p0
364 iret
365
366// Set this GPC's bit in HUB_BAR, used to signal completion of various
367// activities to the HUB fuc
368//
369hub_barrier_done:
370 mov $r15 1
371 ld b32 $r14 D[$r0 + gpc_id]
372 shl b32 $r15 $r14
373 mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
374 sethi $r14 0x400000
375 call nv_wr32
376 ret
377
378// Disables various things, waits a bit, and re-enables them..
379//
380// Not sure how exactly this helps, perhaps "ENABLE" is not such a
381// good description for the bits we turn off? Anyways, without this,
382// funny things happen.
383//
384ctx_redswitch:
385 mov $r14 0x614
386 shl b32 $r14 6
387 mov $r15 0x020
388 iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
389 mov $r15 8
390 ctx_redswitch_delay:
391 sub b32 $r15 1
392 bra ne ctx_redswitch_delay
393 mov $r15 0xa20
394 iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
395 ret
396
397// Transfer GPC context data between GPU and storage area
398//
399// In: $r15 context base address
400// $p1 clear on save, set on load
401// $p2 set if opposite direction done/will be done, so:
402// on save it means: "a load will follow this save"
 403 //       on load it means: "a save preceded this load"
404//
405ctx_xfer:
406 // set context base address
407 mov $r1 0xa04
408 shl b32 $r1 6
409 iowr I[$r1 + 0x000] $r15// MEM_BASE
410 bra not $p1 ctx_xfer_not_load
411 call ctx_redswitch
412 ctx_xfer_not_load:
413
414 // strands
415 mov $r1 0x4afc
416 sethi $r1 0x20000
417 mov $r2 0xc
418 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
419 call strand_wait
420 mov $r2 0x47fc
421 sethi $r2 0x20000
422 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
423 xbit $r2 $flags $p1
424 add b32 $r2 3
425 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
426
427 // mmio context
428 xbit $r10 $flags $p1 // direction
429 or $r10 2 // first
430 mov $r11 0x0000
431 sethi $r11 0x500000
432 ld b32 $r12 D[$r0 + gpc_id]
433 shl b32 $r12 15
434 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
435 ld b32 $r12 D[$r0 + gpc_mmio_list_head]
436 ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
437 mov $r14 0 // not multi
438 call mmctx_xfer
439
440 // per-TPC mmio context
441 xbit $r10 $flags $p1 // direction
442 or $r10 4 // last
443 mov $r11 0x4000
444 sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
445 ld b32 $r12 D[$r0 + gpc_id]
446 shl b32 $r12 15
447 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
448 ld b32 $r12 D[$r0 + tpc_mmio_list_head]
449 ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
450 ld b32 $r15 D[$r0 + tpc_mask]
451 mov $r14 0x800 // stride = 0x800
452 call mmctx_xfer
453
454 // wait for strands to finish
455 call strand_wait
456
457 // if load, or a save without a load following, do some
458 // unknown stuff that's done after finishing a block of
459 // strand commands
460 bra $p1 ctx_xfer_post
461 bra not $p2 ctx_xfer_done
462 ctx_xfer_post:
463 mov $r1 0x4afc
464 sethi $r1 0x20000
465 mov $r2 0xd
466 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
467 call strand_wait
468
469 // mark completion in HUB's barrier
470 ctx_xfer_done:
471 call hub_barrier_done
472 ret
473
474.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
new file mode 100644
index 000000000000..1896c898f5ba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -0,0 +1,483 @@
1uint32_t nvc0_grgpc_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x000000c0,
28 0x011000b0,
29 0x01640114,
30 0x000000c1,
31 0x011400b0,
32 0x01780114,
33 0x000000c3,
34 0x011000b0,
35 0x01740114,
36 0x000000c4,
37 0x011000b0,
38 0x01740114,
39 0x000000c8,
40 0x011000b0,
41 0x01640114,
42 0x000000ce,
43 0x011000b0,
44 0x01740114,
45 0x00000000,
46 0x00000380,
47 0x14000400,
48 0x20000450,
49 0x00000600,
50 0x00000684,
51 0x10000700,
52 0x00000800,
53 0x08000808,
54 0x00000828,
55 0x00000830,
56 0x000008d8,
57 0x000008e0,
58 0x140008e8,
59 0x0000091c,
60 0x08000924,
61 0x00000b00,
62 0x14000b08,
63 0x00000bb8,
64 0x00000c08,
65 0x1c000c10,
66 0x00000c80,
67 0x00000c8c,
68 0x08001000,
69 0x00001014,
70 0x00000c6c,
71 0x00000018,
72 0x0000003c,
73 0x00000048,
74 0x00000064,
75 0x00000088,
76 0x14000200,
77 0x0400021c,
78 0x14000300,
79 0x000003d0,
80 0x040003e0,
81 0x08000400,
82 0x00000420,
83 0x000004b0,
84 0x000004e8,
85 0x000004f4,
86 0x04000520,
87 0x0c000604,
88 0x4c000644,
89 0x00000698,
90 0x04000750,
91 0x00000758,
92 0x000002c4,
93 0x000004bc,
94 0x000006e0,
95 0x00000544,
96};
97
98uint32_t nvc0_grgpc_code[] = {
99 0x03060ef5,
100 0x9800d898,
101 0x86f001d9,
102 0x0489b808,
103 0xf00c1bf4,
104 0x21f502f7,
105 0x00f802ec,
106 0xb60798c4,
107 0x8dbb0384,
108 0x0880b600,
109 0x80008e80,
110 0x90b6018f,
111 0x0f94f001,
112 0xf801d980,
113 0x0131f400,
114 0x9800d898,
115 0x89b801d9,
116 0x210bf404,
117 0xb60789c4,
118 0x9dbb0394,
119 0x0890b600,
120 0x98009e98,
121 0x80b6019f,
122 0x0f84f001,
123 0xf400d880,
124 0x00f80132,
125 0x0728b7f1,
126 0xb906b4b6,
127 0xc9f002ec,
128 0x00bcd01f,
129 0xc800bccf,
130 0x1bf41fcc,
131 0x06a7f0fa,
132 0x010321f5,
133 0xf840bfcf,
134 0x28b7f100,
135 0x06b4b607,
136 0xb980bfd0,
137 0xc9f002ec,
138 0x1ec9f01f,
139 0xcf00bcd0,
140 0xccc800bc,
141 0xfa1bf41f,
142 0x87f100f8,
143 0x84b60430,
144 0x1ff9f006,
145 0xf8008fd0,
146 0x3087f100,
147 0x0684b604,
148 0xf80080d0,
149 0x3c87f100,
150 0x0684b608,
151 0x99f094bd,
152 0x0089d000,
153 0x081887f1,
154 0xd00684b6,
155 0x87f1008a,
156 0x84b60400,
157 0x0088cf06,
158 0xf4888aff,
159 0x87f1f31b,
160 0x84b6085c,
161 0xf094bd06,
162 0x89d00099,
163 0xf100f800,
164 0xb6083c87,
165 0x94bd0684,
166 0xd00099f0,
167 0x87f10089,
168 0x84b60818,
169 0x008ad006,
170 0x040087f1,
171 0xcf0684b6,
172 0x8aff0088,
173 0xf30bf488,
174 0x085c87f1,
175 0xbd0684b6,
176 0x0099f094,
177 0xf80089d0,
178 0x9894bd00,
179 0x85b600e8,
180 0x0180b61a,
181 0xbb0284b6,
182 0xe0b60098,
183 0x04efb804,
184 0xb9eb1bf4,
185 0x00f8029f,
186 0x083c87f1,
187 0xbd0684b6,
188 0x0199f094,
189 0xf10089d0,
190 0xb6071087,
191 0x94bd0684,
192 0xf405bbfd,
193 0x8bd0090b,
194 0x0099f000,
195 0xf405eefd,
196 0x8ed00c0b,
197 0xc08fd080,
198 0xb70199f0,
199 0xc8010080,
200 0xb4b600ab,
201 0x0cb9f010,
202 0xb601aec8,
203 0xbefd11e4,
204 0x008bd005,
205 0xf0008ecf,
206 0x0bf41fe4,
207 0x00ce98fa,
208 0xd005e9fd,
209 0xc0b6c08e,
210 0x04cdb804,
211 0xc8e81bf4,
212 0x1bf402ab,
213 0x008bcf18,
214 0xb01fb4f0,
215 0x1bf410b4,
216 0x02a7f0f7,
217 0xf4c921f4,
218 0xabc81b0e,
219 0x10b4b600,
220 0xf00cb9f0,
221 0x8bd012b9,
222 0x008bcf00,
223 0xf412bbc8,
224 0x87f1fa1b,
225 0x84b6085c,
226 0xf094bd06,
227 0x89d00199,
228 0xf900f800,
229 0x02a7f0a0,
230 0xfcc921f4,
231 0xf100f8a0,
232 0xf04afc87,
233 0x97f00283,
234 0x0089d00c,
235 0x020721f5,
236 0x87f100f8,
237 0x83f04afc,
238 0x0d97f002,
239 0xf50089d0,
240 0xf8020721,
241 0xfca7f100,
242 0x02a3f04f,
243 0x0500aba2,
244 0xd00fc7f0,
245 0xc7f000ac,
246 0x00bcd00b,
247 0x020721f5,
248 0xf000aed0,
249 0xbcd00ac7,
250 0x0721f500,
251 0xf100f802,
252 0xb6083c87,
253 0x94bd0684,
254 0xd00399f0,
255 0x21f50089,
256 0xe7f00213,
257 0x3921f503,
258 0xfca7f102,
259 0x02a3f046,
260 0x0400aba0,
261 0xf040a0d0,
262 0xbcd001c7,
263 0x0721f500,
264 0x010c9202,
265 0xf000acd0,
266 0xbcd002c7,
267 0x0721f500,
268 0x2621f502,
269 0x8087f102,
270 0x0684b608,
271 0xb70089cf,
272 0x95220080,
273 0x8ed008fe,
274 0x408ed000,
275 0xb6808acf,
276 0xa0b606a5,
277 0x00eabb01,
278 0xb60480b6,
279 0x1bf40192,
280 0x08e4b6e8,
281 0xf1f2efbc,
282 0xb6085c87,
283 0x94bd0684,
284 0xd00399f0,
285 0x00f80089,
286 0xe7f1e0f9,
287 0xe3f09814,
288 0x8d21f440,
289 0x041ce0b7,
290 0xf401f7f0,
291 0xe0fc8d21,
292 0x04bd00f8,
293 0xf10004fe,
294 0xf0120017,
295 0x12d00227,
296 0x3e17f100,
297 0x0010fe04,
298 0x040017f1,
299 0xf0c010d0,
300 0x12d00427,
301 0x1031f400,
302 0x060817f1,
303 0xcf0614b6,
304 0x37f00012,
305 0x1f24f001,
306 0xb60432bb,
307 0x02800132,
308 0x04038003,
309 0x040010b7,
310 0x800012cf,
311 0x27f10002,
312 0x24b60800,
313 0x0022cf06,
314 0xb65817f0,
315 0x13980c10,
316 0x0432b800,
317 0xb00b0bf4,
318 0x1bf40034,
319 0xf100f8f1,
320 0xb6080027,
321 0x22cf0624,
322 0xf134bd40,
323 0xb6070047,
324 0x25950644,
325 0x0045d008,
326 0xbd4045d0,
327 0x58f4bde4,
328 0x1f58021e,
329 0x020e4003,
330 0xf5040f40,
331 0xbb013d21,
332 0x3fbb002f,
333 0x041e5800,
334 0x40051f58,
335 0x0f400a0e,
336 0x3d21f50c,
337 0x030e9801,
338 0xbb00effd,
339 0x3ebb002e,
340 0x0040b700,
341 0x0235b613,
342 0xb60043d0,
343 0x35b60825,
344 0x0120b606,
345 0xb60130b6,
346 0x34b60824,
347 0x022fb908,
348 0x026321f5,
349 0xf1003fbb,
350 0xb6080017,
351 0x13d00614,
352 0x0010b740,
353 0xf024bd08,
354 0x12d01f29,
355 0x0031f400,
356 0xf00028f4,
357 0x21f41cd7,
358 0xf401f439,
359 0xf404e4b0,
360 0x81fe1e18,
361 0x0627f001,
362 0x12fd20bd,
363 0x01e4b604,
364 0xfe051efd,
365 0x21f50018,
366 0x0ef404c3,
367 0x10ef94d3,
368 0xf501f5f0,
369 0xf402ec21,
370 0x80f9c60e,
371 0xf90188fe,
372 0xf990f980,
373 0xf9b0f9a0,
374 0xf9e0f9d0,
375 0x800acff0,
376 0xf404abc4,
377 0xb7f11d0b,
378 0xd7f01900,
379 0x40becf1c,
380 0xf400bfcf,
381 0xb0b70421,
382 0xe7f00400,
383 0x00bed001,
384 0xfc400ad0,
385 0xfce0fcf0,
386 0xfcb0fcd0,
387 0xfc90fca0,
388 0x0088fe80,
389 0x32f480fc,
390 0xf001f800,
391 0x0e9801f7,
392 0x04febb00,
393 0x9418e7f1,
394 0xf440e3f0,
395 0x00f88d21,
396 0x0614e7f1,
397 0xf006e4b6,
398 0xefd020f7,
399 0x08f7f000,
400 0xf401f2b6,
401 0xf7f1fd1b,
402 0xefd00a20,
403 0xf100f800,
404 0xb60a0417,
405 0x1fd00614,
406 0x0711f400,
407 0x04a421f5,
408 0x4afc17f1,
409 0xf00213f0,
410 0x12d00c27,
411 0x0721f500,
412 0xfc27f102,
413 0x0223f047,
414 0xf00020d0,
415 0x20b6012c,
416 0x0012d003,
417 0xf001acf0,
418 0xb7f002a5,
419 0x50b3f000,
420 0xb6000c98,
421 0xbcbb0fc4,
422 0x010c9800,
423 0xf0020d98,
424 0x21f500e7,
425 0xacf0015c,
426 0x04a5f001,
427 0x4000b7f1,
428 0x9850b3f0,
429 0xc4b6000c,
430 0x00bcbb0f,
431 0x98050c98,
432 0x0f98060d,
433 0x00e7f104,
434 0x5c21f508,
435 0x0721f501,
436 0x0601f402,
437 0xf11412f4,
438 0xf04afc17,
439 0x27f00213,
440 0x0012d00d,
441 0x020721f5,
442 0x048f21f5,
443 0x000000f8,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483};
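The two arrays above are the assembler's output for the GPC source earlier
in this patch: nvc0_grgpc_data is the falcon's data-segment image (chipset
table plus mmio lists) and nvc0_grgpc_code its code segment, padded out to
a 256-byte boundary. As a rough sketch of how a host driver consumes such
a pair (the struct and helper below are hypothetical illustrations, not
the nouveau API; real hardware is loaded through MMIO index/data ports,
but the net effect is a copy into the engine's local RAMs):

	#include <stdint.h>
	#include <string.h>

	struct falcon {
		uint32_t dmem[1024];	/* data-segment image */
		uint32_t imem[4096];	/* code-segment image */
	};

	static void
	falcon_load(struct falcon *f,
		    const uint32_t *code, size_t code_words,
		    const uint32_t *data, size_t data_words)
	{
		memcpy(f->dmem, data, data_words * sizeof(*data));
		memcpy(f->imem, code, code_words * sizeof(*code));
		/* the host then writes an entry point and a start trigger
		 * to the falcon's control registers; see the init_gpc
		 * loop in the HUB source below */
	}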
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
new file mode 100644
index 000000000000..a1a599124cf4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -0,0 +1,808 @@
1/* fuc microcode for nvc0 PGRAPH/HUB
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
28 */
29
30.section nvc0_grhub_data
31include(`nvc0_graph.fuc')
32gpc_count: .b32 0
33rop_count: .b32 0
34cmd_queue: queue_init
35hub_mmio_list_head: .b32 0
36hub_mmio_list_tail: .b32 0
37
38ctx_current: .b32 0
39
40chipsets:
41.b8 0xc0 0 0 0
42.b16 nvc0_hub_mmio_head
43.b16 nvc0_hub_mmio_tail
44.b8 0xc1 0 0 0
45.b16 nvc0_hub_mmio_head
46.b16 nvc1_hub_mmio_tail
47.b8 0xc3 0 0 0
48.b16 nvc0_hub_mmio_head
49.b16 nvc0_hub_mmio_tail
50.b8 0xc4 0 0 0
51.b16 nvc0_hub_mmio_head
52.b16 nvc0_hub_mmio_tail
53.b8 0xc8 0 0 0
54.b16 nvc0_hub_mmio_head
55.b16 nvc0_hub_mmio_tail
56.b8 0xce 0 0 0
57.b16 nvc0_hub_mmio_head
58.b16 nvc0_hub_mmio_tail
59.b8 0 0 0 0
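// each record above is 8 bytes: a chipset id padded out to 32 bits,
// then two 16-bit pointers bracketing that chipset's hub mmio list,
// with a zero id terminating the table. in C terms (illustrative
// only, this struct exists nowhere in the driver):
//
//    struct chipset {
//        u8  id, pad[3];
//        u16 hub_mmio_head;
//        u16 hub_mmio_tail;
//    };
//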
60
61nvc0_hub_mmio_head:
62mmctx_data(0x17e91c, 2)
63mmctx_data(0x400204, 2)
64mmctx_data(0x404004, 11)
65mmctx_data(0x404044, 1)
66mmctx_data(0x404094, 14)
67mmctx_data(0x4040d0, 7)
68mmctx_data(0x4040f8, 1)
69mmctx_data(0x404130, 3)
70mmctx_data(0x404150, 3)
71mmctx_data(0x404164, 2)
72mmctx_data(0x404174, 3)
73mmctx_data(0x404200, 8)
74mmctx_data(0x404404, 14)
75mmctx_data(0x404460, 4)
76mmctx_data(0x404480, 1)
77mmctx_data(0x404498, 1)
78mmctx_data(0x404604, 4)
79mmctx_data(0x404618, 32)
80mmctx_data(0x404698, 21)
81mmctx_data(0x4046f0, 2)
82mmctx_data(0x404700, 22)
83mmctx_data(0x405800, 1)
84mmctx_data(0x405830, 3)
85mmctx_data(0x405854, 1)
86mmctx_data(0x405870, 4)
87mmctx_data(0x405a00, 2)
88mmctx_data(0x405a18, 1)
89mmctx_data(0x406020, 1)
90mmctx_data(0x406028, 4)
91mmctx_data(0x4064a8, 2)
92mmctx_data(0x4064b4, 2)
93mmctx_data(0x407804, 1)
94mmctx_data(0x40780c, 6)
95mmctx_data(0x4078bc, 1)
96mmctx_data(0x408000, 7)
97mmctx_data(0x408064, 1)
98mmctx_data(0x408800, 3)
99mmctx_data(0x408900, 4)
100mmctx_data(0x408980, 1)
101nvc0_hub_mmio_tail:
102mmctx_data(0x4064c0, 2)
103nvc1_hub_mmio_tail:
104
105.align 256
106chan_data:
107chan_mmio_count: .b32 0
108chan_mmio_address: .b32 0
109
110.align 256
111xfer_data: .b32 0
112
113.section nvc0_grhub_code
114bra init
115define(`include_code')
116include(`nvc0_graph.fuc')
117
118// reports an exception to the host
119//
120// In: $r15 error code (see nvc0_graph.fuc)
121//
122error:
123 push $r14
124 mov $r14 0x814
125 shl b32 $r14 6
126 iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
127 mov $r14 0xc1c
128 shl b32 $r14 6
129 mov $r15 1
130 iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
131 pop $r14
132 ret
133
134// HUB fuc initialisation, executed by triggering ucode start, will
135// fall through to main loop after completion.
136//
137// Input:
138// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
139//
140// Output:
141// CC_SCRATCH[0]:
142// 31:31: set to signal completion
143// CC_SCRATCH[1]:
144// 31:0: total PGRAPH context size
145//
146init:
147 clear b32 $r0
148 mov $sp $r0
149 mov $xdbase $r0
150
151 // enable fifo access
152 mov $r1 0x1200
153 mov $r2 2
154 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
155
156 // setup i0 handler, and route all interrupts to it
157 mov $r1 ih
158 mov $iv0 $r1
159 mov $r1 0x400
160 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
161
162 // route HUB_CHANNEL_SWITCH to fuc interrupt 8
163 mov $r3 0x404
164 shl b32 $r3 6
165 mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
166 iowr I[$r3 + 0x000] $r2
167
168 // not sure what these are; route them because NVIDIA does, and
169 // the IRQ handler will signal the host if we ever get one, so
170 // we may find out if/why we need to handle them
171 //
172 mov $r2 0x2004
173 iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
174 mov $r2 0x200b
175 iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
176 mov $r2 0x200c
177 iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
178
179 // enable all INTR_UP interrupts
180 mov $r2 0xc24
181 shl b32 $r2 6
182 not b32 $r3 $r0
183 iowr I[$r2] $r3
184
185 // enable fifo, ctxsw, 9, 10, 15 interrupts
186 mov $r2 -0x78fc // 0x8704
187 sethi $r2 0
188 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
189
190 // fifo level triggered, rest edge
191 sub b32 $r1 0x100
192 mov $r2 4
193 iowr I[$r1] $r2
194
195 // enable interrupts
196 bset $flags ie0
197
198 // fetch enabled GPC/ROP counts
199 mov $r14 -0x69fc // 0x409604
200 sethi $r14 0x400000
201 call nv_rd32
202 extr $r1 $r15 16:20
203 st b32 D[$r0 + rop_count] $r1
204 and $r15 0x1f
205 st b32 D[$r0 + gpc_count] $r15
206
207 // set BAR_REQMASK to GPC mask
208 mov $r1 1
209 shl b32 $r1 $r15
210 sub b32 $r1 1
211 mov $r2 0x40c
212 shl b32 $r2 6
213 iowr I[$r2 + 0x000] $r1
214 iowr I[$r2 + 0x100] $r1
215
216 // find context data for this chipset
217 mov $r2 0x800
218 shl b32 $r2 6
219 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
220 mov $r15 chipsets - 8
221 init_find_chipset:
222 add b32 $r15 8
223 ld b32 $r3 D[$r15 + 0x00]
224 cmpu b32 $r3 $r2
225 bra e init_context
226 cmpu b32 $r3 0
227 bra ne init_find_chipset
228 // unknown chipset
229 ret
230
231 // context size calculation, reserve first 256 bytes for use by fuc
232 init_context:
233 mov $r1 256
234
235 // calculate size of mmio context data
236 ld b16 $r14 D[$r15 + 4]
237 ld b16 $r15 D[$r15 + 6]
238 sethi $r14 0
239 st b32 D[$r0 + hub_mmio_list_head] $r14
240 st b32 D[$r0 + hub_mmio_list_tail] $r15
241 call mmctx_size
242
243 // set mmctx base addresses now so we don't have to do it later;
244 // they don't (currently) ever change
245 mov $r3 0x700
246 shl b32 $r3 6
247 shr b32 $r4 $r1 8
248 iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
249 iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
250 add b32 $r3 0x1300
251 add b32 $r1 $r15
252 shr b32 $r15 2
253 iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
254
255 // strands, base offset needs to be aligned to 256 bytes
256 shr b32 $r1 8
257 add b32 $r1 1
258 shl b32 $r1 8
259 mov b32 $r15 $r1
260 call strand_ctx_init
261 add b32 $r1 $r15
262
263 // initialise each GPC in sequence by passing in the offset of its
264 // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
265 // has previously been uploaded by the host) running.
266 //
267 // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
268 // when it has completed, and return the size of its context data
269 // in GPCn_CC_SCRATCH[1]
270 //
271 ld b32 $r3 D[$r0 + gpc_count]
272 mov $r4 0x2000
273 sethi $r4 0x500000
274 init_gpc:
275 // setup, and start GPC ucode running
276 add b32 $r14 $r4 0x804
277 mov b32 $r15 $r1
278 call nv_wr32 // CC_SCRATCH[1] = ctx offset
279 add b32 $r14 $r4 0x800
280 mov b32 $r15 $r2
281 call nv_wr32 // CC_SCRATCH[0] = chipset
282 add b32 $r14 $r4 0x10c
283 clear b32 $r15
284 call nv_wr32
285 add b32 $r14 $r4 0x104
286 call nv_wr32 // ENTRY
287 add b32 $r14 $r4 0x100
288 mov $r15 2 // CTRL_START_TRIGGER
289 call nv_wr32 // CTRL
290
291 // wait for it to complete, and adjust context size
292 add b32 $r14 $r4 0x800
293 init_gpc_wait:
294 call nv_rd32
295 xbit $r15 $r15 31
296 bra e init_gpc_wait
297 add b32 $r14 $r4 0x804
298 call nv_rd32
299 add b32 $r1 $r15
300
301 // next!
302 add b32 $r4 0x8000
303 sub b32 $r3 1
304 bra ne init_gpc
305
306 // save context size, and tell host we're ready
307 mov $r2 0x800
308 shl b32 $r2 6
309 iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
310 add b32 $r2 0x800
311 clear b32 $r1
312 bset $r1 31
313 iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
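
// host-side view of the handshake just completed (a sketch, not
// driver code; the 0x409800/0x409804 host addresses for the HUB's
// CC_SCRATCH registers are an assumption here):
//
//    while (!(nv_rd32(dev, 0x409800) & 0x80000000))
//        ;                                    /* wait for bit 31 */
//    grctx_size = nv_rd32(dev, 0x409804);     /* CC_SCRATCH[1] */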
314
315// Main program loop, very simple, sleeps until woken up by the interrupt
316// handler, pulls a command from the queue and executes its handler
317//
318main:
319 // sleep until we have something to do
320 bset $flags $p0
321 sleep $p0
322 mov $r13 cmd_queue
323 call queue_get
324 bra $p1 main
325
326 // context switch, requested by GPU?
327 cmpu b32 $r14 0x4001
328 bra ne main_not_ctx_switch
329 trace_set(T_AUTO)
330 mov $r1 0xb00
331 shl b32 $r1 6
332 iord $r2 I[$r1 + 0x100] // CHAN_NEXT
333 iord $r1 I[$r1 + 0x000] // CHAN_CUR
334
335 xbit $r3 $r1 31
336 bra e chsw_no_prev
337 xbit $r3 $r2 31
338 bra e chsw_prev_no_next
339 push $r2
340 mov b32 $r2 $r1
341 trace_set(T_SAVE)
342 bclr $flags $p1
343 bset $flags $p2
344 call ctx_xfer
345 trace_clr(T_SAVE);
346 pop $r2
347 trace_set(T_LOAD);
348 bset $flags $p1
349 call ctx_xfer
350 trace_clr(T_LOAD);
351 bra chsw_done
352 chsw_prev_no_next:
353 push $r2
354 mov b32 $r2 $r1
355 bclr $flags $p1
356 bclr $flags $p2
357 call ctx_xfer
358 pop $r2
359 mov $r1 0xb00
360 shl b32 $r1 6
361 iowr I[$r1] $r2
362 bra chsw_done
363 chsw_no_prev:
364 xbit $r3 $r2 31
365 bra e chsw_done
366 bset $flags $p1
367 bclr $flags $p2
368 call ctx_xfer
369
370 // ack the context switch request
371 chsw_done:
372 mov $r1 0xb0c
373 shl b32 $r1 6
374 mov $r2 1
375 iowr I[$r1 + 0x000] $r2 // 0x409b0c
376 trace_clr(T_AUTO)
377 bra main
378
379 // request to set current channel? (*not* a context switch)
380 main_not_ctx_switch:
381 cmpu b32 $r14 0x0001
382 bra ne main_not_ctx_chan
383 mov b32 $r2 $r15
384 call ctx_chan
385 bra main_done
386
387 // request to store current channel context?
388 main_not_ctx_chan:
389 cmpu b32 $r14 0x0002
390 bra ne main_not_ctx_save
391 trace_set(T_SAVE)
392 bclr $flags $p1
393 bclr $flags $p2
394 call ctx_xfer
395 trace_clr(T_SAVE)
396 bra main_done
397
398 main_not_ctx_save:
399 shl b32 $r15 $r14 16
400 or $r15 E_BAD_COMMAND
401 call error
402 bra main
403
404 main_done:
405 mov $r1 0x820
406 shl b32 $r1 6
407 clear b32 $r2
408 bset $r2 31
409 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
410 bra main
411
412// interrupt handler
413ih:
414 push $r8
415 mov $r8 $flags
416 push $r8
417 push $r9
418 push $r10
419 push $r11
420 push $r13
421 push $r14
422 push $r15
423
424 // incoming fifo command?
425 iord $r10 I[$r0 + 0x200] // INTR
426 and $r11 $r10 0x00000004
427 bra e ih_no_fifo
428 // queue incoming fifo command for later processing
429 mov $r11 0x1900
430 mov $r13 cmd_queue
431 iord $r14 I[$r11 + 0x100] // FIFO_CMD
432 iord $r15 I[$r11 + 0x000] // FIFO_DATA
433 call queue_put
434 add b32 $r11 0x400
435 mov $r14 1
436 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
437
438 // context switch request?
439 ih_no_fifo:
440 and $r11 $r10 0x00000100
441 bra e ih_no_ctxsw
442 // enqueue a context switch for later processing
443 mov $r13 cmd_queue
444 mov $r14 0x4001
445 call queue_put
446
447 // anything we didn't handle, bring it to the host's attention
448 ih_no_ctxsw:
449 mov $r11 0x104
450 not b32 $r11
451 and $r11 $r10 $r11
452 bra e ih_no_other
453 mov $r10 0xc1c
454 shl b32 $r10 6
455 iowr I[$r10] $r11 // INTR_UP_SET
456
457 // ack, and wake up main()
458 ih_no_other:
459 iowr I[$r0 + 0x100] $r10 // INTR_ACK
460
461 pop $r15
462 pop $r14
463 pop $r13
464 pop $r11
465 pop $r10
466 pop $r9
467 pop $r8
468 mov $flags $r8
469 pop $r8
470 bclr $flags $p0
471 iret
472
473// Not really sure, but MEM_CMD 7 will hang forever if this isn't done
474ctx_4160s:
475 mov $r14 0x4160
476 sethi $r14 0x400000
477 mov $r15 1
478 call nv_wr32
479 ctx_4160s_wait:
480 call nv_rd32
481 xbit $r15 $r15 4
482 bra e ctx_4160s_wait
483 ret
484
485// Without clearing this again at the end of a transfer, some things
486// cause PGRAPH to hang with STATUS=0x00000007 until it's cleared;
487// fbcon can still function with it set, however.
488ctx_4160c:
489 mov $r14 0x4160
490 sethi $r14 0x400000
491 clear b32 $r15
492 call nv_wr32
493 ret
494
495// Again, not really sure
496//
497// In: $r15 value to set 0x404170 to
498//
499ctx_4170s:
500 mov $r14 0x4170
501 sethi $r14 0x400000
502 or $r15 0x10
503 call nv_wr32
504 ret
505
506// Waits for a ctx_4170s() call to complete
507//
508ctx_4170w:
509 mov $r14 0x4170
510 sethi $r14 0x400000
511 call nv_rd32
512 and $r15 0x10
513 bra ne ctx_4170w
514 ret
515
516// Disables various things, waits a bit, and re-enables them.
517//
518// Not sure how exactly this helps; perhaps "ENABLE" is not such a
519// good description for the bits we turn off? Anyway, without this,
520// funny things happen.
521//
522ctx_redswitch:
523 mov $r14 0x614
524 shl b32 $r14 6
525 mov $r15 0x270
526 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
527 mov $r15 8
528 ctx_redswitch_delay:
529 sub b32 $r15 1
530 bra ne ctx_redswitch_delay
531 mov $r15 0x770
532 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
533 ret
534
535// Not a clue what this is for, except that unless the value is 0x10, the
536// strand context is saved (and presumably restored) incorrectly.
537//
538// In: $r15 value to set to (0x00/0x10 are used)
539//
540ctx_86c:
541 mov $r14 0x86c
542 shl b32 $r14 6
543 iowr I[$r14] $r15 // HUB(0x86c) = val
544 mov $r14 -0x75ec
545 sethi $r14 0x400000
546 call nv_wr32 // ROP(0xa14) = val
547 mov $r14 -0x5794
548 sethi $r14 0x410000
549 call nv_wr32 // GPC(0x86c) = val
550 ret
551
552// ctx_load - loads a channel's ctxctl data, and selects its vm
553//
554// In: $r2 channel address
555//
556ctx_load:
557 trace_set(T_CHAN)
558
559 // switch to channel, somewhat magic in parts..
560 mov $r10 12 // DONE_UNK12
561 call wait_donez
562 mov $r1 0xa24
563 shl b32 $r1 6
564 iowr I[$r1 + 0x000] $r0 // 0x409a24
565 mov $r3 0xb00
566 shl b32 $r3 6
567 iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
568 mov $r1 0xa0c
569 shl b32 $r1 6
570 mov $r4 7
571 iowr I[$r1 + 0x000] $r2 // MEM_CHAN
572 iowr I[$r1 + 0x100] $r4 // MEM_CMD
573 ctx_chan_wait_0:
574 iord $r4 I[$r1 + 0x100]
575 and $r4 0x1f
576 bra ne ctx_chan_wait_0
577 iowr I[$r3 + 0x000] $r2 // CHAN_CUR
578
579 // load channel header, fetch PGRAPH context pointer
580 mov $xtargets $r0
581 bclr $r2 31
582 shl b32 $r2 4
583 add b32 $r2 2
584
585 trace_set(T_LCHAN)
586 mov $r1 0xa04
587 shl b32 $r1 6
588 iowr I[$r1 + 0x000] $r2 // MEM_BASE
589 mov $r1 0xa20
590 shl b32 $r1 6
591 mov $r2 0x0002
592 sethi $r2 0x80000000
593 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
594 mov $r1 0x10 // chan + 0x0210
595 mov $r2 xfer_data
596 sethi $r2 0x00020000 // 16 bytes
597 xdld $r1 $r2
598 xdwait
599 trace_clr(T_LCHAN)
600
601 // update current context
602 ld b32 $r1 D[$r0 + xfer_data + 4]
603 shl b32 $r1 24
604 ld b32 $r2 D[$r0 + xfer_data + 0]
605 shr b32 $r2 8
606 or $r1 $r2
607 st b32 D[$r0 + ctx_current] $r1
608
609 // set transfer base to start of context, and fetch context header
610 trace_set(T_LCTXH)
611 mov $r2 0xa04
612 shl b32 $r2 6
613 iowr I[$r2 + 0x000] $r1 // MEM_BASE
614 mov $r2 1
615 mov $r1 0xa20
616 shl b32 $r1 6
617 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
618 mov $r1 chan_data
619 sethi $r1 0x00060000 // 256 bytes
620 xdld $r0 $r1
621 xdwait
622 trace_clr(T_LCTXH)
623
624 trace_clr(T_CHAN)
625 ret
626
627// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
628// the active channel for ctxctl, but not actually transfer
629// any context data. Intended for use only during initial
630// context construction.
631//
632// In: $r2 channel address
633//
634ctx_chan:
635 call ctx_4160s
636 call ctx_load
637 mov $r10 12 // DONE_UNK12
638 call wait_donez
639 mov $r1 0xa10
640 shl b32 $r1 6
641 mov $r2 5
642 iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
643 ctx_chan_wait:
644 iord $r2 I[$r1 + 0x000]
645 or $r2 $r2
646 bra ne ctx_chan_wait
647 call ctx_4160c
648 ret
649
650// Execute per-context state overrides list
651//
652// Only executed on the first load of a channel. Might want to look into
653// removing this and having the host directly modify the channel's context
654// to change this state... The nouveau DRM already builds this list as
655// it's definitely needed for NVIDIA's, so we may as well use it for now
656//
657// Input: $r1 mmio list length
658//
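// the list itself is just { mmio address, value } word pairs, 8 bytes
// per entry and chan_mmio_count entries long, streamed in through
// xfer_data 256 bytes at a time. illustratively:
//
//    struct mmio_entry { u32 addr; u32 data; };  /* -> nv_wr32(addr, data) */
//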
659ctx_mmio_exec:
660 // set transfer base to be the mmio list
661 ld b32 $r3 D[$r0 + chan_mmio_address]
662 mov $r2 0xa04
663 shl b32 $r2 6
664 iowr I[$r2 + 0x000] $r3 // MEM_BASE
665
666 clear b32 $r3
667 ctx_mmio_loop:
668 // fetch next 256 bytes of mmio list if necessary
669 and $r4 $r3 0xff
670 bra ne ctx_mmio_pull
671 mov $r5 xfer_data
672 sethi $r5 0x00060000 // 256 bytes
673 xdld $r3 $r5
674 xdwait
675
676 // execute a single list entry
677 ctx_mmio_pull:
678 ld b32 $r14 D[$r4 + xfer_data + 0x00]
679 ld b32 $r15 D[$r4 + xfer_data + 0x04]
680 call nv_wr32
681
682 // next!
683 add b32 $r3 8
684 sub b32 $r1 1
685 bra ne ctx_mmio_loop
686
687 // set transfer base back to the current context
688 ctx_mmio_done:
689 ld b32 $r3 D[$r0 + ctx_current]
690 iowr I[$r2 + 0x000] $r3 // MEM_BASE
691
692 // disable the mmio list now, we don't need/want to execute it again
693 st b32 D[$r0 + chan_mmio_count] $r0
694 mov $r1 chan_data
695 sethi $r1 0x00060000 // 256 bytes
696 xdst $r0 $r1
697 xdwait
698 ret
699
700// Transfer HUB context data between GPU and storage area
701//
702// In: $r2 channel address
703// $p1 clear on save, set on load
704// $p2 set if opposite direction done/will be done, so:
705// on save it means: "a load will follow this save"
706// on load it means: "a save preceded this load"
707//
708ctx_xfer:
709 bra not $p1 ctx_xfer_pre
710 bra $p2 ctx_xfer_pre_load
711 ctx_xfer_pre:
712 mov $r15 0x10
713 call ctx_86c
714 call ctx_4160s
715 bra not $p1 ctx_xfer_exec
716
717 ctx_xfer_pre_load:
718 mov $r15 2
719 call ctx_4170s
720 call ctx_4170w
721 call ctx_redswitch
722 clear b32 $r15
723 call ctx_4170s
724 call ctx_load
725
726 // fetch context pointer, and initiate xfer on all GPCs
727 ctx_xfer_exec:
728 ld b32 $r1 D[$r0 + ctx_current]
729 mov $r2 0x414
730 shl b32 $r2 6
731 iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
732 mov $r14 -0x5b00
733 sethi $r14 0x410000
734 mov b32 $r15 $r1
735 call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
736 add b32 $r14 4
737 xbit $r15 $flags $p1
738 xbit $r2 $flags $p2
739 shl b32 $r2 1
740 or $r15 $r2
741 call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
742
743 // strands
744 mov $r1 0x4afc
745 sethi $r1 0x20000
746 mov $r2 0xc
747 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
748 call strand_wait
749 mov $r2 0x47fc
750 sethi $r2 0x20000
751 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
752 xbit $r2 $flags $p1
753 add b32 $r2 3
754 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
755
756 // mmio context
757 xbit $r10 $flags $p1 // direction
758 or $r10 6 // first, last
759 mov $r11 0 // base = 0
760 ld b32 $r12 D[$r0 + hub_mmio_list_head]
761 ld b32 $r13 D[$r0 + hub_mmio_list_tail]
762 mov $r14 0 // not multi
763 call mmctx_xfer
764
765 // wait for GPCs to all complete
766 mov $r10 8 // DONE_BAR
767 call wait_doneo
768
769 // wait for strand xfer to complete
770 call strand_wait
771
772 // post-op
773 bra $p1 ctx_xfer_post
774 mov $r10 12 // DONE_UNK12
775 call wait_donez
776 mov $r1 0xa10
777 shl b32 $r1 6
778 mov $r2 5
779 iowr I[$r1] $r2 // MEM_CMD
780 ctx_xfer_post_save_wait:
781 iord $r2 I[$r1]
782 or $r2 $r2
783 bra ne ctx_xfer_post_save_wait
784
785 bra $p2 ctx_xfer_done
786 ctx_xfer_post:
787 mov $r15 2
788 call ctx_4170s
789 clear b32 $r15
790 call ctx_86c
791 call strand_post
792 call ctx_4170w
793 clear b32 $r15
794 call ctx_4170s
795
796 bra not $p1 ctx_xfer_no_post_mmio
797 ld b32 $r1 D[$r0 + chan_mmio_count]
798 or $r1 $r1
799 bra e ctx_xfer_no_post_mmio
800 call ctx_mmio_exec
801
802 ctx_xfer_no_post_mmio:
803 call ctx_4160c
804
805 ctx_xfer_done:
806 ret
807
808.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
new file mode 100644
index 000000000000..b3b541b6d044
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -0,0 +1,838 @@
1uint32_t nvc0_grhub_data[] = {
2 0x00000000,
3 0x00000000,
4 0x00000000,
5 0x00000000,
6 0x00000000,
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x000000c0,
26 0x012c0090,
27 0x000000c1,
28 0x01300090,
29 0x000000c3,
30 0x012c0090,
31 0x000000c4,
32 0x012c0090,
33 0x000000c8,
34 0x012c0090,
35 0x000000ce,
36 0x012c0090,
37 0x00000000,
38 0x0417e91c,
39 0x04400204,
40 0x28404004,
41 0x00404044,
42 0x34404094,
43 0x184040d0,
44 0x004040f8,
45 0x08404130,
46 0x08404150,
47 0x04404164,
48 0x08404174,
49 0x1c404200,
50 0x34404404,
51 0x0c404460,
52 0x00404480,
53 0x00404498,
54 0x0c404604,
55 0x7c404618,
56 0x50404698,
57 0x044046f0,
58 0x54404700,
59 0x00405800,
60 0x08405830,
61 0x00405854,
62 0x0c405870,
63 0x04405a00,
64 0x00405a18,
65 0x00406020,
66 0x0c406028,
67 0x044064a8,
68 0x044064b4,
69 0x00407804,
70 0x1440780c,
71 0x004078bc,
72 0x18408000,
73 0x00408064,
74 0x08408800,
75 0x0c408900,
76 0x00408980,
77 0x044064c0,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x00000000,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x00000000,
115 0x00000000,
116 0x00000000,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139 0x00000000,
140 0x00000000,
141 0x00000000,
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195};
196
197uint32_t nvc0_grhub_code[] = {
198 0x03090ef5,
199 0x9800d898,
200 0x86f001d9,
201 0x0489b808,
202 0xf00c1bf4,
203 0x21f502f7,
204 0x00f802ec,
205 0xb60798c4,
206 0x8dbb0384,
207 0x0880b600,
208 0x80008e80,
209 0x90b6018f,
210 0x0f94f001,
211 0xf801d980,
212 0x0131f400,
213 0x9800d898,
214 0x89b801d9,
215 0x210bf404,
216 0xb60789c4,
217 0x9dbb0394,
218 0x0890b600,
219 0x98009e98,
220 0x80b6019f,
221 0x0f84f001,
222 0xf400d880,
223 0x00f80132,
224 0x0728b7f1,
225 0xb906b4b6,
226 0xc9f002ec,
227 0x00bcd01f,
228 0xc800bccf,
229 0x1bf41fcc,
230 0x06a7f0fa,
231 0x010321f5,
232 0xf840bfcf,
233 0x28b7f100,
234 0x06b4b607,
235 0xb980bfd0,
236 0xc9f002ec,
237 0x1ec9f01f,
238 0xcf00bcd0,
239 0xccc800bc,
240 0xfa1bf41f,
241 0x87f100f8,
242 0x84b60430,
243 0x1ff9f006,
244 0xf8008fd0,
245 0x3087f100,
246 0x0684b604,
247 0xf80080d0,
248 0x3c87f100,
249 0x0684b608,
250 0x99f094bd,
251 0x0089d000,
252 0x081887f1,
253 0xd00684b6,
254 0x87f1008a,
255 0x84b60400,
256 0x0088cf06,
257 0xf4888aff,
258 0x87f1f31b,
259 0x84b6085c,
260 0xf094bd06,
261 0x89d00099,
262 0xf100f800,
263 0xb6083c87,
264 0x94bd0684,
265 0xd00099f0,
266 0x87f10089,
267 0x84b60818,
268 0x008ad006,
269 0x040087f1,
270 0xcf0684b6,
271 0x8aff0088,
272 0xf30bf488,
273 0x085c87f1,
274 0xbd0684b6,
275 0x0099f094,
276 0xf80089d0,
277 0x9894bd00,
278 0x85b600e8,
279 0x0180b61a,
280 0xbb0284b6,
281 0xe0b60098,
282 0x04efb804,
283 0xb9eb1bf4,
284 0x00f8029f,
285 0x083c87f1,
286 0xbd0684b6,
287 0x0199f094,
288 0xf10089d0,
289 0xb6071087,
290 0x94bd0684,
291 0xf405bbfd,
292 0x8bd0090b,
293 0x0099f000,
294 0xf405eefd,
295 0x8ed00c0b,
296 0xc08fd080,
297 0xb70199f0,
298 0xc8010080,
299 0xb4b600ab,
300 0x0cb9f010,
301 0xb601aec8,
302 0xbefd11e4,
303 0x008bd005,
304 0xf0008ecf,
305 0x0bf41fe4,
306 0x00ce98fa,
307 0xd005e9fd,
308 0xc0b6c08e,
309 0x04cdb804,
310 0xc8e81bf4,
311 0x1bf402ab,
312 0x008bcf18,
313 0xb01fb4f0,
314 0x1bf410b4,
315 0x02a7f0f7,
316 0xf4c921f4,
317 0xabc81b0e,
318 0x10b4b600,
319 0xf00cb9f0,
320 0x8bd012b9,
321 0x008bcf00,
322 0xf412bbc8,
323 0x87f1fa1b,
324 0x84b6085c,
325 0xf094bd06,
326 0x89d00199,
327 0xf900f800,
328 0x02a7f0a0,
329 0xfcc921f4,
330 0xf100f8a0,
331 0xf04afc87,
332 0x97f00283,
333 0x0089d00c,
334 0x020721f5,
335 0x87f100f8,
336 0x83f04afc,
337 0x0d97f002,
338 0xf50089d0,
339 0xf8020721,
340 0xfca7f100,
341 0x02a3f04f,
342 0x0500aba2,
343 0xd00fc7f0,
344 0xc7f000ac,
345 0x00bcd00b,
346 0x020721f5,
347 0xf000aed0,
348 0xbcd00ac7,
349 0x0721f500,
350 0xf100f802,
351 0xb6083c87,
352 0x94bd0684,
353 0xd00399f0,
354 0x21f50089,
355 0xe7f00213,
356 0x3921f503,
357 0xfca7f102,
358 0x02a3f046,
359 0x0400aba0,
360 0xf040a0d0,
361 0xbcd001c7,
362 0x0721f500,
363 0x010c9202,
364 0xf000acd0,
365 0xbcd002c7,
366 0x0721f500,
367 0x2621f502,
368 0x8087f102,
369 0x0684b608,
370 0xb70089cf,
371 0x95220080,
372 0x8ed008fe,
373 0x408ed000,
374 0xb6808acf,
375 0xa0b606a5,
376 0x00eabb01,
377 0xb60480b6,
378 0x1bf40192,
379 0x08e4b6e8,
380 0xf1f2efbc,
381 0xb6085c87,
382 0x94bd0684,
383 0xd00399f0,
384 0x00f80089,
385 0xe7f1e0f9,
386 0xe4b60814,
387 0x00efd006,
388 0x0c1ce7f1,
389 0xf006e4b6,
390 0xefd001f7,
391 0xf8e0fc00,
392 0xfe04bd00,
393 0x07fe0004,
394 0x0017f100,
395 0x0227f012,
396 0xf10012d0,
397 0xfe05b917,
398 0x17f10010,
399 0x10d00400,
400 0x0437f1c0,
401 0x0634b604,
402 0x200327f1,
403 0xf10032d0,
404 0xd0200427,
405 0x27f10132,
406 0x32d0200b,
407 0x0c27f102,
408 0x0732d020,
409 0x0c2427f1,
410 0xb90624b6,
411 0x23d00003,
412 0x0427f100,
413 0x0023f087,
414 0xb70012d0,
415 0xf0010012,
416 0x12d00427,
417 0x1031f400,
418 0x9604e7f1,
419 0xf440e3f0,
420 0xf1c76821,
421 0x01018090,
422 0x801ff4f0,
423 0x17f0000f,
424 0x041fbb01,
425 0xf10112b6,
426 0xb6040c27,
427 0x21d00624,
428 0x4021d000,
429 0x080027f1,
430 0xcf0624b6,
431 0xf7f00022,
432 0x08f0b654,
433 0xb800f398,
434 0x0bf40432,
435 0x0034b00b,
436 0xf8f11bf4,
437 0x0017f100,
438 0x02fe5801,
439 0xf003ff58,
440 0x0e8000e3,
441 0x150f8014,
442 0x013d21f5,
443 0x070037f1,
444 0x950634b6,
445 0x34d00814,
446 0x4034d000,
447 0x130030b7,
448 0xb6001fbb,
449 0x3fd002f5,
450 0x0815b600,
451 0xb60110b6,
452 0x1fb90814,
453 0x6321f502,
454 0x001fbb02,
455 0xf1000398,
456 0xf0200047,
457 0x4ea05043,
458 0x1fb90804,
459 0x8d21f402,
460 0x08004ea0,
461 0xf4022fb9,
462 0x4ea08d21,
463 0xf4bd010c,
464 0xa08d21f4,
465 0xf401044e,
466 0x4ea08d21,
467 0xf7f00100,
468 0x8d21f402,
469 0x08004ea0,
470 0xc86821f4,
471 0x0bf41fff,
472 0x044ea0fa,
473 0x6821f408,
474 0xb7001fbb,
475 0xb6800040,
476 0x1bf40132,
477 0x0027f1b4,
478 0x0624b608,
479 0xb74021d0,
480 0xbd080020,
481 0x1f19f014,
482 0xf40021d0,
483 0x28f40031,
484 0x08d7f000,
485 0xf43921f4,
486 0xe4b1f401,
487 0x1bf54001,
488 0x87f100d1,
489 0x84b6083c,
490 0xf094bd06,
491 0x89d00499,
492 0x0017f100,
493 0x0614b60b,
494 0xcf4012cf,
495 0x13c80011,
496 0x7e0bf41f,
497 0xf41f23c8,
498 0x20f95a0b,
499 0xf10212b9,
500 0xb6083c87,
501 0x94bd0684,
502 0xd00799f0,
503 0x32f40089,
504 0x0231f401,
505 0x082921f5,
506 0x085c87f1,
507 0xbd0684b6,
508 0x0799f094,
509 0xfc0089d0,
510 0x3c87f120,
511 0x0684b608,
512 0x99f094bd,
513 0x0089d006,
514 0xf50131f4,
515 0xf1082921,
516 0xb6085c87,
517 0x94bd0684,
518 0xd00699f0,
519 0x0ef40089,
520 0xb920f931,
521 0x32f40212,
522 0x0232f401,
523 0x082921f5,
524 0x17f120fc,
525 0x14b60b00,
526 0x0012d006,
527 0xc8130ef4,
528 0x0bf41f23,
529 0x0131f40d,
530 0xf50232f4,
531 0xf1082921,
532 0xb60b0c17,
533 0x27f00614,
534 0x0012d001,
535 0x085c87f1,
536 0xbd0684b6,
537 0x0499f094,
538 0xf50089d0,
539 0xb0ff200e,
540 0x1bf401e4,
541 0x02f2b90d,
542 0x07b521f5,
543 0xb0420ef4,
544 0x1bf402e4,
545 0x3c87f12e,
546 0x0684b608,
547 0x99f094bd,
548 0x0089d007,
549 0xf40132f4,
550 0x21f50232,
551 0x87f10829,
552 0x84b6085c,
553 0xf094bd06,
554 0x89d00799,
555 0x110ef400,
556 0xf010ef94,
557 0x21f501f5,
558 0x0ef502ec,
559 0x17f1fed1,
560 0x14b60820,
561 0xf024bd06,
562 0x12d01f29,
563 0xbe0ef500,
564 0xfe80f9fe,
565 0x80f90188,
566 0xa0f990f9,
567 0xd0f9b0f9,
568 0xf0f9e0f9,
569 0xc4800acf,
570 0x0bf404ab,
571 0x00b7f11d,
572 0x08d7f019,
573 0xcf40becf,
574 0x21f400bf,
575 0x00b0b704,
576 0x01e7f004,
577 0xe400bed0,
578 0xf40100ab,
579 0xd7f00d0b,
580 0x01e7f108,
581 0x0421f440,
582 0x0104b7f1,
583 0xabffb0bd,
584 0x0d0bf4b4,
585 0x0c1ca7f1,
586 0xd006a4b6,
587 0x0ad000ab,
588 0xfcf0fc40,
589 0xfcd0fce0,
590 0xfca0fcb0,
591 0xfe80fc90,
592 0x80fc0088,
593 0xf80032f4,
594 0x60e7f101,
595 0x40e3f041,
596 0xf401f7f0,
597 0x21f48d21,
598 0x04ffc868,
599 0xf8fa0bf4,
600 0x60e7f100,
601 0x40e3f041,
602 0x21f4f4bd,
603 0xf100f88d,
604 0xf04170e7,
605 0xf5f040e3,
606 0x8d21f410,
607 0xe7f100f8,
608 0xe3f04170,
609 0x6821f440,
610 0xf410f4f0,
611 0x00f8f31b,
612 0x0614e7f1,
613 0xf106e4b6,
614 0xd00270f7,
615 0xf7f000ef,
616 0x01f2b608,
617 0xf1fd1bf4,
618 0xd00770f7,
619 0x00f800ef,
620 0x086ce7f1,
621 0xd006e4b6,
622 0xe7f100ef,
623 0xe3f08a14,
624 0x8d21f440,
625 0xa86ce7f1,
626 0xf441e3f0,
627 0x00f88d21,
628 0x083c87f1,
629 0xbd0684b6,
630 0x0599f094,
631 0xf00089d0,
632 0x21f40ca7,
633 0x2417f1c9,
634 0x0614b60a,
635 0xf10010d0,
636 0xb60b0037,
637 0x32d00634,
638 0x0c17f140,
639 0x0614b60a,
640 0xd00747f0,
641 0x14d00012,
642 0x4014cf40,
643 0xf41f44f0,
644 0x32d0fa1b,
645 0x000bfe00,
646 0xb61f2af0,
647 0x20b60424,
648 0x3c87f102,
649 0x0684b608,
650 0x99f094bd,
651 0x0089d008,
652 0x0a0417f1,
653 0xd00614b6,
654 0x17f10012,
655 0x14b60a20,
656 0x0227f006,
657 0x800023f1,
658 0xf00012d0,
659 0x27f11017,
660 0x23f00300,
661 0x0512fa02,
662 0x87f103f8,
663 0x84b6085c,
664 0xf094bd06,
665 0x89d00899,
666 0xc1019800,
667 0x981814b6,
668 0x25b6c002,
669 0x0512fd08,
670 0xf1160180,
671 0xb6083c87,
672 0x94bd0684,
673 0xd00999f0,
674 0x27f10089,
675 0x24b60a04,
676 0x0021d006,
677 0xf10127f0,
678 0xb60a2017,
679 0x12d00614,
680 0x0017f100,
681 0x0613f002,
682 0xf80501fa,
683 0x5c87f103,
684 0x0684b608,
685 0x99f094bd,
686 0x0089d009,
687 0x085c87f1,
688 0xbd0684b6,
689 0x0599f094,
690 0xf80089d0,
691 0x3121f500,
692 0xb821f506,
693 0x0ca7f006,
694 0xf1c921f4,
695 0xb60a1017,
696 0x27f00614,
697 0x0012d005,
698 0xfd0012cf,
699 0x1bf40522,
700 0x4921f5fa,
701 0x9800f806,
702 0x27f18103,
703 0x24b60a04,
704 0x0023d006,
705 0x34c434bd,
706 0x0f1bf4ff,
707 0x030057f1,
708 0xfa0653f0,
709 0x03f80535,
710 0x98c04e98,
711 0x21f4c14f,
712 0x0830b68d,
713 0xf40112b6,
714 0x0398df1b,
715 0x0023d016,
716 0xf1800080,
717 0xf0020017,
718 0x01fa0613,
719 0xf803f806,
720 0x0611f400,
721 0xf01102f4,
722 0x21f510f7,
723 0x21f50698,
724 0x11f40631,
725 0x02f7f01c,
726 0x065721f5,
727 0x066621f5,
728 0x067821f5,
729 0x21f5f4bd,
730 0x21f50657,
731 0x019806b8,
732 0x1427f116,
733 0x0624b604,
734 0xf10020d0,
735 0xf0a500e7,
736 0x1fb941e3,
737 0x8d21f402,
738 0xf004e0b6,
739 0x2cf001fc,
740 0x0124b602,
741 0xf405f2fd,
742 0x17f18d21,
743 0x13f04afc,
744 0x0c27f002,
745 0xf50012d0,
746 0xf1020721,
747 0xf047fc27,
748 0x20d00223,
749 0x012cf000,
750 0xd00320b6,
751 0xacf00012,
752 0x06a5f001,
753 0x9800b7f0,
754 0x0d98140c,
755 0x00e7f015,
756 0x015c21f5,
757 0xf508a7f0,
758 0xf5010321,
759 0xf4020721,
760 0xa7f02201,
761 0xc921f40c,
762 0x0a1017f1,
763 0xf00614b6,
764 0x12d00527,
765 0x0012cf00,
766 0xf40522fd,
767 0x02f4fa1b,
768 0x02f7f032,
769 0x065721f5,
770 0x21f5f4bd,
771 0x21f50698,
772 0x21f50226,
773 0xf4bd0666,
774 0x065721f5,
775 0x981011f4,
776 0x11fd8001,
777 0x070bf405,
778 0x07df21f5,
779 0x064921f5,
780 0x000000f8,
781 0x00000000,
782 0x00000000,
783 0x00000000,
784 0x00000000,
785 0x00000000,
786 0x00000000,
787 0x00000000,
788 0x00000000,
789 0x00000000,
790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836 0x00000000,
837 0x00000000,
838};
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 82357d2df1f4..b701c439c92e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -32,7 +32,6 @@ struct nvc0_instmem_priv {
32 struct nouveau_channel *bar1; 32 struct nouveau_channel *bar1;
33 struct nouveau_gpuobj *bar3_pgd; 33 struct nouveau_gpuobj *bar3_pgd;
34 struct nouveau_channel *bar3; 34 struct nouveau_channel *bar3;
35 struct nouveau_gpuobj *chan_pgd;
36}; 35};
37 36
38int 37int
@@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev)
181 goto error; 180 goto error;
182 181
183 /* channel vm */ 182 /* channel vm */
184 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm); 183 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
184 &dev_priv->chan_vm);
185 if (ret) 185 if (ret)
186 goto error; 186 goto error;
187 187
188 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
189 if (ret)
190 goto error;
191
192 nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
193 nouveau_vm_ref(NULL, &vm, NULL);
194
195 nvc0_instmem_resume(dev); 188 nvc0_instmem_resume(dev);
196 return 0; 189 return 0;
197error: 190error:
@@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev)
211 nv_wr32(dev, 0x1704, 0x00000000); 204 nv_wr32(dev, 0x1704, 0x00000000);
212 nv_wr32(dev, 0x1714, 0x00000000); 205 nv_wr32(dev, 0x1714, 0x00000000);
213 206
214 nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd); 207 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
215 nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
216 208
217 nvc0_channel_del(&priv->bar1); 209 nvc0_channel_del(&priv->bar1);
218 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); 210 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index a179e6c55afb..9e352944a35a 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm)
105 struct drm_device *dev = vm->dev; 105 struct drm_device *dev = vm->dev;
106 struct nouveau_vm_pgd *vpgd; 106 struct nouveau_vm_pgd *vpgd;
107 unsigned long flags; 107 unsigned long flags;
108 u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; 108 u32 engine;
109
110 engine = 1;
111 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
112 engine |= 4;
109 113
110 pinstmem->flush(vm->dev); 114 pinstmem->flush(vm->dev);
111 115
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 67c6ec6f34ea..e45a24d84e98 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
61 u32 type, struct nouveau_mem **pmem) 61 u32 type, struct nouveau_mem **pmem)
62{ 62{
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; 64 struct nouveau_mm *mm = dev_priv->engine.vram.mm;
65 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
66 struct nouveau_mm *mm = man->priv;
67 struct nouveau_mm_node *r; 65 struct nouveau_mm_node *r;
68 struct nouveau_mem *mem; 66 struct nouveau_mem *mem;
69 int ret; 67 int ret;
@@ -105,9 +103,15 @@ int
105nvc0_vram_init(struct drm_device *dev) 103nvc0_vram_init(struct drm_device *dev)
106{ 104{
107 struct drm_nouveau_private *dev_priv = dev->dev_private; 105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
107 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
108 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
109 u32 length;
108 110
109 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; 111 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
110 dev_priv->vram_size *= nv_rd32(dev, 0x121c74); 112 dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
111 dev_priv->vram_rblock_size = 4096; 113
112 return 0; 114 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
115
116 return nouveau_mm_init(&vram->mm, rsvd_head, length, 1);
113} 117}
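To make the reservation arithmetic above concrete: everything is counted
in 4 KiB pages (hence the >> 12 shifts), so on a hypothetical 1 GiB board
the new init hands the allocator every page except the two reservations:

	/* worked example, hypothetical 1 GiB of vram; units are 4 KiB pages */
	const u32 rsvd_head = ( 256 * 1024) >> 12;	/* 64 pages (vga memory) */
	const u32 rsvd_tail = (1024 * 1024) >> 12;	/* 256 pages (vbios etc) */
	u32 length = (u32)((1024ULL << 20) >> 12) - rsvd_head - rsvd_tail;
	/* length == 262144 - 64 - 256 == 261824 pages for nouveau_mm_init() */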
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9541995e4b21..c742944d3805 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -764,7 +764,7 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
764} 764}
765 765
766static void atombios_crtc_program_pll(struct drm_crtc *crtc, 766static void atombios_crtc_program_pll(struct drm_crtc *crtc,
767 int crtc_id, 767 u32 crtc_id,
768 int pll_id, 768 int pll_id,
769 u32 encoder_mode, 769 u32 encoder_mode,
770 u32 encoder_id, 770 u32 encoder_id,
@@ -851,8 +851,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
851 args.v5.ucPpll = pll_id; 851 args.v5.ucPpll = pll_id;
852 break; 852 break;
853 case 6: 853 case 6:
854 args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id; 854 args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
855 args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
856 args.v6.ucRefDiv = ref_div; 855 args.v6.ucRefDiv = ref_div;
857 args.v6.usFbDiv = cpu_to_le16(fb_div); 856 args.v6.usFbDiv = cpu_to_le16(fb_div);
858 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); 857 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8c0f9e36ff8e..645b84b3d203 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -627,6 +627,7 @@ struct radeon_dp_link_train_info {
627 u8 train_set[4]; 627 u8 train_set[4];
628 u8 link_status[DP_LINK_STATUS_SIZE]; 628 u8 link_status[DP_LINK_STATUS_SIZE];
629 u8 tries; 629 u8 tries;
630 bool use_dpencoder;
630}; 631};
631 632
632static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) 633static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
@@ -646,7 +647,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
646 int rtp = 0; 647 int rtp = 0;
647 648
648 /* set training pattern on the source */ 649 /* set training pattern on the source */
649 if (ASIC_IS_DCE4(dp_info->rdev)) { 650 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
650 switch (tp) { 651 switch (tp) {
651 case DP_TRAINING_PATTERN_1: 652 case DP_TRAINING_PATTERN_1:
652 rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1; 653 rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
@@ -706,7 +707,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
706 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); 707 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
707 708
708 /* start training on the source */ 709 /* start training on the source */
709 if (ASIC_IS_DCE4(dp_info->rdev)) 710 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
710 atombios_dig_encoder_setup(dp_info->encoder, 711 atombios_dig_encoder_setup(dp_info->encoder,
711 ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0); 712 ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
712 else 713 else
@@ -731,7 +732,7 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info
731 DP_TRAINING_PATTERN_DISABLE); 732 DP_TRAINING_PATTERN_DISABLE);
732 733
733 /* disable the training pattern on the source */ 734 /* disable the training pattern on the source */
734 if (ASIC_IS_DCE4(dp_info->rdev)) 735 if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
735 atombios_dig_encoder_setup(dp_info->encoder, 736 atombios_dig_encoder_setup(dp_info->encoder,
736 ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0); 737 ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
737 else 738 else
@@ -869,7 +870,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
869 struct radeon_connector *radeon_connector; 870 struct radeon_connector *radeon_connector;
870 struct radeon_connector_atom_dig *dig_connector; 871 struct radeon_connector_atom_dig *dig_connector;
871 struct radeon_dp_link_train_info dp_info; 872 struct radeon_dp_link_train_info dp_info;
872 u8 tmp; 873 int index;
874 u8 tmp, frev, crev;
873 875
874 if (!radeon_encoder->enc_priv) 876 if (!radeon_encoder->enc_priv)
875 return; 877 return;
@@ -884,6 +886,18 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
884 (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP)) 886 (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
885 return; 887 return;
886 888
889 /* DPEncoderService newer than 1.1 can't properly program the
890 * training pattern. When facing such a version, use the
891 * DIGXEncoderControl (X == 1 | 2) instead.
892 */
893 dp_info.use_dpencoder = true;
894 index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
895 if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
896 if (crev > 1) {
897 dp_info.use_dpencoder = false;
898 }
899 }
900
887 dp_info.enc_id = 0; 901 dp_info.enc_id = 0;
888 if (dig->dig_encoder) 902 if (dig->dig_encoder)
889 dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 903 dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 15bd0477a3e8..14dce9f22172 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1382,9 +1382,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1382 1382
1383 /* set the wb address wether it's enabled or not */ 1383 /* set the wb address wether it's enabled or not */
1384 WREG32(CP_RB_RPTR_ADDR, 1384 WREG32(CP_RB_RPTR_ADDR,
1385#ifdef __BIG_ENDIAN
1386 RB_RPTR_SWAP(2) |
1387#endif
1388 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 1385 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1389 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 1386 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1390 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1387 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2047,6 +2044,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2047 rdev->config.evergreen.tile_config |= 2044 rdev->config.evergreen.tile_config |=
2048 ((gb_addr_config & 0x30000000) >> 28) << 12; 2045 ((gb_addr_config & 0x30000000) >> 28) << 12;
2049 2046
2047 rdev->config.evergreen.backend_map = gb_backend_map;
2050 WREG32(GB_BACKEND_MAP, gb_backend_map); 2048 WREG32(GB_BACKEND_MAP, gb_backend_map);
2051 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2049 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2052 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2050 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -2761,6 +2759,9 @@ int evergreen_irq_process(struct radeon_device *rdev)
2761 return IRQ_NONE; 2759 return IRQ_NONE;
2762 } 2760 }
2763restart_ih: 2761restart_ih:
2762 /* Order reading of wptr vs. reading of IH ring data */
2763 rmb();
2764
2764 /* display interrupts */ 2765 /* display interrupts */
2765 evergreen_irq_ack(rdev); 2766 evergreen_irq_ack(rdev);
2766 2767
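The rmb() added above (and mirrored in r600.c later in this patch) is the
consumer half of a standard ring-ordering protocol: the CPU must observe
the write pointer before it reads any of the IH ring entries that pointer
publishes. A generic sketch of the pattern, with made-up names rather
than the radeon IH specifics:

	/* illustrative consumer of a device-written ring; the rmb()
	 * orders the wptr read before any reads of ring contents,
	 * pairing with the device's "write entries, then publish wptr"
	 * order on the producer side */
	static void ih_process(struct ih_ring *ih)
	{
		u32 wptr = read_wptr(ih);	/* device-updated index */
		u32 rptr = ih->rptr;

		rmb();
		while (rptr != wptr) {
			handle(ih->ring[rptr]);
			rptr = (rptr + 1) & ih->mask;
		}
		ih->rptr = rptr;
	}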
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 23d36417158d..189e86522b5b 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -856,7 +856,6 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
856 case SQ_PGM_START_PS: 856 case SQ_PGM_START_PS:
857 case SQ_PGM_START_HS: 857 case SQ_PGM_START_HS:
858 case SQ_PGM_START_LS: 858 case SQ_PGM_START_LS:
859 case GDS_ADDR_BASE:
860 case SQ_CONST_MEM_BASE: 859 case SQ_CONST_MEM_BASE:
861 case SQ_ALU_CONST_CACHE_GS_0: 860 case SQ_ALU_CONST_CACHE_GS_0:
862 case SQ_ALU_CONST_CACHE_GS_1: 861 case SQ_ALU_CONST_CACHE_GS_1:
@@ -946,6 +945,34 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
946 } 945 }
947 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 946 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
948 break; 947 break;
948 case SX_MEMORY_EXPORT_BASE:
949 if (p->rdev->family >= CHIP_CAYMAN) {
950 dev_warn(p->dev, "bad SET_CONFIG_REG "
951 "0x%04X\n", reg);
952 return -EINVAL;
953 }
954 r = evergreen_cs_packet_next_reloc(p, &reloc);
955 if (r) {
956 dev_warn(p->dev, "bad SET_CONFIG_REG "
957 "0x%04X\n", reg);
958 return -EINVAL;
959 }
960 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
961 break;
962 case CAYMAN_SX_SCATTER_EXPORT_BASE:
963 if (p->rdev->family < CHIP_CAYMAN) {
964 dev_warn(p->dev, "bad SET_CONTEXT_REG "
965 "0x%04X\n", reg);
966 return -EINVAL;
967 }
968 r = evergreen_cs_packet_next_reloc(p, &reloc);
969 if (r) {
970 dev_warn(p->dev, "bad SET_CONTEXT_REG "
971 "0x%04X\n", reg);
972 return -EINVAL;
973 }
974 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
975 break;
949 default: 976 default:
950 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 977 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
951 return -EINVAL; 978 return -EINVAL;
@@ -1153,6 +1180,34 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1153 return r; 1180 return r;
1154 } 1181 }
1155 break; 1182 break;
1183 case PACKET3_DISPATCH_DIRECT:
1184 if (pkt->count != 3) {
1185 DRM_ERROR("bad DISPATCH_DIRECT\n");
1186 return -EINVAL;
1187 }
1188 r = evergreen_cs_track_check(p);
1189 if (r) {
1190 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1191 return r;
1192 }
1193 break;
1194 case PACKET3_DISPATCH_INDIRECT:
1195 if (pkt->count != 1) {
1196 DRM_ERROR("bad DISPATCH_INDIRECT\n");
1197 return -EINVAL;
1198 }
1199 r = evergreen_cs_packet_next_reloc(p, &reloc);
1200 if (r) {
1201 DRM_ERROR("bad DISPATCH_INDIRECT\n");
1202 return -EINVAL;
1203 }
1204 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1205 r = evergreen_cs_track_check(p);
1206 if (r) {
1207 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1208 return r;
1209 }
1210 break;
1156 case PACKET3_WAIT_REG_MEM: 1211 case PACKET3_WAIT_REG_MEM:
1157 if (pkt->count != 5) { 1212 if (pkt->count != 5) {
1158 DRM_ERROR("bad WAIT_REG_MEM\n"); 1213 DRM_ERROR("bad WAIT_REG_MEM\n");
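For reference on the two count checks added above: a PACKET3 header's
count field holds the number of payload dwords minus one, so
DISPATCH_DIRECT's four-dword body (DIM_X, DIM_Y, DIM_Z,
DISPATCH_INITIATOR) must carry count == 3, and DISPATCH_INDIRECT's
two-dword body (data offset, initiator) count == 1. The usual encoding,
as a sketch:

	/* PM4 type-3 header: 'n' is payload dwords minus one */
	#define PACKET3(op, n)	((3u << 30) | (((n) & 0x3fff) << 16) | (((op) & 0xff) << 8))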
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b7b2714f0b32..7363d9dec909 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -351,6 +351,7 @@
351#define COLOR_BUFFER_SIZE(x) ((x) << 0) 351#define COLOR_BUFFER_SIZE(x) ((x) << 0)
352#define POSITION_BUFFER_SIZE(x) ((x) << 8) 352#define POSITION_BUFFER_SIZE(x) ((x) << 8)
353#define SMX_BUFFER_SIZE(x) ((x) << 16) 353#define SMX_BUFFER_SIZE(x) ((x) << 16)
354#define SX_MEMORY_EXPORT_BASE 0x9010
354#define SX_MISC 0x28350 355#define SX_MISC 0x28350
355 356
356#define CB_PERF_CTR0_SEL_0 0x9A20 357#define CB_PERF_CTR0_SEL_0 0x9A20
@@ -1122,6 +1123,7 @@
1122#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0 1123#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
1123#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0 1124#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
1124#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7 1125#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
1126#define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358
1125/* cayman packet3 addition */ 1127/* cayman packet3 addition */
1126#define CAYMAN_PACKET3_DEALLOC_STATE 0x14 1128#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
1127 1129
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 559dbd412906..44c4750f4518 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -833,6 +833,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
833 rdev->config.cayman.tile_config |= 833 rdev->config.cayman.tile_config |=
834 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 834 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
835 835
836 rdev->config.cayman.backend_map = gb_backend_map;
836 WREG32(GB_BACKEND_MAP, gb_backend_map); 837 WREG32(GB_BACKEND_MAP, gb_backend_map);
837 WREG32(GB_ADDR_CONFIG, gb_addr_config); 838 WREG32(GB_ADDR_CONFIG, gb_addr_config);
838 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 839 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bc54b26cb32f..aa5571b73aa0 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1662,6 +1662,7 @@ void r600_gpu_init(struct radeon_device *rdev)
 						R6XX_MAX_BACKENDS_MASK) >> 16)),
 					(cc_rb_backend_disable >> 16));
 	rdev->config.r600.tile_config = tiling_config;
+	rdev->config.r600.backend_map = backend_map;
 	tiling_config |= BACKEND_MAP(backend_map);
 	WREG32(GB_TILING_CONFIG, tiling_config);
 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -2212,9 +2213,6 @@ int r600_cp_resume(struct radeon_device *rdev)
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
-	       RB_RPTR_SWAP(2) |
-#endif
 	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2994,10 +2992,6 @@ int r600_irq_init(struct radeon_device *rdev)
 	/* RPTR_REARM only works if msi's are enabled */
 	if (rdev->msi_enabled)
 		ih_cntl |= RPTR_REARM;
-
-#ifdef __BIG_ENDIAN
-	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
-#endif
 	WREG32(IH_CNTL, ih_cntl);
 
 	/* force the active interrupt state to all disabled */
@@ -3308,6 +3302,10 @@ int r600_irq_process(struct radeon_device *rdev)
 	if (!rdev->ih.enabled || rdev->shutdown)
 		return IRQ_NONE;
 
+	/* No MSIs, need a dummy read to flush PCI DMAs */
+	if (!rdev->msi_enabled)
+		RREG32(IH_RB_WPTR);
+
 	wptr = r600_get_ih_wptr(rdev);
 	rptr = rdev->ih.rptr;
 	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
@@ -3320,6 +3318,9 @@ int r600_irq_process(struct radeon_device *rdev)
 	}
 
 restart_ih:
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
 	/* display interrupts */
 	r600_irq_ack(rdev);
 
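The dummy IH_RB_WPTR read flushes any PCI DMA still in flight when MSIs are off, and the rmb() orders the write-pointer read against reads of the ring entries it advertises. The consumer-side pattern, sketched generically (ring, rptr, ring_mask and process() are hypothetical):

/* Producer (the GPU) writes ring entries, then publishes wptr.
 * The consumer must read wptr first and fence before touching the
 * data, or it may pair a new wptr with stale ring contents. */
u32 wptr = *wptr_ptr;   /* write-back copy updated by the GPU */
rmb();                  /* order: wptr read before ring data reads */
while (rptr != wptr) {
        u32 entry = ring[rptr];  /* safe: cannot be stale now */
        process(entry);
        rptr = (rptr + 1) & ring_mask;
}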
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c3ab959bdc7c..45fd592f9606 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1802,8 +1802,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 	/* Set ring buffer size */
 #ifdef __BIG_ENDIAN
 	RADEON_WRITE(R600_CP_RB_CNTL,
-		     RADEON_BUF_SWAP_32BIT |
-		     RADEON_RB_NO_UPDATE |
+		     R600_BUF_SWAP_32BIT |
+		     R600_RB_NO_UPDATE |
 		     (dev_priv->ring.rptr_update_l2qw << 8) |
 		     dev_priv->ring.size_l2qw);
 #else
@@ -1820,15 +1820,15 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 
 #ifdef __BIG_ENDIAN
 	RADEON_WRITE(R600_CP_RB_CNTL,
-		     RADEON_BUF_SWAP_32BIT |
-		     RADEON_RB_NO_UPDATE |
-		     RADEON_RB_RPTR_WR_ENA |
+		     R600_BUF_SWAP_32BIT |
+		     R600_RB_NO_UPDATE |
+		     R600_RB_RPTR_WR_ENA |
 		     (dev_priv->ring.rptr_update_l2qw << 8) |
 		     dev_priv->ring.size_l2qw);
 #else
 	RADEON_WRITE(R600_CP_RB_CNTL,
-		     RADEON_RB_NO_UPDATE |
-		     RADEON_RB_RPTR_WR_ENA |
+		     R600_RB_NO_UPDATE |
+		     R600_RB_RPTR_WR_ENA |
 		     (dev_priv->ring.rptr_update_l2qw << 8) |
 		     dev_priv->ring.size_l2qw);
 #endif
@@ -1851,13 +1851,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 			- ((unsigned long) dev->sg->virtual)
 			+ dev_priv->gart_vm_start;
 	}
-	RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
-		     (2 << 0) |
-#endif
-		     (rptr_addr & 0xfffffffc));
-	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
-		     upper_32_bits(rptr_addr));
+	RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
+	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
 
 #ifdef __BIG_ENDIAN
 	RADEON_WRITE(R600_CP_RB_CNTL,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 909bda8dd550..db8ef1905d5f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1200,6 +1200,15 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
 		}
 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		break;
+	case SX_MEMORY_EXPORT_BASE:
+		r = r600_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
 	default:
 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ef0e0e016914..ef37a9b5a3cc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1003,6 +1003,7 @@ struct r600_asic {
 	unsigned tiling_npipes;
 	unsigned tiling_group_size;
 	unsigned tile_config;
+	unsigned backend_map;
 	struct r100_gpu_lockup	lockup;
 };
 
@@ -1028,6 +1029,7 @@ struct rv770_asic {
 	unsigned tiling_npipes;
 	unsigned tiling_group_size;
 	unsigned tile_config;
+	unsigned backend_map;
 	struct r100_gpu_lockup	lockup;
 };
 
@@ -1054,6 +1056,7 @@ struct evergreen_asic {
 	unsigned tiling_npipes;
 	unsigned tiling_group_size;
 	unsigned tile_config;
+	unsigned backend_map;
 	struct r100_gpu_lockup	lockup;
 };
 
@@ -1174,7 +1177,7 @@ struct radeon_device {
 	/* Register mmio */
 	resource_size_t			rmmio_base;
 	resource_size_t			rmmio_size;
-	void				*rmmio;
+	void __iomem			*rmmio;
 	radeon_rreg_t			mc_rreg;
 	radeon_wreg_t			mc_wreg;
 	radeon_rreg_t			pll_rreg;
@@ -1251,20 +1254,20 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 {
 	if (reg < rdev->rmmio_size)
-		return readl(((void __iomem *)rdev->rmmio) + reg);
+		return readl((rdev->rmmio) + reg);
 	else {
-		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
+		return readl((rdev->rmmio) + RADEON_MM_DATA);
 	}
 }
 
 static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
 	if (reg < rdev->rmmio_size)
-		writel(v, ((void __iomem *)rdev->rmmio) + reg);
+		writel(v, (rdev->rmmio) + reg);
 	else {
-		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
+		writel(v, (rdev->rmmio) + RADEON_MM_DATA);
 	}
 }
 
@@ -1296,10 +1299,10 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 /*
  * Registers read & write functions.
  */
-#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
-#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
-#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
-#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG8(reg) readb((rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
+#define RREG16(reg) readw((rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
 #define RREG32(reg) r100_mm_rreg(rdev, (reg))
 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
 #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
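Annotating rmmio as void __iomem * once, at the declaration, is what lets every accessor above drop its cast: sparse can then check that the pointer only ever reaches readl()/writel() and friends. The idiom in isolation, with a hypothetical device struct:

#include <linux/io.h>
#include <linux/types.h>

struct mydev {
        void __iomem *regs; /* annotated at the source, not at call sites */
};

static u32 mydev_read(struct mydev *d, u32 reg)
{
        return readl(d->regs + reg); /* no cast needed */
}

static void mydev_write(struct mydev *d, u32 reg, u32 val)
{
        writel(val, d->regs + reg);
}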
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index b2449629537d..df8218bb83a6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -625,7 +625,7 @@ static struct radeon_asic r600_asic = {
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
-	.copy_dma = &r600_copy_blit,
+	.copy_dma = NULL,
 	.copy = &r600_copy_blit,
 	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -672,7 +672,7 @@ static struct radeon_asic rs780_asic = {
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
-	.copy_dma = &r600_copy_blit,
+	.copy_dma = NULL,
 	.copy = &r600_copy_blit,
 	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -719,7 +719,7 @@ static struct radeon_asic rv770_asic = {
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
-	.copy_dma = &r600_copy_blit,
+	.copy_dma = NULL,
 	.copy = &r600_copy_blit,
 	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -766,7 +766,7 @@ static struct radeon_asic evergreen_asic = {
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &evergreen_cs_parse,
 	.copy_blit = &evergreen_copy_blit,
-	.copy_dma = &evergreen_copy_blit,
+	.copy_dma = NULL,
 	.copy = &evergreen_copy_blit,
 	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -813,7 +813,7 @@ static struct radeon_asic sumo_asic = {
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &evergreen_cs_parse,
 	.copy_blit = &evergreen_copy_blit,
-	.copy_dma = &evergreen_copy_blit,
+	.copy_dma = NULL,
 	.copy = &evergreen_copy_blit,
 	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -860,7 +860,7 @@ static struct radeon_asic btc_asic = {
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &evergreen_cs_parse,
 	.copy_blit = &evergreen_copy_blit,
-	.copy_dma = &evergreen_copy_blit,
+	.copy_dma = NULL,
 	.copy = &evergreen_copy_blit,
 	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -907,7 +907,7 @@ static struct radeon_asic cayman_asic = {
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &evergreen_cs_parse,
 	.copy_blit = &evergreen_copy_blit,
-	.copy_dma = &evergreen_copy_blit,
+	.copy_dma = NULL,
 	.copy = &evergreen_copy_blit,
 	.get_engine_clock = &radeon_atom_get_engine_clock,
 	.set_engine_clock = &radeon_atom_set_engine_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 2d48e7a1474b..dcd0863e31ae 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,7 +96,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
96 * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device 96 * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
97 * tree. Hopefully, ATI OF driver is kind enough to fill these 97 * tree. Hopefully, ATI OF driver is kind enough to fill these
98 */ 98 */
99static bool __devinit radeon_read_clocks_OF(struct drm_device *dev) 99static bool radeon_read_clocks_OF(struct drm_device *dev)
100{ 100{
101 struct radeon_device *rdev = dev->dev_private; 101 struct radeon_device *rdev = dev->dev_private;
102 struct device_node *dp = rdev->pdev->dev.of_node; 102 struct device_node *dp = rdev->pdev->dev.of_node;
@@ -166,7 +166,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
 	return true;
 }
 #else
-static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+static bool radeon_read_clocks_OF(struct drm_device *dev)
 {
 	return false;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e4594676a07c..a74217cd192f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -779,7 +779,8 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
779 } 779 }
780 } 780 }
781 } 781 }
782 } else if (rdev->family >= CHIP_R200) { 782 } else if ((rdev->family == CHIP_R200) ||
783 (rdev->family >= CHIP_R300)) {
783 /* 0x68 */ 784 /* 0x68 */
784 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); 785 i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
785 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 786 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 75867792a4e2..045ec59478f9 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2115,7 +2115,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2115 2115
2116 if (drm_pci_device_is_agp(dev)) 2116 if (drm_pci_device_is_agp(dev))
2117 dev_priv->flags |= RADEON_IS_AGP; 2117 dev_priv->flags |= RADEON_IS_AGP;
2118 else if (drm_pci_device_is_pcie(dev)) 2118 else if (pci_is_pcie(dev->pdev))
2119 dev_priv->flags |= RADEON_IS_PCIE; 2119 dev_priv->flags |= RADEON_IS_PCIE;
2120 else 2120 else
2121 dev_priv->flags |= RADEON_IS_PCI; 2121 dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 292f73f0ddbd..28f4655905bc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -282,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
 	work = radeon_crtc->unpin_work;
 	if (work == NULL ||
-	    !radeon_fence_signaled(work->fence)) {
+	    (work->fence && !radeon_fence_signaled(work->fence))) {
 		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 		return;
 	}
@@ -348,7 +348,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	struct radeon_framebuffer *new_radeon_fb;
 	struct drm_gem_object *obj;
 	struct radeon_bo *rbo;
-	struct radeon_fence *fence;
 	struct radeon_unpin_work *work;
 	unsigned long flags;
 	u32 tiling_flags, pitch_pixels;
@@ -359,16 +358,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	if (work == NULL)
 		return -ENOMEM;
 
-	r = radeon_fence_create(rdev, &fence);
-	if (unlikely(r != 0)) {
-		kfree(work);
-		DRM_ERROR("flip queue: failed to create fence.\n");
-		return -ENOMEM;
-	}
 	work->event = event;
 	work->rdev = rdev;
 	work->crtc_id = radeon_crtc->crtc_id;
-	work->fence = radeon_fence_ref(fence);
 	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
 	new_radeon_fb = to_radeon_framebuffer(fb);
 	/* schedule unpin of the old buffer */
@@ -377,6 +369,10 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	drm_gem_object_reference(obj);
 	rbo = gem_to_radeon_bo(obj);
 	work->old_rbo = rbo;
+	obj = new_radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	if (rbo->tbo.sync_obj)
+		work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
 	INIT_WORK(&work->work, radeon_unpin_work_func);
 
 	/* We borrow the event spin lock for protecting unpin_work */
@@ -391,9 +387,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* pin the new buffer */
-	obj = new_radeon_fb->obj;
-	rbo = gem_to_radeon_bo(obj);
-
 	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
 			 work->old_rbo, rbo);
 
@@ -461,37 +454,18 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 		goto pflip_cleanup1;
 	}
 
-	/* 32 ought to cover us */
-	r = radeon_ring_lock(rdev, 32);
-	if (r) {
-		DRM_ERROR("failed to lock the ring before flip\n");
-		goto pflip_cleanup2;
-	}
-
-	/* emit the fence */
-	radeon_fence_emit(rdev, fence);
 	/* set the proper interrupt */
 	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
-	/* fire the ring */
-	radeon_ring_unlock_commit(rdev);
 
 	return 0;
 
-pflip_cleanup2:
-	drm_vblank_put(dev, radeon_crtc->crtc_id);
-
 pflip_cleanup1:
-	r = radeon_bo_reserve(rbo, false);
-	if (unlikely(r != 0)) {
+	if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
 		DRM_ERROR("failed to reserve new rbo in error path\n");
 		goto pflip_cleanup;
 	}
-	r = radeon_bo_unpin(rbo);
-	if (unlikely(r != 0)) {
-		radeon_bo_unreserve(rbo);
-		r = -EINVAL;
+	if (unlikely(radeon_bo_unpin(rbo) != 0)) {
 		DRM_ERROR("failed to unpin new rbo in error path\n");
-		goto pflip_cleanup;
 	}
 	radeon_bo_unreserve(rbo);
 
@@ -501,7 +475,7 @@ pflip_cleanup:
 unlock_free:
 	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
-	radeon_fence_unref(&fence);
+	radeon_fence_unref(&work->fence);
 	kfree(work);
 
 	return r;
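Rather than locking the ring and emitting a fresh fence just to learn when the new buffer is idle, the flip now borrows the fence TTM already attached to it (tbo.sync_obj), and a NULL work->fence simply means the buffer is already idle, which is why the completion handler above tolerates a missing fence. The pattern in miniature (complete_flip() is hypothetical):

struct radeon_fence *fence = NULL;

/* take a reference on the buffer's last-use fence, if any */
if (rbo->tbo.sync_obj)
        fence = radeon_fence_ref(rbo->tbo.sync_obj);

/* later, on vblank: complete the flip only once rendering is done */
if (!fence || radeon_fence_signaled(fence))
        complete_flip();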
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 73dfbe8e5f9e..85f033f19a8a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -50,10 +50,11 @@
  * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
  * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
  * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
- * 2.10.0 - fusion 2D tiling
+ * 2.10.0 - fusion 2D tiling, initial compute support for the CS checker
+ * 2.11.0 - backend map
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	10
+#define KMS_DRIVER_MINOR	11
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bd58af658581..be2c1224e68a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -60,7 +60,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
60 /* update BUS flag */ 60 /* update BUS flag */
61 if (drm_pci_device_is_agp(dev)) { 61 if (drm_pci_device_is_agp(dev)) {
62 flags |= RADEON_IS_AGP; 62 flags |= RADEON_IS_AGP;
63 } else if (drm_pci_device_is_pcie(dev)) { 63 } else if (pci_is_pcie(dev->pdev)) {
64 flags |= RADEON_IS_PCIE; 64 flags |= RADEON_IS_PCIE;
65 } else { 65 } else {
66 flags |= RADEON_IS_PCI; 66 flags |= RADEON_IS_PCI;
@@ -237,6 +237,19 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	case RADEON_INFO_FUSION_GART_WORKING:
 		value = 1;
 		break;
+	case RADEON_INFO_BACKEND_MAP:
+		if (rdev->family >= CHIP_CAYMAN)
+			value = rdev->config.cayman.backend_map;
+		else if (rdev->family >= CHIP_CEDAR)
+			value = rdev->config.evergreen.backend_map;
+		else if (rdev->family >= CHIP_RV770)
+			value = rdev->config.rv770.backend_map;
+		else if (rdev->family >= CHIP_R600)
+			value = rdev->config.r600.backend_map;
+		else {
+			return -EINVAL;
+		}
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
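With the map saved at init time (see the gpu_init hunks above), userspace can fetch it through the existing info ioctl. A hedged libdrm-style sketch, assuming RADEON_INFO_BACKEND_MAP is exported via radeon_drm.h:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Returns 0 on success and fills *backend_map; sketch only. */
static int query_backend_map(int fd, uint32_t *backend_map)
{
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_BACKEND_MAP;
        info.value = (uintptr_t)backend_map; /* kernel writes through this */

        return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}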
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aaa19dc418a0..6fabe89fa6a1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -594,6 +594,9 @@ int radeon_pm_init(struct radeon_device *rdev)
 	if (rdev->pm.default_vddc)
 		radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
 					SET_VOLTAGE_TYPE_ASIC_VDDC);
+	if (rdev->pm.default_vddci)
+		radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+					SET_VOLTAGE_TYPE_ASIC_VDDCI);
 	if (rdev->pm.default_sclk)
 		radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
 	if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index bc44a3d35ec6..b4ce86455707 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3295,7 +3295,7 @@
 # define RADEON_RB_BUFSZ_MASK		(0x3f << 0)
 # define RADEON_RB_BLKSZ_SHIFT		8
 # define RADEON_RB_BLKSZ_MASK		(0x3f << 8)
-# define RADEON_BUF_SWAP_32BIT		(1 << 17)
+# define RADEON_BUF_SWAP_32BIT		(2 << 16)
 # define RADEON_MAX_FETCH_SHIFT		18
 # define RADEON_MAX_FETCH_MASK		(0x3 << 18)
 # define RADEON_RB_NO_UPDATE		(1 << 27)
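(1 << 17) and (2 << 16) are the same value, 0x20000, so nothing changes on the wire; the rewrite documents that BUF_SWAP is a two-bit field at bits 17:16 rather than a lone flag. The full field, sketched under the assumption of the usual none/16-bit/32-bit swap encoding:

/* Assumed layout of the 2-bit buffer-swap field in RB_CNTL; only
 * the 32-bit swap value actually appears in the header above. */
#define RADEON_BUF_SWAP_SHIFT  16
#define RADEON_BUF_SWAP_NONE   (0 << RADEON_BUF_SWAP_SHIFT)
#define RADEON_BUF_SWAP_16BIT  (1 << RADEON_BUF_SWAP_SHIFT)
#define RADEON_BUF_SWAP_32BIT  (2 << RADEON_BUF_SWAP_SHIFT)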
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0aa8e85a9457..2316977eb924 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -208,6 +208,7 @@ cayman 0x9400
 0x0002834C PA_SC_VPORT_ZMAX_15
 0x00028350 SX_MISC
 0x00028354 SX_SURFACE_SYNC
+0x0002835C SX_SCATTER_EXPORT_SIZE
 0x00028380 SQ_VTX_SEMANTIC_0
 0x00028384 SQ_VTX_SEMANTIC_1
 0x00028388 SQ_VTX_SEMANTIC_2
@@ -432,6 +433,7 @@ cayman 0x9400
 0x00028700 SPI_STACK_MGMT
 0x00028704 SPI_WAVE_MGMT_1
 0x00028708 SPI_WAVE_MGMT_2
+0x00028720 GDS_ADDR_BASE
 0x00028724 GDS_ADDR_SIZE
 0x00028780 CB_BLEND0_CONTROL
 0x00028784 CB_BLEND1_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 0e28cae7ea43..161737a28c23 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -44,6 +44,7 @@ evergreen 0x9400
 0x00008E28 SQ_STATIC_THREAD_MGMT_3
 0x00008E2C SQ_LDS_RESOURCE_MGMT
 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009014 SX_MEMORY_EXPORT_SIZE
 0x00009100 SPI_CONFIG_CNTL
 0x0000913C SPI_CONFIG_CNTL_1
 0x00009508 TA_CNTL_AUX
@@ -442,7 +443,9 @@ evergreen 0x9400
 0x000286EC SPI_COMPUTE_NUM_THREAD_X
 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x00028720 GDS_ADDR_BASE
 0x00028724 GDS_ADDR_SIZE
+0x00028728 GDS_ORDERED_WAVE_PER_SE
 0x00028780 CB_BLEND0_CONTROL
 0x00028784 CB_BLEND1_CONTROL
 0x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index ea49752ee99c..0380c5c15f80 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -429,6 +429,7 @@ r600 0x9400
 0x00028438 SX_ALPHA_REF
 0x00028410 SX_ALPHA_TEST_CONTROL
 0x00028350 SX_MISC
+0x00009014 SX_MEMORY_EXPORT_SIZE
 0x00009604 TC_INVALIDATE
 0x00009400 TD_FILTER4
 0x00009404 TD_FILTER4_1
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 1f5850e473cc..4b5d0e6974a8 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -530,7 +530,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	addr = addr & 0xFFFFFFFFFFFFF000ULL;
 	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
 	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
-	writeq(addr, ((void __iomem *)ptr) + (i * 8));
+	writeq(addr, ptr + (i * 8));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4de51891aa6d..4720d000d440 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -778,6 +778,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
778 (cc_rb_backend_disable >> 16)); 778 (cc_rb_backend_disable >> 16));
779 779
780 rdev->config.rv770.tile_config = gb_tiling_config; 780 rdev->config.rv770.tile_config = gb_tiling_config;
781 rdev->config.rv770.backend_map = backend_map;
781 gb_tiling_config |= BACKEND_MAP(backend_map); 782 gb_tiling_config |= BACKEND_MAP(backend_map);
782 783
783 WREG32(GB_TILING_CONFIG, gb_tiling_config); 784 WREG32(GB_TILING_CONFIG, gb_tiling_config);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index d948575717bf..170e751c283e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -355,7 +355,7 @@ restart:
 	if (nr_free)
 		goto restart;
 
-	/* Not allowed to fall tough or break because
+	/* Not allowed to fall through or break because
 	 * following context is inside spinlock while we are
 	 * outside here.
 	 */
@@ -556,7 +556,7 @@ out:
 }
 
 /**
- * Fill the given pool if there isn't enough pages and requested number of
+ * Fill the given pool if there aren't enough pages and the requested number of
  * pages is small.
  */
 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
@@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
 	pool->fill_lock = true;
 
-	/* If allocation request is small and there is not enough
-	 * pages in pool we fill the pool first */
+	/* If allocation request is small and there are not enough
+	 * pages in a pool we fill the pool up first. */
 	if (count < _manager->options.small
 	    && count > pool->npages) {
 		struct list_head new_pages;
@@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 }
 
 /**
- * Cut count nubmer of pages from the pool and put them to return list
+ * Cut 'count' number of pages from the pool and put them on the return list.
  *
- * @return count of pages still to allocate to fill the request.
+ * @return count of pages still required to fulfill the request.
  */
 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 					struct list_head *pages, int ttm_flags,
@@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 		goto out;
 	}
 	/* find the last pages to include for requested number of pages. Split
-	 * pool to begin and halves to reduce search space. */
+	 * pool to begin and halve it to reduce search space. */
 	if (count <= pool->npages/2) {
 		i = 0;
 		list_for_each(p, &pool->list) {
@@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 			break;
 		}
 	}
-	/* Cut count number of pages from pool */
+	/* Cut 'count' number of pages from the pool */
 	list_cut_position(pages, &pool->list, p);
 	pool->npages -= count;
 	count = 0;
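list_cut_position() detaches everything from the head of the pool list up to and including p in O(1), which is why the code above only ever walks at most half the list to find the cut point. A standalone sketch of the same call (take_first_n is hypothetical and assumes src holds at least n entries):

#include <linux/list.h>

static void take_first_n(struct list_head *dst, struct list_head *src,
                         unsigned int n)
{
        struct list_head *p;
        unsigned int i = 0;

        /* walk to the n-th node, mirroring the pool code above */
        list_for_each(p, src) {
                if (++i == n)
                        break;
        }
        /* src->next .. p (inclusive) now move onto dst in O(1) */
        list_cut_position(dst, src, p);
}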