Diffstat (limited to 'drivers/gpu/drm')
59 files changed, 4876 insertions, 1268 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e4d971c8b9d0..f831ea159291 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -102,6 +102,7 @@ config DRM_I915
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
 	help
 	  Choose this option if you have a system that has Intel 830M, 845G,
 	  852GM, 855GM 865G or 915G integrated graphics.  If M is selected, the
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ba728ad77f2a..8e7b0ebece0c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -482,6 +482,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	list_for_each_entry_safe(mode, t, &connector->user_modes, head)
 		drm_mode_remove(connector, mode);
 
+	kfree(connector->fb_helper_private);
 	mutex_lock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
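Note: this kfree() pairs with the kzalloc() in drm_fb_helper_add_connector() (added below in drm_fb_helper.c), so the per-connector command-line state is freed when the connector is torn down.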
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index fe8697447f32..1fe4e1d344fd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
 
 static void drm_mode_validate_flag(struct drm_connector *connector,
 				   int flags)
@@ -90,7 +91,15 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	list_for_each_entry_safe(mode, t, &connector->modes, head)
 		mode->status = MODE_UNVERIFIED;
 
-	connector->status = connector->funcs->detect(connector);
+	if (connector->force) {
+		if (connector->force == DRM_FORCE_ON)
+			connector->status = connector_status_connected;
+		else
+			connector->status = connector_status_disconnected;
+		if (connector->funcs->force)
+			connector->funcs->force(connector);
+	} else
+		connector->status = connector->funcs->detect(connector);
 
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("%s is disconnected\n",
@@ -267,6 +276,65 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *con
 	return NULL;
 }
 
+static bool drm_has_cmdline_mode(struct drm_connector *connector)
+{
+	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+	if (!fb_help_conn)
+		return false;
+
+	cmdline_mode = &fb_help_conn->cmdline_mode;
+	return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
+{
+	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+	struct drm_display_mode *mode = NULL;
+
+	if (!fb_help_conn)
+		return mode;
+
+	cmdline_mode = &fb_help_conn->cmdline_mode;
+	if (cmdline_mode->specified == false)
+		return mode;
+
+	/* attempt to find a matching mode in the list of modes
+	 * we have gotten so far, if not add a CVT mode that conforms
+	 */
+	if (cmdline_mode->rb || cmdline_mode->margins)
+		goto create_mode;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		/* check width/height */
+		if (mode->hdisplay != cmdline_mode->xres ||
+		    mode->vdisplay != cmdline_mode->yres)
+			continue;
+
+		if (cmdline_mode->refresh_specified) {
+			if (mode->vrefresh != cmdline_mode->refresh)
+				continue;
+		}
+
+		if (cmdline_mode->interlace) {
+			if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+				continue;
+		}
+		return mode;
+	}
+
+create_mode:
+	mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
+			    cmdline_mode->yres,
+			    cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+			    cmdline_mode->rb, cmdline_mode->interlace,
+			    cmdline_mode->margins);
+	list_add(&mode->head, &connector->modes);
+	return mode;
+}
+
 static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
 {
 	bool enable;
@@ -317,10 +385,16 @@ static bool drm_target_preferred(struct drm_device *dev,
 			continue;
 		}
 
-		DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-			      connector->base.id);
+		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+			      connector->base.id);
 
-		modes[i] = drm_has_preferred_mode(connector, width, height);
+		/* got for command line mode first */
+		modes[i] = drm_pick_cmdline_mode(connector, width, height);
+		if (!modes[i]) {
+			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+				      connector->base.id);
+			modes[i] = drm_has_preferred_mode(connector, width, height);
+		}
 		/* No preferred modes, pick one off the list */
 		if (!modes[i] && !list_empty(&connector->modes)) {
 			list_for_each_entry(modes[i], &connector->modes, head)
@@ -369,6 +443,8 @@ static int drm_pick_crtcs(struct drm_device *dev,
 		my_score = 1;
 		if (connector->status == connector_status_connected)
 			my_score++;
+		if (drm_has_cmdline_mode(connector))
+			my_score++;
 		if (drm_has_preferred_mode(connector, width, height))
 			my_score++;
 
@@ -943,6 +1019,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
 	int count = 0;
 
+	drm_fb_helper_parse_command_line(dev);
+
 	count = drm_helper_probe_connector_modes(dev,
 					    dev->mode_config.max_width,
 					    dev->mode_config.max_height);
@@ -950,7 +1028,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
 	/*
 	 * we shouldn't end up with no modes here.
 	 */
-	WARN(!count, "Connected connector with 0 modes\n");
+	WARN(!count, "No connectors reported connected with modes\n");
 
 	drm_setup_crtcs(dev);
 
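The hunk above makes the probe helper skip ->detect() entirely for a forced connector and call the new, optional ->force() hook instead, with connector->status already decided. A minimal sketch of how a driver might wire that hook, assuming hypothetical example_detect()/example_destroy() callbacks (only the ->force() wiring is the point):

	static void example_force(struct drm_connector *connector)
	{
		/* connector->status was already set from the command line;
		 * skip probing (e.g. CRT load-detect with nothing attached)
		 * and just program whatever the forced output needs. */
	}

	static const struct drm_connector_funcs example_connector_funcs = {
		.dpms		= drm_helper_connector_dpms,
		.detect		= example_detect,	/* used when nothing is forced */
		.force		= example_force,	/* used instead of ->detect() */
		.fill_modes	= drm_helper_probe_single_connector_modes,
		.destroy	= example_destroy,
	};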
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 90d76bacff17..3c0d2b3aed76 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -109,7 +109,9 @@ static struct edid_quirk {
 
 
 /* Valid EDID header has these bytes */
-static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+static const u8 edid_header[] = {
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
 
 /**
  * edid_is_valid - sanity check EDID data
@@ -500,6 +502,19 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
 	}
 	return mode;
 }
+
+/*
+ * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+	return (a == 0x00 && b == 0x00) ||
+	       (a == 0x01 && b == 0x01) ||
+	       (a == 0x20 && b == 0x20);
+}
+
 /**
  * drm_mode_std - convert standard mode info (width, height, refresh) into mode
  * @t: standard timing params
@@ -513,6 +528,7 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
  */
 struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 				      struct std_timing *t,
+				      int revision,
 				      int timing_level)
 {
 	struct drm_display_mode *mode;
@@ -523,14 +539,20 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
 		>> EDID_TIMING_VFREQ_SHIFT;
 
+	if (bad_std_timing(t->hsize, t->vfreq_aspect))
+		return NULL;
+
 	/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
 	hsize = t->hsize * 8 + 248;
 	/* vrefresh_rate = vfreq + 60 */
 	vrefresh_rate = vfreq + 60;
 	/* the vdisplay is calculated based on the aspect ratio */
-	if (aspect_ratio == 0)
-		vsize = (hsize * 10) / 16;
-	else if (aspect_ratio == 1)
+	if (aspect_ratio == 0) {
+		if (revision < 3)
+			vsize = hsize;
+		else
+			vsize = (hsize * 10) / 16;
+	} else if (aspect_ratio == 1)
 		vsize = (hsize * 3) / 4;
 	else if (aspect_ratio == 2)
 		vsize = (hsize * 4) / 5;
@@ -538,7 +560,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 		vsize = (hsize * 9) / 16;
 	/* HDTV hack */
 	if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
-		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+				    false);
 		mode->hdisplay = 1366;
 		mode->vsync_start = mode->vsync_start - 1;
 		mode->vsync_end = mode->vsync_end - 1;
@@ -557,7 +580,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
 		break;
 	case LEVEL_CVT:
-		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+				    false);
 		break;
 	}
 	return mode;
@@ -779,7 +803,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 			continue;
 
 		newmode = drm_mode_std(dev, &edid->standard_timings[i],
-				       timing_level);
+				       edid->revision, timing_level);
 		if (newmode) {
 			drm_mode_probed_add(connector, newmode);
 			modes++;
@@ -829,13 +853,13 @@ static int add_detailed_info(struct drm_connector *connector,
 		case EDID_DETAIL_MONITOR_CPDATA:
 			break;
 		case EDID_DETAIL_STD_MODES:
-			/* Five modes per detailed section */
-			for (j = 0; j < 5; i++) {
+			for (j = 0; j < 6; i++) {
 				struct std_timing *std;
 				struct drm_display_mode *newmode;
 
 				std = &data->data.timings[j];
 				newmode = drm_mode_std(dev, std,
+						       edid->revision,
 						       timing_level);
 				if (newmode) {
 					drm_mode_probed_add(connector, newmode);
@@ -964,7 +988,9 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
 				struct drm_display_mode *newmode;
 
 				std = &data->data.timings[j];
-				newmode = drm_mode_std(dev, std, timing_level);
+				newmode = drm_mode_std(dev, std,
+						       edid->revision,
+						       timing_level);
 				if (newmode) {
 					drm_mode_probed_add(connector, newmode);
 					modes++;
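A worked example of the revised standard-timing math, written as a stand-alone sketch rather than kernel code (the bit layout is assumed from the masks used above: the low six bits of vfreq_aspect encode refresh minus 60, the top two bits the aspect ratio). Decoding the byte pair {0x81, 0x00}: hsize = 0x81 * 8 + 248 = 1280, and aspect code 0 now means 16:10 on EDID 1.3+, giving 1280x800@60, where a pre-1.3 EDID keeps the old 1:1 reading, 1280x1280@60:

	#include <stdio.h>

	int main(void)
	{
		unsigned char hsize_byte = 0x81, vfreq_aspect = 0x00;
		int revision = 3;			/* EDID minor revision */
		int hsize = hsize_byte * 8 + 248;	/* 1280 */
		int vrefresh = (vfreq_aspect & 0x3f) + 60;
		int aspect = vfreq_aspect >> 6;
		int vsize;

		if (aspect == 0)	/* 1:1 before EDID 1.3, 16:10 after */
			vsize = revision < 3 ? hsize : hsize * 10 / 16;
		else if (aspect == 1)	/* 4:3 */
			vsize = hsize * 3 / 4;
		else if (aspect == 2)	/* 5:4 */
			vsize = hsize * 4 / 5;
		else			/* 16:9 */
			vsize = hsize * 9 / 16;

		printf("%dx%d@%d\n", hsize, vsize, vrefresh);	/* 1280x800@60 */
		return 0;
	}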
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2c4671314884..819ddcbfcce5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -40,6 +40,199 @@ MODULE_LICENSE("GPL and additional rights");
 
 static LIST_HEAD(kernel_fb_helper_list);
 
+int drm_fb_helper_add_connector(struct drm_connector *connector)
+{
+	connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+	if (!connector->fb_helper_private)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_connector);
+
+static int my_atoi(const char *name)
+{
+	int val = 0;
+
+	for (;; name++) {
+		switch (*name) {
+		case '0' ... '9':
+			val = 10*val+(*name-'0');
+			break;
+		default:
+			return val;
+		}
+	}
+}
+
+/**
+ * drm_fb_helper_connector_parse_command_line - parse command line for connector
+ * @connector - connector to parse line for
+ * @mode_option - per connector mode option
+ *
+ * This parses the connector specific then generic command lines for
+ * modes and options to configure the connector.
+ *
+ * This uses the same parameters as the fb modedb.c, except for extra
+ *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ *
+ * enable/enable Digital/disable bit at the end
+ */
+static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+						       const char *mode_option)
+{
+	const char *name;
+	unsigned int namelen;
+	int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+	int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+	int i;
+	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+	struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+	struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+	if (!fb_help_conn)
+		return false;
+
+	cmdline_mode = &fb_help_conn->cmdline_mode;
+	if (!mode_option)
+		mode_option = fb_mode_option;
+
+	if (!mode_option) {
+		cmdline_mode->specified = false;
+		return false;
+	}
+
+	name = mode_option;
+	namelen = strlen(name);
+	for (i = namelen-1; i >= 0; i--) {
+		switch (name[i]) {
+		case '@':
+			namelen = i;
+			if (!refresh_specified && !bpp_specified &&
+			    !yres_specified) {
+				refresh = my_atoi(&name[i+1]);
+				refresh_specified = 1;
+				if (cvt || rb)
+					cvt = 0;
+			} else
+				goto done;
+			break;
+		case '-':
+			namelen = i;
+			if (!bpp_specified && !yres_specified) {
+				bpp = my_atoi(&name[i+1]);
+				bpp_specified = 1;
+				if (cvt || rb)
+					cvt = 0;
+			} else
+				goto done;
+			break;
+		case 'x':
+			if (!yres_specified) {
+				yres = my_atoi(&name[i+1]);
+				yres_specified = 1;
+			} else
+				goto done;
+		case '0' ... '9':
+			break;
+		case 'M':
+			if (!yres_specified)
+				cvt = 1;
+			break;
+		case 'R':
+			if (!cvt)
+				rb = 1;
+			break;
+		case 'm':
+			if (!cvt)
+				margins = 1;
+			break;
+		case 'i':
+			if (!cvt)
+				interlace = 1;
+			break;
+		case 'e':
+			force = DRM_FORCE_ON;
+			break;
+		case 'D':
+			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
+			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+				force = DRM_FORCE_ON;
+			else
+				force = DRM_FORCE_ON_DIGITAL;
+			break;
+		case 'd':
+			force = DRM_FORCE_OFF;
+			break;
+		default:
+			goto done;
+		}
+	}
+	if (i < 0 && yres_specified) {
+		xres = my_atoi(name);
+		res_specified = 1;
+	}
+done:
+
+	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+		drm_get_connector_name(connector), xres, yres,
+		(refresh) ? refresh : 60, (rb) ? " reduced blanking" :
+		"", (margins) ? " with margins" : "", (interlace) ?
+		" interlaced" : "");
+
+	if (force) {
+		const char *s;
+		switch (force) {
+		case DRM_FORCE_OFF: s = "OFF"; break;
+		case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+		default:
+		case DRM_FORCE_ON: s = "ON"; break;
+		}
+
+		DRM_INFO("forcing %s connector %s\n",
+			 drm_get_connector_name(connector), s);
+		connector->force = force;
+	}
+
+	if (res_specified) {
+		cmdline_mode->specified = true;
+		cmdline_mode->xres = xres;
+		cmdline_mode->yres = yres;
+	}
+
+	if (refresh_specified) {
+		cmdline_mode->refresh_specified = true;
+		cmdline_mode->refresh = refresh;
+	}
+
+	if (bpp_specified) {
+		cmdline_mode->bpp_specified = true;
+		cmdline_mode->bpp = bpp;
+	}
+	cmdline_mode->rb = rb ? true : false;
+	cmdline_mode->cvt = cvt ? true : false;
+	cmdline_mode->interlace = interlace ? true : false;
+
+	return true;
+}
+
+int drm_fb_helper_parse_command_line(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		char *option = NULL;
+
+		/* do something on return - turn off connector maybe */
+		if (fb_get_options(drm_get_connector_name(connector), &option))
+			continue;
+
+		drm_fb_helper_connector_parse_command_line(connector, option);
+	}
+	return 0;
+}
+
 bool drm_fb_helper_force_kernel_mode(void)
 {
 	int i = 0;
@@ -87,6 +280,7 @@ void drm_fb_helper_restore(void)
 }
 EXPORT_SYMBOL(drm_fb_helper_restore);
 
+#ifdef CONFIG_MAGIC_SYSRQ
 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
 	drm_fb_helper_restore();
@@ -103,6 +297,7 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
 	.help_msg = "force-fb(V)",
 	.action_msg = "Restore framebuffer console",
 };
+#endif
 
 static void drm_fb_helper_on(struct fb_info *info)
 {
@@ -484,6 +679,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
 					    uint32_t fb_height,
 					    uint32_t surface_width,
 					    uint32_t surface_height,
+					    uint32_t surface_depth,
+					    uint32_t surface_bpp,
 					    struct drm_framebuffer **fb_ptr))
 {
 	struct drm_crtc *crtc;
@@ -497,8 +694,43 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
 	struct drm_framebuffer *fb;
 	struct drm_mode_set *modeset = NULL;
 	struct drm_fb_helper *fb_helper;
+	uint32_t surface_depth = 24, surface_bpp = 32;
 
 	/* first up get a count of crtcs now in use and new min/maxes width/heights */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+
+		struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+		if (!fb_help_conn)
+			continue;
+
+		cmdline_mode = &fb_help_conn->cmdline_mode;
+
+		if (cmdline_mode->bpp_specified) {
+			switch (cmdline_mode->bpp) {
+			case 8:
+				surface_depth = surface_bpp = 8;
+				break;
+			case 15:
+				surface_depth = 15;
+				surface_bpp = 16;
+				break;
+			case 16:
+				surface_depth = surface_bpp = 16;
+				break;
+			case 24:
+				surface_depth = surface_bpp = 24;
+				break;
+			case 32:
+				surface_depth = 24;
+				surface_bpp = 32;
+				break;
+			}
+			break;
+		}
+	}
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (drm_helper_crtc_in_use(crtc)) {
 			if (crtc->desired_mode) {
@@ -527,7 +759,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
 	/* do we have an fb already? */
 	if (list_empty(&dev->mode_config.fb_kernel_list)) {
 		ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
-				   surface_height, &fb);
+				   surface_height, surface_depth, surface_bpp,
+				   &fb);
 		if (ret)
 			return -EINVAL;
 		new_fb = 1;
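The parser above accepts fb modedb-style strings per connector; fb_get_options() keys them off the connector name in the usual video=<connector>:<mode> form on the kernel command line. Hedged usage examples (the connector names are illustrative and vary per machine):

	video=VGA-1:1024x768-24@60m	1024x768, 24 bpp, 60 Hz, with margins
	video=DVI-I-1:e			force the connector on, no explicit mode
	video=TV-1:d			force the connector off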
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 230c9ffdd5e9..80391995bdec 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
+	/* Basically we want to disable the OOM killer and handle ENOMEM
+	 * ourselves by sacrificing pages from cached buffers.
+	 * XXX shmem_file_[gs]et_gfp_mask()
+	 */
+	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+			     GFP_HIGHUSER |
+			     __GFP_COLD |
+			     __GFP_FS |
+			     __GFP_RECLAIMABLE |
+			     __GFP_NORETRY |
+			     __GFP_NOWARN |
+			     __GFP_NOMEMALLOC);
+
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 49404ce1666e..51f677215f1d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
 #define HV_FACTOR			1000
 struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
 				      int vdisplay, int vrefresh,
-				      bool reduced, bool interlaced)
+				      bool reduced, bool interlaced, bool margins)
 {
 	/* 1) top/bottom margin size (% of height) - default: 1.8, */
 #define	CVT_MARGIN_PERCENTAGE		18
@@ -101,7 +101,6 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
 	/* Pixel Clock step (kHz) */
 #define CVT_CLOCK_STEP			250
 	struct drm_display_mode *drm_mode;
-	bool margins = false;
 	unsigned int vfieldrate, hperiod;
 	int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
 	int interlace;
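With the extra parameter, every drm_cvt_mode() caller now states the margin flag explicitly (the drm_edid.c hunks above pass false). For example, requesting a plain 1024x768@60 CVT mode with no reduced blanking, no interlacing and no margins becomes:

	mode = drm_cvt_mode(dev, 1024, 768, 60, false, false, false);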
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 7e1fbe5d4779..4ac900f4647f 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -369,28 +369,28 @@ static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 /** AGP virtual memory operations */
-static struct vm_operations_struct drm_vm_ops = {
+static const struct vm_operations_struct drm_vm_ops = {
 	.fault = drm_vm_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Shared virtual memory operations */
-static struct vm_operations_struct drm_vm_shm_ops = {
+static const struct vm_operations_struct drm_vm_shm_ops = {
 	.fault = drm_vm_shm_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_shm_close,
 };
 
 /** DMA virtual memory operations */
-static struct vm_operations_struct drm_vm_dma_ops = {
+static const struct vm_operations_struct drm_vm_dma_ops = {
 	.fault = drm_vm_dma_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Scatter-gather virtual memory operations */
-static struct vm_operations_struct drm_vm_sg_ops = {
+static const struct vm_operations_struct drm_vm_sg_ops = {
 	.fault = drm_vm_sg_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5269dfa5f620..fa7b9be096bc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
 	  i915_gem_tiling.o \
+	  i915_trace_points.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1e3bdcee863c..f8ce9a3a420d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	{
 		struct drm_gem_object *obj = obj_priv->obj;
 
-		seq_printf(m, "    %p: %s %08x %08x %d",
+		seq_printf(m, "    %p: %s %8zd %08x %08x %d %s",
 			   obj,
 			   get_pin_flag(obj_priv),
+			   obj->size,
 			   obj->read_domains, obj->write_domain,
-			   obj_priv->last_rendering_seqno);
+			   obj_priv->last_rendering_seqno,
+			   obj_priv->dirty ? "dirty" : "");
 
 		if (obj->name)
 			seq_printf(m, " (name: %d)", obj->name);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5a49a1867b35..45d507ebd3ff 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,7 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 #include <linux/vgaarb.h>
 
 /* Really want an OS-independent resettable timer.  Would like to have
@@ -50,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 	int i;
 
+	trace_i915_ring_wait_begin (dev);
+
 	for (i = 0; i < 100000; i++) {
 		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 		acthd = I915_READ(acthd_reg);
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->Size;
-		if (ring->space >= n)
+		if (ring->space >= n) {
+			trace_i915_ring_wait_end (dev);
 			return 0;
+		}
 
 		if (dev->primary->master) {
 			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -77,6 +82,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 
 	}
 
+	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
@@ -922,7 +928,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
  * how much was set aside so we can use it for our own purposes.
  */
 static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-			  uint32_t *preallocated_size)
+			  uint32_t *preallocated_size,
+			  uint32_t *start)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 tmp = 0;
@@ -1009,10 +1016,159 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 		return -1;
 	}
 	*preallocated_size = stolen - overhead;
+	*start = overhead;
 
 	return 0;
 }
 
+#define PTE_ADDRESS_MASK		0xfffff000
+#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
+#define PTE_MAPPING_TYPE_MASK		(3 << 1)
+#define PTE_VALID			(1 << 0)
+
+/**
+ * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * @dev: drm device
+ * @gtt_addr: address to translate
+ *
+ * Some chip functions require allocations from stolen space but need the
+ * physical address of the memory in question.  We use this routine
+ * to get a physical address suitable for register programming from a given
+ * GTT address.
+ */
+static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+				      unsigned long gtt_addr)
+{
+	unsigned long *gtt;
+	unsigned long entry, phys;
+	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+	int gtt_offset, gtt_size;
+
+	if (IS_I965G(dev)) {
+		if (IS_G4X(dev) || IS_IGDNG(dev)) {
+			gtt_offset = 2*1024*1024;
+			gtt_size = 2*1024*1024;
+		} else {
+			gtt_offset = 512*1024;
+			gtt_size = 512*1024;
+		}
+	} else {
+		gtt_bar = 3;
+		gtt_offset = 0;
+		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+	}
+
+	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
+			 gtt_size);
+	if (!gtt) {
+		DRM_ERROR("ioremap of GTT failed\n");
+		return 0;
+	}
+
+	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
+
+	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+
+	/* Mask out these reserved bits on this hardware. */
+	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+	    IS_I945G(dev) || IS_I945GM(dev)) {
+		entry &= ~PTE_ADDRESS_MASK_HIGH;
+	}
+
+	/* If it's not a mapping type we know, then bail. */
+	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
+		iounmap(gtt);
+		return 0;
+	}
+
+	if (!(entry & PTE_VALID)) {
+		DRM_ERROR("bad GTT entry in stolen space\n");
+		iounmap(gtt);
+		return 0;
+	}
+
+	iounmap(gtt);
+
+	phys =(entry & PTE_ADDRESS_MASK) |
+		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
+
+	DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+
+	return phys;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
+	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *compressed_fb, *compressed_llb;
+	unsigned long cfb_base, ll_base;
+
+	/* Leave 1M for line length buffer & misc. */
+	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+	if (!compressed_fb) {
+		i915_warn_stolen(dev);
+		return;
+	}
+
+	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+	if (!compressed_fb) {
+		i915_warn_stolen(dev);
+		return;
+	}
+
+	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
+	if (!cfb_base) {
+		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+		drm_mm_put_block(compressed_fb);
+	}
+
+	if (!IS_GM45(dev)) {
+		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+						    4096, 0);
+		if (!compressed_llb) {
+			i915_warn_stolen(dev);
+			return;
+		}
+
+		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
+		if (!compressed_llb) {
+			i915_warn_stolen(dev);
+			return;
+		}
+
+		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
+		if (!ll_base) {
+			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+			drm_mm_put_block(compressed_fb);
+			drm_mm_put_block(compressed_llb);
+		}
+	}
+
+	dev_priv->cfb_size = size;
+
+	if (IS_GM45(dev)) {
+		g4x_disable_fbc(dev);
+		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+	} else {
+		i8xx_disable_fbc(dev);
+		I915_WRITE(FBC_CFB_BASE, cfb_base);
+		I915_WRITE(FBC_LL_BASE, ll_base);
+	}
+
+	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+		  ll_base, size >> 20);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1027,6 +1183,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
 }
 
 static int i915_load_modeset_init(struct drm_device *dev,
+				  unsigned long prealloc_start,
 				  unsigned long prealloc_size,
 				  unsigned long agp_size)
 {
@@ -1047,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
 	/* Basic memrange allocator for stolen space (aka vram) */
 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+
+	/* We're off and running w/KMS */
+	dev_priv->mm.suspended = 0;
 
 	/* Let GEM Manage from end of prealloc space to end of aperture.
 	 *
@@ -1059,10 +1220,25 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	 */
 	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_init_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
 	if (ret)
 		goto out;
 
+	/* Try to set up FBC with a reasonable compressed buffer size */
+	if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
+	    i915_powersave) {
+		int cfb_size;
+
+		/* Try to get an 8M buffer... */
+		if (prealloc_size > (9*1024*1024))
+			cfb_size = 8*1024*1024;
+		else /* fall back to 7/8 of the stolen space */
+			cfb_size = prealloc_size * 7 / 8;
+		i915_setup_compression(dev, cfb_size);
+	}
+
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
 	dev_priv->allow_batchbuffer = 1;
@@ -1180,7 +1356,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	resource_size_t base, size;
 	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
-	uint32_t agp_size, prealloc_size;
+	uint32_t agp_size, prealloc_size, prealloc_start;
 
 	/* i915 has 4 more counters */
 	dev->counters += 4;
@@ -1234,7 +1410,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		       "performance may suffer.\n");
 	}
 
-	ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
 	if (ret)
 		goto out_iomapfree;
 
@@ -1300,8 +1476,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		return ret;
 	}
 
+	/* Start out suspended */
+	dev_priv->mm.suspended = 1;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+		ret = i915_load_modeset_init(dev, prealloc_start,
+					     prealloc_size, agp_size);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
 			goto out_workqueue_free;
@@ -1313,6 +1493,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!IS_IGDNG(dev))
 		intel_opregion_init(dev, 0);
 
+	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+		    (unsigned long) dev);
 	return 0;
 
 out_workqueue_free:
@@ -1333,6 +1515,7 @@ int i915_driver_unload(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	destroy_workqueue(dev_priv->wq);
+	del_timer_sync(&dev_priv->hangcheck_timer);
 
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -1472,6 +1655,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
};
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
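To make the i915_gtt_to_phys() translation concrete, here is a stand-alone sketch decoding a single PTE with the same masks; the PTE value is made up for illustration:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t entry = 0x1234f0b1;	/* hypothetical GTT PTE */

		/* low 12 bits are flags; bits 4..7 carry physical address
		 * bits 32..35 on chips that support them */
		uint64_t phys = (entry & 0xfffff000u) |
				((uint64_t)(entry & 0x000000f0u) << (32 - 4));

		/* prints: valid=1 type=0 phys=0xb1234f000 */
		printf("valid=%u type=%u phys=0x%llx\n",
		       entry & 1u, (entry >> 1) & 3u,
		       (unsigned long long)phys);
		return 0;
	}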
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dbe568c9327b..b93814c0d3e2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -89,6 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
+	dev_priv->suspended = 1;
+
 	return 0;
 }
 
@@ -97,8 +99,6 @@ static int i915_resume(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
-	pci_set_power_state(dev->pdev, PCI_D0);
-	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
 		return -1;
 	pci_set_master(dev->pdev);
@@ -124,9 +124,135 @@ static int i915_resume(struct drm_device *dev)
 		drm_helper_resume_force_mode(dev);
 	}
 
+	dev_priv->suspended = 0;
+
 	return ret;
 }
 
+/**
+ * i965_reset - reset chip after a hang
+ * @dev: drm device to reset
+ * @flags: reset domains
+ *
+ * Reset the chip.  Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ *	- reset the chip using the reset reg
+ *	- re-init context state
+ *	- re-init hardware status page
+ *	- re-init ring buffer
+ *	- re-init interrupt state
+ *	- re-init display
+ */
+int i965_reset(struct drm_device *dev, u8 flags)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long timeout;
+	u8 gdrst;
+	/*
+	 * We really should only reset the display subsystem if we actually
+	 * need to
+	 */
+	bool need_display = true;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/*
+	 * Clear request list
+	 */
+	i915_gem_retire_requests(dev);
+
+	if (need_display)
+		i915_save_display(dev);
+
+	if (IS_I965G(dev) || IS_G4X(dev)) {
+		/*
+		 * Set the domains we want to reset, then the reset bit (bit 0).
+		 * Clear the reset bit after a while and wait for hardware status
+		 * bit (bit 1) to be set
+		 */
+		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
+		udelay(50);
+		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
+
+		/* ...we don't want to loop forever though, 500ms should be plenty */
+		timeout = jiffies + msecs_to_jiffies(500);
+		do {
+			udelay(100);
+			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+		} while ((gdrst & 0x1) && time_after(timeout, jiffies));
+
+		if (gdrst & 0x1) {
+			WARN(true, "i915: Failed to reset chip\n");
+			mutex_unlock(&dev->struct_mutex);
+			return -EIO;
+		}
+	} else {
+		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+		return -ENODEV;
+	}
+
+	/* Ok, now get things going again... */
+
+	/*
+	 * Everything depends on having the GTT running, so we need to start
+	 * there.  Fortunately we don't need to do this unless we reset the
+	 * chip at a PCI level.
+	 *
+	 * Next we need to restore the context, but we don't use those
+	 * yet either...
+	 *
+	 * Ring buffer needs to be re-initialized in the KMS case, or if X
+	 * was running at the time of the reset (i.e. we weren't VT
+	 * switched away).
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+	    !dev_priv->mm.suspended) {
+		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
+		struct drm_gem_object *obj = ring->ring_obj;
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+		dev_priv->mm.suspended = 0;
+
+		/* Stop the ring if it's running. */
+		I915_WRITE(PRB0_CTL, 0);
+		I915_WRITE(PRB0_TAIL, 0);
+		I915_WRITE(PRB0_HEAD, 0);
+
+		/* Initialize the ring. */
+		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+		I915_WRITE(PRB0_CTL,
+			   ((obj->size - 4096) & RING_NR_PAGES) |
+			   RING_NO_REPORT |
+			   RING_VALID);
+		if (!drm_core_check_feature(dev, DRIVER_MODESET))
+			i915_kernel_lost_context(dev);
+		else {
+			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+			ring->space = ring->head - (ring->tail + 8);
+			if (ring->space < 0)
+				ring->space += ring->Size;
+		}
+
+		mutex_unlock(&dev->struct_mutex);
+		drm_irq_uninstall(dev);
+		drm_irq_install(dev);
+		mutex_lock(&dev->struct_mutex);
+	}
+
+	/*
+	 * Display needs restore too...
+	 */
+	if (need_display)
+		i915_restore_display(dev);
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -234,6 +360,8 @@ static int __init i915_init(void)
 {
 	driver.num_ioctls = i915_max_ioctl;
 
+	i915_gem_shrinker_init();
+
 	/*
 	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 	 * explicitly disabled with the module pararmeter.
@@ -260,6 +388,7 @@ static int __init i915_init(void)
 
 static void __exit i915_exit(void)
 {
+	i915_gem_shrinker_exit();
 	drm_exit(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a0632f8e76ac..b24b2d145b75 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -48,6 +48,11 @@ enum pipe { | |||
48 | PIPE_B, | 48 | PIPE_B, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | enum plane { | ||
52 | PLANE_A = 0, | ||
53 | PLANE_B, | ||
54 | }; | ||
55 | |||
51 | #define I915_NUM_PIPE 2 | 56 | #define I915_NUM_PIPE 2 |
52 | 57 | ||
53 | /* Interface history: | 58 | /* Interface history: |
@@ -148,6 +153,23 @@ struct drm_i915_error_state { | |||
148 | struct timeval time; | 153 | struct timeval time; |
149 | }; | 154 | }; |
150 | 155 | ||
156 | struct drm_i915_display_funcs { | ||
157 | void (*dpms)(struct drm_crtc *crtc, int mode); | ||
158 | bool (*fbc_enabled)(struct drm_crtc *crtc); | ||
159 | void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); | ||
160 | void (*disable_fbc)(struct drm_device *dev); | ||
161 | int (*get_display_clock_speed)(struct drm_device *dev); | ||
162 | int (*get_fifo_size)(struct drm_device *dev, int plane); | ||
163 | void (*update_wm)(struct drm_device *dev, int planea_clock, | ||
164 | int planeb_clock, int sr_hdisplay, int pixel_size); | ||
165 | /* clock updates for mode set */ | ||
166 | /* cursor updates */ | ||
167 | /* render clock increase/decrease */ | ||
168 | /* display clock increase/decrease */ | ||
169 | /* pll clock increase/decrease */ | ||
170 | /* clock gating init */ | ||
171 | }; | ||
172 | |||
151 | typedef struct drm_i915_private { | 173 | typedef struct drm_i915_private { |
152 | struct drm_device *dev; | 174 | struct drm_device *dev; |
153 | 175 | ||
@@ -198,10 +220,21 @@ typedef struct drm_i915_private { | |||
198 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 220 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
199 | int vblank_pipe; | 221 | int vblank_pipe; |
200 | 222 | ||
223 | /* For hangcheck timer */ | ||
224 | #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ | ||
225 | struct timer_list hangcheck_timer; | ||
226 | int hangcheck_count; | ||
227 | uint32_t last_acthd; | ||
228 | |||
201 | bool cursor_needs_physical; | 229 | bool cursor_needs_physical; |
202 | 230 | ||
203 | struct drm_mm vram; | 231 | struct drm_mm vram; |
204 | 232 | ||
233 | unsigned long cfb_size; | ||
234 | unsigned long cfb_pitch; | ||
235 | int cfb_fence; | ||
236 | int cfb_plane; | ||
237 | |||
205 | int irq_enabled; | 238 | int irq_enabled; |
206 | 239 | ||
207 | struct intel_opregion opregion; | 240 | struct intel_opregion opregion; |
@@ -222,6 +255,8 @@ typedef struct drm_i915_private { | |||
222 | unsigned int edp_support:1; | 255 | unsigned int edp_support:1; |
223 | int lvds_ssc_freq; | 256 | int lvds_ssc_freq; |
224 | 257 | ||
258 | struct notifier_block lid_notifier; | ||
259 | |||
225 | int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ | 260 | int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */ |
226 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 261 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
227 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 262 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
@@ -234,7 +269,11 @@ typedef struct drm_i915_private { | |||
234 | struct work_struct error_work; | 269 | struct work_struct error_work; |
235 | struct workqueue_struct *wq; | 270 | struct workqueue_struct *wq; |
236 | 271 | ||
272 | /* Display functions */ | ||
273 | struct drm_i915_display_funcs display; | ||
274 | |||
237 | /* Register state */ | 275 | /* Register state */ |
276 | bool suspended; | ||
238 | u8 saveLBB; | 277 | u8 saveLBB; |
239 | u32 saveDSPACNTR; | 278 | u32 saveDSPACNTR; |
240 | u32 saveDSPBCNTR; | 279 | u32 saveDSPBCNTR; |
@@ -350,6 +389,15 @@ typedef struct drm_i915_private { | |||
350 | int gtt_mtrr; | 389 | int gtt_mtrr; |
351 | 390 | ||
352 | /** | 391 | /** |
392 | * Membership on list of all loaded devices, used to evict | ||
393 | * inactive buffers under memory pressure. | ||
394 | * | ||
395 | * Modifications should only be done whilst holding the | ||
396 | * shrink_list_lock spinlock. | ||
397 | */ | ||
398 | struct list_head shrink_list; | ||
399 | |||
400 | /** | ||
353 | * List of objects currently involved in rendering from the | 401 | * List of objects currently involved in rendering from the |
354 | * ringbuffer. | 402 | * ringbuffer. |
355 | * | 403 | * |
@@ -432,7 +480,7 @@ typedef struct drm_i915_private { | |||
432 | * It prevents command submission from occurring and makes | 480 | * It prevents command submission from occurring and makes |
433 | * every pending request fail | 481 | * every pending request fail |
434 | */ | 482 | */ |
435 | int wedged; | 483 | atomic_t wedged; |
436 | 484 | ||
437 | /** Bit 6 swizzling required for X tiling */ | 485 | /** Bit 6 swizzling required for X tiling */ |
438 | uint32_t bit_6_swizzle_x; | 486 | uint32_t bit_6_swizzle_x; |
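
Making wedged an atomic_t lets the error and reset paths declare the GPU
dead, and lets sleeping waiters observe that, without taking struct_mutex,
which may well be held by the very thread that is stuck. A sketch of both
sides; the helper names are illustrative:

        static void mark_wedged(drm_i915_private_t *dev_priv)
        {
                atomic_set(&dev_priv->mm.wedged, 1);
                /* Kick anyone sleeping in i915_wait_request() so they notice. */
                wake_up_all(&dev_priv->irq_queue);
        }

        static bool gpu_is_wedged(drm_i915_private_t *dev_priv)
        {
                return atomic_read(&dev_priv->mm.wedged) != 0;
        }
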
@@ -491,10 +539,7 @@ struct drm_i915_gem_object { | |||
491 | * This is the same as gtt_space->start | 539 | * This is the same as gtt_space->start |
492 | */ | 540 | */ |
493 | uint32_t gtt_offset; | 541 | uint32_t gtt_offset; |
494 | /** | 542 | |
495 | * Required alignment for the object | ||
496 | */ | ||
497 | uint32_t gtt_alignment; | ||
498 | /** | 543 | /** |
499 | * Fake offset for use by mmap(2) | 544 | * Fake offset for use by mmap(2) |
500 | */ | 545 | */ |
@@ -541,6 +586,11 @@ struct drm_i915_gem_object { | |||
541 | * in an execbuffer object list. | 586 | * in an execbuffer object list. |
542 | */ | 587 | */ |
543 | int in_execbuffer; | 588 | int in_execbuffer; |
589 | |||
590 | /** | ||
591 | * Advice: are the backing pages purgeable? | ||
592 | */ | ||
593 | int madv; | ||
544 | }; | 594 | }; |
545 | 595 | ||
546 | /** | 596 | /** |
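
The madv field backs the new madvise ioctl (i915_gem_madvise_ioctl is
declared further down): userspace marks idle buffer caches as
I915_MADV_DONTNEED so the kernel may reclaim their backing pages under
pressure, and flips them back to I915_MADV_WILLNEED before reuse. A
hedged userspace sketch, assuming the drm_i915_gem_madvise layout
(handle, madv, retained) that ships with this series:

        #include <sys/ioctl.h>
        #include <stdio.h>
        #include "i915_drm.h"   /* assumed to carry the madvise definitions */

        static int buffer_set_purgeable(int drm_fd, unsigned int handle)
        {
                struct drm_i915_gem_madvise madv = {
                        .handle = handle,
                        .madv   = I915_MADV_DONTNEED,
                };

                if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
                        return -1;

                /* retained == 0 means the pages were already purged and
                 * the buffer's contents must be regenerated before use. */
                printf("retained: %u\n", madv.retained);
                return 0;
        }
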
@@ -585,6 +635,8 @@ extern int i915_max_ioctl; | |||
585 | extern unsigned int i915_fbpercrtc; | 635 | extern unsigned int i915_fbpercrtc; |
586 | extern unsigned int i915_powersave; | 636 | extern unsigned int i915_powersave; |
587 | 637 | ||
638 | extern void i915_save_display(struct drm_device *dev); | ||
639 | extern void i915_restore_display(struct drm_device *dev); | ||
588 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); | 640 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
589 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); | 641 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
590 | 642 | ||
@@ -604,8 +656,10 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | |||
604 | extern int i915_emit_box(struct drm_device *dev, | 656 | extern int i915_emit_box(struct drm_device *dev, |
605 | struct drm_clip_rect *boxes, | 657 | struct drm_clip_rect *boxes, |
606 | int i, int DR1, int DR4); | 658 | int i, int DR1, int DR4); |
659 | extern int i965_reset(struct drm_device *dev, u8 flags); | ||
607 | 660 | ||
608 | /* i915_irq.c */ | 661 | /* i915_irq.c */ |
662 | void i915_hangcheck_elapsed(unsigned long data); | ||
609 | extern int i915_irq_emit(struct drm_device *dev, void *data, | 663 | extern int i915_irq_emit(struct drm_device *dev, void *data, |
610 | struct drm_file *file_priv); | 664 | struct drm_file *file_priv); |
611 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 665 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
@@ -676,6 +730,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
676 | struct drm_file *file_priv); | 730 | struct drm_file *file_priv); |
677 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | 731 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
678 | struct drm_file *file_priv); | 732 | struct drm_file *file_priv); |
733 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | ||
734 | struct drm_file *file_priv); | ||
679 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | 735 | int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
680 | struct drm_file *file_priv); | 736 | struct drm_file *file_priv); |
681 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 737 | int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
@@ -695,6 +751,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj); | |||
695 | void i915_gem_release_mmap(struct drm_gem_object *obj); | 751 | void i915_gem_release_mmap(struct drm_gem_object *obj); |
696 | void i915_gem_lastclose(struct drm_device *dev); | 752 | void i915_gem_lastclose(struct drm_device *dev); |
697 | uint32_t i915_get_gem_seqno(struct drm_device *dev); | 753 | uint32_t i915_get_gem_seqno(struct drm_device *dev); |
754 | bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); | ||
698 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); | 755 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); |
699 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); | 756 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); |
700 | void i915_gem_retire_requests(struct drm_device *dev); | 757 | void i915_gem_retire_requests(struct drm_device *dev); |
@@ -720,6 +777,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj); | |||
720 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | 777 | void i915_gem_object_put_pages(struct drm_gem_object *obj); |
721 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 778 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); |
722 | 779 | ||
780 | void i915_gem_shrinker_init(void); | ||
781 | void i915_gem_shrinker_exit(void); | ||
782 | |||
723 | /* i915_gem_tiling.c */ | 783 | /* i915_gem_tiling.c */ |
724 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 784 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
725 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 785 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); |
@@ -767,6 +827,8 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; } | |||
767 | extern void intel_modeset_init(struct drm_device *dev); | 827 | extern void intel_modeset_init(struct drm_device *dev); |
768 | extern void intel_modeset_cleanup(struct drm_device *dev); | 828 | extern void intel_modeset_cleanup(struct drm_device *dev); |
769 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | 829 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
830 | extern void i8xx_disable_fbc(struct drm_device *dev); | ||
831 | extern void g4x_disable_fbc(struct drm_device *dev); | ||
770 | 832 | ||
771 | /** | 833 | /** |
772 | * Lock test for when it's just for synchronization of ring access. | 834 | * Lock test for when it's just for synchronization of ring access. |
@@ -864,6 +926,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
864 | (dev)->pci_device == 0x2E12 || \ | 926 | (dev)->pci_device == 0x2E12 || \ |
865 | (dev)->pci_device == 0x2E22 || \ | 927 | (dev)->pci_device == 0x2E22 || \ |
866 | (dev)->pci_device == 0x2E32 || \ | 928 | (dev)->pci_device == 0x2E32 || \ |
929 | (dev)->pci_device == 0x2E42 || \ | ||
867 | (dev)->pci_device == 0x0042 || \ | 930 | (dev)->pci_device == 0x0042 || \ |
868 | (dev)->pci_device == 0x0046) | 931 | (dev)->pci_device == 0x0046) |
869 | 932 | ||
@@ -876,6 +939,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
876 | (dev)->pci_device == 0x2E12 || \ | 939 | (dev)->pci_device == 0x2E12 || \ |
877 | (dev)->pci_device == 0x2E22 || \ | 940 | (dev)->pci_device == 0x2E22 || \ |
878 | (dev)->pci_device == 0x2E32 || \ | 941 | (dev)->pci_device == 0x2E32 || \ |
942 | (dev)->pci_device == 0x2E42 || \ | ||
879 | IS_GM45(dev)) | 943 | IS_GM45(dev)) |
880 | 944 | ||
881 | #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) | 945 | #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) |
@@ -909,12 +973,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
909 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 973 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
910 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 974 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
911 | #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) | 975 | #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) |
912 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) | 976 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) |
913 | /* dsparb controlled by hw only */ | 977 | /* dsparb controlled by hw only */ |
914 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 978 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
915 | 979 | ||
916 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) | 980 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev)) |
917 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 981 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
982 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev))) | ||
918 | 983 | ||
919 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 984 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
920 | 985 | ||
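
The cfb_* bookkeeping added to dev_priv, the I915_HAS_FBC() gate and the
i8xx_disable_fbc()/g4x_disable_fbc() entry points together wire up
framebuffer compression for mobile chips. A plausible init-time hookup,
routed through the new display-funcs table; only disable_fbc is declared
in this header, so the init function and the chipset split here are
assumptions:

        static void intel_init_fbc_sketch(struct drm_device *dev)
        {
                drm_i915_private_t *dev_priv = dev->dev_private;

                if (!I915_HAS_FBC(dev))
                        return;

                if (IS_G4X(dev))
                        dev_priv->display.disable_fbc = g4x_disable_fbc;
                else
                        dev_priv->display.disable_fbc = i8xx_disable_fbc;
        }
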
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c67317112f4a..40727d4c2919 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "drm.h" | 29 | #include "drm.h" |
30 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_trace.h" | ||
32 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
33 | #include <linux/swap.h> | 34 | #include <linux/swap.h> |
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
@@ -48,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | |||
48 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 49 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, |
49 | unsigned alignment); | 50 | unsigned alignment); |
50 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 51 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); |
51 | static int i915_gem_evict_something(struct drm_device *dev); | 52 | static int i915_gem_evict_something(struct drm_device *dev, int min_size); |
53 | static int i915_gem_evict_from_inactive_list(struct drm_device *dev); | ||
52 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 54 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, |
53 | struct drm_i915_gem_pwrite *args, | 55 | struct drm_i915_gem_pwrite *args, |
54 | struct drm_file *file_priv); | 56 | struct drm_file *file_priv); |
55 | 57 | ||
58 | static LIST_HEAD(shrink_list); | ||
59 | static DEFINE_SPINLOCK(shrink_list_lock); | ||
60 | |||
56 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 61 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
57 | unsigned long end) | 62 | unsigned long end) |
58 | { | 63 | { |
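
The module-wide shrink_list and shrink_list_lock give the shrinker
(i915_gem_shrinker_init()/i915_gem_shrinker_exit(), hooked into module
init and exit earlier in this diff) a way to reach every bound device's
inactive buffers under memory pressure. A sketch of the likely plumbing
using the register_shrinker() API of this kernel generation; the scan
callback body is elided:

        static int i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
        {
                /* Walk shrink_list under shrink_list_lock, unbinding
                 * purgeable and inactive objects, and report how many
                 * reclaimable pages remain. */
                return 0;
        }

        static struct shrinker i915_shrinker = {
                .shrink = i915_gem_shrink,
                .seeks  = DEFAULT_SEEKS,
        };

        void i915_gem_shrinker_init(void)
        {
                register_shrinker(&i915_shrinker);
        }

        void i915_gem_shrinker_exit(void)
        {
                unregister_shrinker(&i915_shrinker);
        }
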
@@ -316,6 +321,45 @@ fail_unlock: | |||
316 | return ret; | 321 | return ret; |
317 | } | 322 | } |
318 | 323 | ||
324 | static inline gfp_t | ||
325 | i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) | ||
326 | { | ||
327 | return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); | ||
328 | } | ||
329 | |||
330 | static inline void | ||
331 | i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) | ||
332 | { | ||
333 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); | ||
334 | } | ||
335 | |||
336 | static int | ||
337 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | ||
338 | { | ||
339 | int ret; | ||
340 | |||
341 | ret = i915_gem_object_get_pages(obj); | ||
342 | |||
343 | /* If we've insufficient memory to map in the pages, attempt | ||
344 | * to make some space by throwing out some old buffers. | ||
345 | */ | ||
346 | if (ret == -ENOMEM) { | ||
347 | struct drm_device *dev = obj->dev; | ||
348 | gfp_t gfp; | ||
349 | |||
350 | ret = i915_gem_evict_something(dev, obj->size); | ||
351 | if (ret) | ||
352 | return ret; | ||
353 | |||
354 | gfp = i915_gem_object_get_page_gfp_mask(obj); | ||
355 | i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); | ||
356 | ret = i915_gem_object_get_pages(obj); | ||
357 | i915_gem_object_set_page_gfp_mask (obj, gfp); | ||
358 | } | ||
359 | |||
360 | return ret; | ||
361 | } | ||
362 | |||
319 | /** | 363 | /** |
320 | * This is the fallback shmem pread path, which allocates temporary storage | 364 | * This is the fallback shmem pread path, which allocates temporary storage |
321 | * in kernel space to copy_to_user into outside of the struct_mutex, so we | 365 | * in kernel space to copy_to_user into outside of the struct_mutex, so we |
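
The mask dance in i915_gem_object_get_pages_or_evict() works because GEM
shmem mappings are assumed to carry __GFP_NORETRY by default, as the
retry path in i915_gem_object_bind_to_gtt() later in this diff implies
when it restores the mask with that flag set: ordinary allocations fail
fast instead of invoking the OOM killer, the driver evicts its own
buffers, then retries with the flag cleared. A sketch of the
creation-time setup this relies on (the helper itself is illustrative):

        static void i915_gem_object_mark_noretry(struct drm_gem_object *obj)
        {
                struct address_space *mapping =
                        obj->filp->f_path.dentry->d_inode->i_mapping;

                /* Fail fast under pressure; the driver will evict its own
                 * buffers and retry rather than trigger the OOM killer. */
                mapping_set_gfp_mask(mapping,
                                     mapping_gfp_mask(mapping) | __GFP_NORETRY);
        }
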
@@ -367,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
367 | 411 | ||
368 | mutex_lock(&dev->struct_mutex); | 412 | mutex_lock(&dev->struct_mutex); |
369 | 413 | ||
370 | ret = i915_gem_object_get_pages(obj); | 414 | ret = i915_gem_object_get_pages_or_evict(obj); |
371 | if (ret != 0) | 415 | if (ret) |
372 | goto fail_unlock; | 416 | goto fail_unlock; |
373 | 417 | ||
374 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, | 418 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, |
@@ -842,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
842 | 886 | ||
843 | mutex_lock(&dev->struct_mutex); | 887 | mutex_lock(&dev->struct_mutex); |
844 | 888 | ||
845 | ret = i915_gem_object_get_pages(obj); | 889 | ret = i915_gem_object_get_pages_or_evict(obj); |
846 | if (ret != 0) | 890 | if (ret) |
847 | goto fail_unlock; | 891 | goto fail_unlock; |
848 | 892 | ||
849 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 893 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
@@ -1155,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1155 | /* Now bind it into the GTT if needed */ | 1199 | /* Now bind it into the GTT if needed */ |
1156 | mutex_lock(&dev->struct_mutex); | 1200 | mutex_lock(&dev->struct_mutex); |
1157 | if (!obj_priv->gtt_space) { | 1201 | if (!obj_priv->gtt_space) { |
1158 | ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); | 1202 | ret = i915_gem_object_bind_to_gtt(obj, 0); |
1159 | if (ret) { | 1203 | if (ret) |
1160 | mutex_unlock(&dev->struct_mutex); | 1204 | goto unlock; |
1161 | return VM_FAULT_SIGBUS; | ||
1162 | } | ||
1163 | |||
1164 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | ||
1165 | if (ret) { | ||
1166 | mutex_unlock(&dev->struct_mutex); | ||
1167 | return VM_FAULT_SIGBUS; | ||
1168 | } | ||
1169 | 1205 | ||
1170 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1206 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
1207 | |||
1208 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | ||
1209 | if (ret) | ||
1210 | goto unlock; | ||
1171 | } | 1211 | } |
1172 | 1212 | ||
1173 | /* Need a new fence register? */ | 1213 | /* Need a new fence register? */ |
1174 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1214 | if (obj_priv->tiling_mode != I915_TILING_NONE) { |
1175 | ret = i915_gem_object_get_fence_reg(obj); | 1215 | ret = i915_gem_object_get_fence_reg(obj); |
1176 | if (ret) { | 1216 | if (ret) |
1177 | mutex_unlock(&dev->struct_mutex); | 1217 | goto unlock; |
1178 | return VM_FAULT_SIGBUS; | ||
1179 | } | ||
1180 | } | 1218 | } |
1181 | 1219 | ||
1182 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1220 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + |
@@ -1184,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1184 | 1222 | ||
1185 | /* Finally, remap it using the new GTT offset */ | 1223 | /* Finally, remap it using the new GTT offset */ |
1186 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 1224 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); |
1187 | 1225 | unlock: | |
1188 | mutex_unlock(&dev->struct_mutex); | 1226 | mutex_unlock(&dev->struct_mutex); |
1189 | 1227 | ||
1190 | switch (ret) { | 1228 | switch (ret) { |
1229 | case 0: | ||
1230 | case -ERESTARTSYS: | ||
1231 | return VM_FAULT_NOPAGE; | ||
1191 | case -ENOMEM: | 1232 | case -ENOMEM: |
1192 | case -EAGAIN: | 1233 | case -EAGAIN: |
1193 | return VM_FAULT_OOM; | 1234 | return VM_FAULT_OOM; |
1194 | case -EFAULT: | ||
1195 | case -EINVAL: | ||
1196 | return VM_FAULT_SIGBUS; | ||
1197 | default: | 1235 | default: |
1198 | return VM_FAULT_NOPAGE; | 1236 | return VM_FAULT_SIGBUS; |
1199 | } | 1237 | } |
1200 | } | 1238 | } |
1201 | 1239 | ||
@@ -1388,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1388 | 1426 | ||
1389 | obj_priv = obj->driver_private; | 1427 | obj_priv = obj->driver_private; |
1390 | 1428 | ||
1429 | if (obj_priv->madv != I915_MADV_WILLNEED) { | ||
1430 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | ||
1431 | drm_gem_object_unreference(obj); | ||
1432 | mutex_unlock(&dev->struct_mutex); | ||
1433 | return -EINVAL; | ||
1434 | } | ||
1435 | |||
1436 | |||
1391 | if (!obj_priv->mmap_offset) { | 1437 | if (!obj_priv->mmap_offset) { |
1392 | ret = i915_gem_create_mmap_offset(obj); | 1438 | ret = i915_gem_create_mmap_offset(obj); |
1393 | if (ret) { | 1439 | if (ret) { |
@@ -1399,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1399 | 1445 | ||
1400 | args->offset = obj_priv->mmap_offset; | 1446 | args->offset = obj_priv->mmap_offset; |
1401 | 1447 | ||
1402 | obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj); | ||
1403 | |||
1404 | /* Make sure the alignment is correct for fence regs etc */ | ||
1405 | if (obj_priv->agp_mem && | ||
1406 | (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) { | ||
1407 | drm_gem_object_unreference(obj); | ||
1408 | mutex_unlock(&dev->struct_mutex); | ||
1409 | return -EINVAL; | ||
1410 | } | ||
1411 | |||
1412 | /* | 1448 | /* |
1413 | * Pull it into the GTT so that we have a page list (makes the | 1449 | * Pull it into the GTT so that we have a page list (makes the |
1414 | * initial fault faster and any subsequent flushing possible). | 1450 | * initial fault faster and any subsequent flushing possible). |
1415 | */ | 1451 | */ |
1416 | if (!obj_priv->agp_mem) { | 1452 | if (!obj_priv->agp_mem) { |
1417 | ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); | 1453 | ret = i915_gem_object_bind_to_gtt(obj, 0); |
1418 | if (ret) { | 1454 | if (ret) { |
1419 | drm_gem_object_unreference(obj); | 1455 | drm_gem_object_unreference(obj); |
1420 | mutex_unlock(&dev->struct_mutex); | 1456 | mutex_unlock(&dev->struct_mutex); |
@@ -1437,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
1437 | int i; | 1473 | int i; |
1438 | 1474 | ||
1439 | BUG_ON(obj_priv->pages_refcount == 0); | 1475 | BUG_ON(obj_priv->pages_refcount == 0); |
1476 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | ||
1440 | 1477 | ||
1441 | if (--obj_priv->pages_refcount != 0) | 1478 | if (--obj_priv->pages_refcount != 0) |
1442 | return; | 1479 | return; |
@@ -1444,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) | |||
1444 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1481 | if (obj_priv->tiling_mode != I915_TILING_NONE) |
1445 | i915_gem_object_save_bit_17_swizzle(obj); | 1482 | i915_gem_object_save_bit_17_swizzle(obj); |
1446 | 1483 | ||
1447 | for (i = 0; i < page_count; i++) | 1484 | if (obj_priv->madv == I915_MADV_DONTNEED) |
1448 | if (obj_priv->pages[i] != NULL) { | 1485 | obj_priv->dirty = 0; |
1449 | if (obj_priv->dirty) | 1486 | |
1450 | set_page_dirty(obj_priv->pages[i]); | 1487 | for (i = 0; i < page_count; i++) { |
1488 | if (obj_priv->pages[i] == NULL) | ||
1489 | break; | ||
1490 | |||
1491 | if (obj_priv->dirty) | ||
1492 | set_page_dirty(obj_priv->pages[i]); | ||
1493 | |||
1494 | if (obj_priv->madv == I915_MADV_WILLNEED) | ||
1451 | mark_page_accessed(obj_priv->pages[i]); | 1495 | mark_page_accessed(obj_priv->pages[i]); |
1452 | page_cache_release(obj_priv->pages[i]); | 1496 | |
1453 | } | 1497 | page_cache_release(obj_priv->pages[i]); |
1498 | } | ||
1454 | obj_priv->dirty = 0; | 1499 | obj_priv->dirty = 0; |
1455 | 1500 | ||
1456 | drm_free_large(obj_priv->pages); | 1501 | drm_free_large(obj_priv->pages); |
@@ -1489,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1489 | obj_priv->last_rendering_seqno = 0; | 1534 | obj_priv->last_rendering_seqno = 0; |
1490 | } | 1535 | } |
1491 | 1536 | ||
1537 | /* Immediately discard the backing storage */ | ||
1538 | static void | ||
1539 | i915_gem_object_truncate(struct drm_gem_object *obj) | ||
1540 | { | ||
1541 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1542 | struct inode *inode; | ||
1543 | |||
1544 | inode = obj->filp->f_path.dentry->d_inode; | ||
1545 | if (inode->i_op->truncate) | ||
1546 | inode->i_op->truncate (inode); | ||
1547 | |||
1548 | obj_priv->madv = __I915_MADV_PURGED; | ||
1549 | } | ||
1550 | |||
1551 | static inline int | ||
1552 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | ||
1553 | { | ||
1554 | return obj_priv->madv == I915_MADV_DONTNEED; | ||
1555 | } | ||
1556 | |||
1492 | static void | 1557 | static void |
1493 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | 1558 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) |
1494 | { | 1559 | { |
@@ -1577,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1577 | 1642 | ||
1578 | if ((obj->write_domain & flush_domains) == | 1643 | if ((obj->write_domain & flush_domains) == |
1579 | obj->write_domain) { | 1644 | obj->write_domain) { |
1645 | uint32_t old_write_domain = obj->write_domain; | ||
1646 | |||
1580 | obj->write_domain = 0; | 1647 | obj->write_domain = 0; |
1581 | i915_gem_object_move_to_active(obj, seqno); | 1648 | i915_gem_object_move_to_active(obj, seqno); |
1649 | |||
1650 | trace_i915_gem_object_change_domain(obj, | ||
1651 | obj->read_domains, | ||
1652 | old_write_domain); | ||
1582 | } | 1653 | } |
1583 | } | 1654 | } |
1584 | 1655 | ||
1585 | } | 1656 | } |
1586 | 1657 | ||
1587 | if (was_empty && !dev_priv->mm.suspended) | 1658 | if (!dev_priv->mm.suspended) { |
1588 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 1659 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
1660 | if (was_empty) | ||
1661 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | ||
1662 | } | ||
1589 | return seqno; | 1663 | return seqno; |
1590 | } | 1664 | } |
1591 | 1665 | ||
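
The trace_i915_gem_object_change_domain() calls threaded through the rest
of this file come from the new i915_trace.h included above. A sketch of
the kind of TRACE_EVENT definition that header provides; the exact
upstream field packing may differ:

        TRACE_EVENT(i915_gem_object_change_domain,
                TP_PROTO(struct drm_gem_object *obj, u32 old_read, u32 old_write),
                TP_ARGS(obj, old_read, old_write),

                TP_STRUCT__entry(
                        __field(struct drm_gem_object *, obj)
                        __field(u32, old_read)
                        __field(u32, old_write)
                ),

                TP_fast_assign(
                        __entry->obj = obj;
                        __entry->old_read = old_read;
                        __entry->old_write = old_write;
                ),

                TP_printk("obj=%p, read=%04x, write=%04x",
                          __entry->obj, __entry->old_read, __entry->old_write)
        );
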
@@ -1623,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev, | |||
1623 | { | 1697 | { |
1624 | drm_i915_private_t *dev_priv = dev->dev_private; | 1698 | drm_i915_private_t *dev_priv = dev->dev_private; |
1625 | 1699 | ||
1700 | trace_i915_gem_request_retire(dev, request->seqno); | ||
1701 | |||
1626 | /* Move any buffers on the active list that are no longer referenced | 1702 | /* Move any buffers on the active list that are no longer referenced |
1627 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 1703 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
1628 | */ | 1704 | */ |
@@ -1671,7 +1747,7 @@ out: | |||
1671 | /** | 1747 | /** |
1672 | * Returns true if seq1 is later than seq2. | 1748 | * Returns true if seq1 is later than seq2. |
1673 | */ | 1749 | */ |
1674 | static int | 1750 | bool |
1675 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) | 1751 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) |
1676 | { | 1752 | { |
1677 | return (int32_t)(seq1 - seq2) >= 0; | 1753 | return (int32_t)(seq1 - seq2) >= 0; |
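
Promoting i915_seqno_passed() from static to an exported bool lets code
outside i915_gem.c, such as the new hangcheck path, reuse it. The cast is
what makes the comparison wrap-safe: two sequence numbers are ordered by
their signed 32-bit distance, which stays correct across wraparound as
long as they are within 2^31 of each other. A self-contained
demonstration:

        #include <stdint.h>
        #include <stdio.h>

        /* Same test as i915_seqno_passed(): true if seq1 is at or past seq2. */
        static int seqno_passed(uint32_t seq1, uint32_t seq2)
        {
                return (int32_t)(seq1 - seq2) >= 0;
        }

        int main(void)
        {
                /* Across the wrap point: 3 counts as later than 0xfffffffe
                 * because the unsigned difference, 5, is a small positive
                 * signed value. */
                printf("%d\n", seqno_passed(3, 0xfffffffeu));   /* prints 1 */
                printf("%d\n", seqno_passed(0xfffffffeu, 3));   /* prints 0 */
                return 0;
        }
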
@@ -1709,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
1709 | retiring_seqno = request->seqno; | 1785 | retiring_seqno = request->seqno; |
1710 | 1786 | ||
1711 | if (i915_seqno_passed(seqno, retiring_seqno) || | 1787 | if (i915_seqno_passed(seqno, retiring_seqno) || |
1712 | dev_priv->mm.wedged) { | 1788 | atomic_read(&dev_priv->mm.wedged)) { |
1713 | i915_gem_retire_request(dev, request); | 1789 | i915_gem_retire_request(dev, request); |
1714 | 1790 | ||
1715 | list_del(&request->list); | 1791 | list_del(&request->list); |
@@ -1751,6 +1827,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1751 | 1827 | ||
1752 | BUG_ON(seqno == 0); | 1828 | BUG_ON(seqno == 0); |
1753 | 1829 | ||
1830 | if (atomic_read(&dev_priv->mm.wedged)) | ||
1831 | return -EIO; | ||
1832 | |||
1754 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { | 1833 | if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { |
1755 | if (IS_IGDNG(dev)) | 1834 | if (IS_IGDNG(dev)) |
1756 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 1835 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
@@ -1763,16 +1842,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) | |||
1763 | i915_driver_irq_postinstall(dev); | 1842 | i915_driver_irq_postinstall(dev); |
1764 | } | 1843 | } |
1765 | 1844 | ||
1845 | trace_i915_gem_request_wait_begin(dev, seqno); | ||
1846 | |||
1766 | dev_priv->mm.waiting_gem_seqno = seqno; | 1847 | dev_priv->mm.waiting_gem_seqno = seqno; |
1767 | i915_user_irq_get(dev); | 1848 | i915_user_irq_get(dev); |
1768 | ret = wait_event_interruptible(dev_priv->irq_queue, | 1849 | ret = wait_event_interruptible(dev_priv->irq_queue, |
1769 | i915_seqno_passed(i915_get_gem_seqno(dev), | 1850 | i915_seqno_passed(i915_get_gem_seqno(dev), |
1770 | seqno) || | 1851 | seqno) || |
1771 | dev_priv->mm.wedged); | 1852 | atomic_read(&dev_priv->mm.wedged)); |
1772 | i915_user_irq_put(dev); | 1853 | i915_user_irq_put(dev); |
1773 | dev_priv->mm.waiting_gem_seqno = 0; | 1854 | dev_priv->mm.waiting_gem_seqno = 0; |
1855 | |||
1856 | trace_i915_gem_request_wait_end(dev, seqno); | ||
1774 | } | 1857 | } |
1775 | if (dev_priv->mm.wedged) | 1858 | if (atomic_read(&dev_priv->mm.wedged)) |
1776 | ret = -EIO; | 1859 | ret = -EIO; |
1777 | 1860 | ||
1778 | if (ret && ret != -ERESTARTSYS) | 1861 | if (ret && ret != -ERESTARTSYS) |
@@ -1803,6 +1886,8 @@ i915_gem_flush(struct drm_device *dev, | |||
1803 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | 1886 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, |
1804 | invalidate_domains, flush_domains); | 1887 | invalidate_domains, flush_domains); |
1805 | #endif | 1888 | #endif |
1889 | trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, | ||
1890 | invalidate_domains, flush_domains); | ||
1806 | 1891 | ||
1807 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 1892 | if (flush_domains & I915_GEM_DOMAIN_CPU) |
1808 | drm_agp_chipset_flush(dev); | 1893 | drm_agp_chipset_flush(dev); |
@@ -1915,6 +2000,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1915 | return -EINVAL; | 2000 | return -EINVAL; |
1916 | } | 2001 | } |
1917 | 2002 | ||
2003 | /* blow away mappings if mapped through GTT */ | ||
2004 | i915_gem_release_mmap(obj); | ||
2005 | |||
2006 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2007 | i915_gem_clear_fence_reg(obj); | ||
2008 | |||
1918 | /* Move the object to the CPU domain to ensure that | 2009 | /* Move the object to the CPU domain to ensure that |
1919 | * any possible CPU writes while it's not in the GTT | 2010 | * any possible CPU writes while it's not in the GTT |
1920 | * are flushed when we go to remap it. This will | 2011 | * are flushed when we go to remap it. This will |
@@ -1928,21 +2019,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1928 | return ret; | 2019 | return ret; |
1929 | } | 2020 | } |
1930 | 2021 | ||
2022 | BUG_ON(obj_priv->active); | ||
2023 | |||
1931 | if (obj_priv->agp_mem != NULL) { | 2024 | if (obj_priv->agp_mem != NULL) { |
1932 | drm_unbind_agp(obj_priv->agp_mem); | 2025 | drm_unbind_agp(obj_priv->agp_mem); |
1933 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 2026 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); |
1934 | obj_priv->agp_mem = NULL; | 2027 | obj_priv->agp_mem = NULL; |
1935 | } | 2028 | } |
1936 | 2029 | ||
1937 | BUG_ON(obj_priv->active); | ||
1938 | |||
1939 | /* blow away mappings if mapped through GTT */ | ||
1940 | i915_gem_release_mmap(obj); | ||
1941 | |||
1942 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
1943 | i915_gem_clear_fence_reg(obj); | ||
1944 | |||
1945 | i915_gem_object_put_pages(obj); | 2030 | i915_gem_object_put_pages(obj); |
2031 | BUG_ON(obj_priv->pages_refcount); | ||
1946 | 2032 | ||
1947 | if (obj_priv->gtt_space) { | 2033 | if (obj_priv->gtt_space) { |
1948 | atomic_dec(&dev->gtt_count); | 2034 | atomic_dec(&dev->gtt_count); |
@@ -1956,40 +2042,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1956 | if (!list_empty(&obj_priv->list)) | 2042 | if (!list_empty(&obj_priv->list)) |
1957 | list_del_init(&obj_priv->list); | 2043 | list_del_init(&obj_priv->list); |
1958 | 2044 | ||
2045 | if (i915_gem_object_is_purgeable(obj_priv)) | ||
2046 | i915_gem_object_truncate(obj); | ||
2047 | |||
2048 | trace_i915_gem_object_unbind(obj); | ||
2049 | |||
1959 | return 0; | 2050 | return 0; |
1960 | } | 2051 | } |
1961 | 2052 | ||
2053 | static struct drm_gem_object * | ||
2054 | i915_gem_find_inactive_object(struct drm_device *dev, int min_size) | ||
2055 | { | ||
2056 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2057 | struct drm_i915_gem_object *obj_priv; | ||
2058 | struct drm_gem_object *best = NULL; | ||
2059 | struct drm_gem_object *first = NULL; | ||
2060 | |||
2061 | /* Try to find the smallest clean object */ | ||
2062 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
2063 | struct drm_gem_object *obj = obj_priv->obj; | ||
2064 | if (obj->size >= min_size) { | ||
2065 | if ((!obj_priv->dirty || | ||
2066 | i915_gem_object_is_purgeable(obj_priv)) && | ||
2067 | (!best || obj->size < best->size)) { | ||
2068 | best = obj; | ||
2069 | if (best->size == min_size) | ||
2070 | return best; | ||
2071 | } | ||
2072 | if (!first) | ||
2073 | first = obj; | ||
2074 | } | ||
2075 | } | ||
2076 | |||
2077 | return best ? best : first; | ||
2078 | } | ||
2079 | |||
1962 | static int | 2080 | static int |
1963 | i915_gem_evict_something(struct drm_device *dev) | 2081 | i915_gem_evict_everything(struct drm_device *dev) |
2082 | { | ||
2083 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2084 | uint32_t seqno; | ||
2085 | int ret; | ||
2086 | bool lists_empty; | ||
2087 | |||
2088 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2089 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
2090 | list_empty(&dev_priv->mm.flushing_list) && | ||
2091 | list_empty(&dev_priv->mm.active_list)); | ||
2092 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2093 | |||
2094 | if (lists_empty) | ||
2095 | return -ENOSPC; | ||
2096 | |||
2097 | /* Flush everything (on to the inactive lists) and evict */ | ||
2098 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2099 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); | ||
2100 | if (seqno == 0) | ||
2101 | return -ENOMEM; | ||
2102 | |||
2103 | ret = i915_wait_request(dev, seqno); | ||
2104 | if (ret) | ||
2105 | return ret; | ||
2106 | |||
2107 | ret = i915_gem_evict_from_inactive_list(dev); | ||
2108 | if (ret) | ||
2109 | return ret; | ||
2110 | |||
2111 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2112 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
2113 | list_empty(&dev_priv->mm.flushing_list) && | ||
2114 | list_empty(&dev_priv->mm.active_list)); | ||
2115 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2116 | BUG_ON(!lists_empty); | ||
2117 | |||
2118 | return 0; | ||
2119 | } | ||
2120 | |||
2121 | static int | ||
2122 | i915_gem_evict_something(struct drm_device *dev, int min_size) | ||
1964 | { | 2123 | { |
1965 | drm_i915_private_t *dev_priv = dev->dev_private; | 2124 | drm_i915_private_t *dev_priv = dev->dev_private; |
1966 | struct drm_gem_object *obj; | 2125 | struct drm_gem_object *obj; |
1967 | struct drm_i915_gem_object *obj_priv; | 2126 | int ret; |
1968 | int ret = 0; | ||
1969 | 2127 | ||
1970 | for (;;) { | 2128 | for (;;) { |
2129 | i915_gem_retire_requests(dev); | ||
2130 | |||
1971 | /* If there's an inactive buffer available now, grab it | 2131 | /* If there's an inactive buffer available now, grab it |
1972 | * and be done. | 2132 | * and be done. |
1973 | */ | 2133 | */ |
1974 | if (!list_empty(&dev_priv->mm.inactive_list)) { | 2134 | obj = i915_gem_find_inactive_object(dev, min_size); |
1975 | obj_priv = list_first_entry(&dev_priv->mm.inactive_list, | 2135 | if (obj) { |
1976 | struct drm_i915_gem_object, | 2136 | struct drm_i915_gem_object *obj_priv; |
1977 | list); | 2137 | |
1978 | obj = obj_priv->obj; | ||
1979 | BUG_ON(obj_priv->pin_count != 0); | ||
1980 | #if WATCH_LRU | 2138 | #if WATCH_LRU |
1981 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 2139 | DRM_INFO("%s: evicting %p\n", __func__, obj); |
1982 | #endif | 2140 | #endif |
2141 | obj_priv = obj->driver_private; | ||
2142 | BUG_ON(obj_priv->pin_count != 0); | ||
1983 | BUG_ON(obj_priv->active); | 2143 | BUG_ON(obj_priv->active); |
1984 | 2144 | ||
1985 | /* Wait on the rendering and unbind the buffer. */ | 2145 | /* Wait on the rendering and unbind the buffer. */ |
1986 | ret = i915_gem_object_unbind(obj); | 2146 | return i915_gem_object_unbind(obj); |
1987 | break; | ||
1988 | } | 2147 | } |
1989 | 2148 | ||
1990 | /* If we didn't get anything, but the ring is still processing | 2149 | /* If we didn't get anything, but the ring is still processing |
1991 | * things, wait for one of those things to finish and hopefully | 2150 | * things, wait for the next to finish and hopefully leave us |
1992 | * leave us a buffer to evict. | 2151 | * a buffer to evict. |
1993 | */ | 2152 | */ |
1994 | if (!list_empty(&dev_priv->mm.request_list)) { | 2153 | if (!list_empty(&dev_priv->mm.request_list)) { |
1995 | struct drm_i915_gem_request *request; | 2154 | struct drm_i915_gem_request *request; |
@@ -2000,16 +2159,9 @@ i915_gem_evict_something(struct drm_device *dev) | |||
2000 | 2159 | ||
2001 | ret = i915_wait_request(dev, request->seqno); | 2160 | ret = i915_wait_request(dev, request->seqno); |
2002 | if (ret) | 2161 | if (ret) |
2003 | break; | 2162 | return ret; |
2004 | 2163 | ||
2005 | /* if waiting caused an object to become inactive, | 2164 | continue; |
2006 | * then loop around and wait for it. Otherwise, we | ||
2007 | * assume that waiting freed and unbound something, | ||
2008 | * so there should now be some space in the GTT | ||
2009 | */ | ||
2010 | if (!list_empty(&dev_priv->mm.inactive_list)) | ||
2011 | continue; | ||
2012 | break; | ||
2013 | } | 2165 | } |
2014 | 2166 | ||
2015 | /* If we didn't have anything on the request list but there | 2167 | /* If we didn't have anything on the request list but there |
@@ -2018,46 +2170,44 @@ i915_gem_evict_something(struct drm_device *dev) | |||
2018 | * will get moved to inactive. | 2170 | * will get moved to inactive. |
2019 | */ | 2171 | */ |
2020 | if (!list_empty(&dev_priv->mm.flushing_list)) { | 2172 | if (!list_empty(&dev_priv->mm.flushing_list)) { |
2021 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 2173 | struct drm_i915_gem_object *obj_priv; |
2022 | struct drm_i915_gem_object, | ||
2023 | list); | ||
2024 | obj = obj_priv->obj; | ||
2025 | 2174 | ||
2026 | i915_gem_flush(dev, | 2175 | /* Find an object that we can immediately reuse */ |
2027 | obj->write_domain, | 2176 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { |
2028 | obj->write_domain); | 2177 | obj = obj_priv->obj; |
2029 | i915_add_request(dev, NULL, obj->write_domain); | 2178 | if (obj->size >= min_size) |
2179 | break; | ||
2030 | 2180 | ||
2031 | obj = NULL; | 2181 | obj = NULL; |
2032 | continue; | 2182 | } |
2033 | } | ||
2034 | 2183 | ||
2035 | DRM_ERROR("inactive empty %d request empty %d " | 2184 | if (obj != NULL) { |
2036 | "flushing empty %d\n", | 2185 | uint32_t seqno; |
2037 | list_empty(&dev_priv->mm.inactive_list), | ||
2038 | list_empty(&dev_priv->mm.request_list), | ||
2039 | list_empty(&dev_priv->mm.flushing_list)); | ||
2040 | /* If we didn't do any of the above, there's nothing to be done | ||
2041 | * and we just can't fit it in. | ||
2042 | */ | ||
2043 | return -ENOSPC; | ||
2044 | } | ||
2045 | return ret; | ||
2046 | } | ||
2047 | 2186 | ||
2048 | static int | 2187 | i915_gem_flush(dev, |
2049 | i915_gem_evict_everything(struct drm_device *dev) | 2188 | obj->write_domain, |
2050 | { | 2189 | obj->write_domain); |
2051 | int ret; | 2190 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2191 | if (seqno == 0) | ||
2192 | return -ENOMEM; | ||
2052 | 2193 | ||
2053 | for (;;) { | 2194 | ret = i915_wait_request(dev, seqno); |
2054 | ret = i915_gem_evict_something(dev); | 2195 | if (ret) |
2055 | if (ret != 0) | 2196 | return ret; |
2056 | break; | 2197 | |
2198 | continue; | ||
2199 | } | ||
2200 | } | ||
2201 | |||
2202 | /* If we didn't do any of the above, there's no single buffer | ||
2203 | * large enough to swap out for the new one, so just evict | ||
2204 | * everything and start again. (This should be rare.) | ||
2205 | */ | ||
2206 | if (!list_empty (&dev_priv->mm.inactive_list)) | ||
2207 | return i915_gem_evict_from_inactive_list(dev); | ||
2208 | else | ||
2209 | return i915_gem_evict_everything(dev); | ||
2057 | } | 2210 | } |
2058 | if (ret == -ENOSPC) | ||
2059 | return 0; | ||
2060 | return ret; | ||
2061 | } | 2211 | } |
2062 | 2212 | ||
2063 | int | 2213 | int |
@@ -2080,7 +2230,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2080 | BUG_ON(obj_priv->pages != NULL); | 2230 | BUG_ON(obj_priv->pages != NULL); |
2081 | obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); | 2231 | obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); |
2082 | if (obj_priv->pages == NULL) { | 2232 | if (obj_priv->pages == NULL) { |
2083 | DRM_ERROR("Faled to allocate page list\n"); | ||
2084 | obj_priv->pages_refcount--; | 2233 | obj_priv->pages_refcount--; |
2085 | return -ENOMEM; | 2234 | return -ENOMEM; |
2086 | } | 2235 | } |
@@ -2091,7 +2240,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2091 | page = read_mapping_page(mapping, i, NULL); | 2240 | page = read_mapping_page(mapping, i, NULL); |
2092 | if (IS_ERR(page)) { | 2241 | if (IS_ERR(page)) { |
2093 | ret = PTR_ERR(page); | 2242 | ret = PTR_ERR(page); |
2094 | DRM_ERROR("read_mapping_page failed: %d\n", ret); | ||
2095 | i915_gem_object_put_pages(obj); | 2243 | i915_gem_object_put_pages(obj); |
2096 | return ret; | 2244 | return ret; |
2097 | } | 2245 | } |
@@ -2328,6 +2476,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2328 | else | 2476 | else |
2329 | i830_write_fence_reg(reg); | 2477 | i830_write_fence_reg(reg); |
2330 | 2478 | ||
2479 | trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); | ||
2480 | |||
2331 | return 0; | 2481 | return 0; |
2332 | } | 2482 | } |
2333 | 2483 | ||
@@ -2410,10 +2560,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2410 | drm_i915_private_t *dev_priv = dev->dev_private; | 2560 | drm_i915_private_t *dev_priv = dev->dev_private; |
2411 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2561 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2412 | struct drm_mm_node *free_space; | 2562 | struct drm_mm_node *free_space; |
2413 | int page_count, ret; | 2563 | bool retry_alloc = false; |
2564 | int ret; | ||
2414 | 2565 | ||
2415 | if (dev_priv->mm.suspended) | 2566 | if (dev_priv->mm.suspended) |
2416 | return -EBUSY; | 2567 | return -EBUSY; |
2568 | |||
2569 | if (obj_priv->madv != I915_MADV_WILLNEED) { | ||
2570 | DRM_ERROR("Attempting to bind a purgeable object\n"); | ||
2571 | return -EINVAL; | ||
2572 | } | ||
2573 | |||
2417 | if (alignment == 0) | 2574 | if (alignment == 0) |
2418 | alignment = i915_gem_get_gtt_alignment(obj); | 2575 | alignment = i915_gem_get_gtt_alignment(obj); |
2419 | if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { | 2576 | if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { |
@@ -2433,30 +2590,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2433 | } | 2590 | } |
2434 | } | 2591 | } |
2435 | if (obj_priv->gtt_space == NULL) { | 2592 | if (obj_priv->gtt_space == NULL) { |
2436 | bool lists_empty; | ||
2437 | |||
2438 | /* If the gtt is empty and we're still having trouble | 2593 | /* If the gtt is empty and we're still having trouble |
2439 | * fitting our object in, we're out of memory. | 2594 | * fitting our object in, we're out of memory. |
2440 | */ | 2595 | */ |
2441 | #if WATCH_LRU | 2596 | #if WATCH_LRU |
2442 | DRM_INFO("%s: GTT full, evicting something\n", __func__); | 2597 | DRM_INFO("%s: GTT full, evicting something\n", __func__); |
2443 | #endif | 2598 | #endif |
2444 | spin_lock(&dev_priv->mm.active_list_lock); | 2599 | ret = i915_gem_evict_something(dev, obj->size); |
2445 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 2600 | if (ret) |
2446 | list_empty(&dev_priv->mm.flushing_list) && | ||
2447 | list_empty(&dev_priv->mm.active_list)); | ||
2448 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2449 | if (lists_empty) { | ||
2450 | DRM_ERROR("GTT full, but LRU list empty\n"); | ||
2451 | return -ENOSPC; | ||
2452 | } | ||
2453 | |||
2454 | ret = i915_gem_evict_something(dev); | ||
2455 | if (ret != 0) { | ||
2456 | if (ret != -ERESTARTSYS) | ||
2457 | DRM_ERROR("Failed to evict a buffer %d\n", ret); | ||
2458 | return ret; | 2601 | return ret; |
2459 | } | 2602 | |
2460 | goto search_free; | 2603 | goto search_free; |
2461 | } | 2604 | } |
2462 | 2605 | ||
@@ -2464,27 +2607,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2464 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 2607 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2465 | obj->size, obj_priv->gtt_offset); | 2608 | obj->size, obj_priv->gtt_offset); |
2466 | #endif | 2609 | #endif |
2610 | if (retry_alloc) { | ||
2611 | i915_gem_object_set_page_gfp_mask (obj, | ||
2612 | i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
2613 | } | ||
2467 | ret = i915_gem_object_get_pages(obj); | 2614 | ret = i915_gem_object_get_pages(obj); |
2615 | if (retry_alloc) { | ||
2616 | i915_gem_object_set_page_gfp_mask (obj, | ||
2617 | i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
2618 | } | ||
2468 | if (ret) { | 2619 | if (ret) { |
2469 | drm_mm_put_block(obj_priv->gtt_space); | 2620 | drm_mm_put_block(obj_priv->gtt_space); |
2470 | obj_priv->gtt_space = NULL; | 2621 | obj_priv->gtt_space = NULL; |
2622 | |||
2623 | if (ret == -ENOMEM) { | ||
2624 | /* first try to clear up some space from the GTT */ | ||
2625 | ret = i915_gem_evict_something(dev, obj->size); | ||
2626 | if (ret) { | ||
2627 | /* now try to shrink everyone else */ | ||
2628 | if (! retry_alloc) { | ||
2629 | retry_alloc = true; | ||
2630 | goto search_free; | ||
2631 | } | ||
2632 | |||
2633 | return ret; | ||
2634 | } | ||
2635 | |||
2636 | goto search_free; | ||
2637 | } | ||
2638 | |||
2471 | return ret; | 2639 | return ret; |
2472 | } | 2640 | } |
2473 | 2641 | ||
2474 | page_count = obj->size / PAGE_SIZE; | ||
2475 | /* Create an AGP memory structure pointing at our pages, and bind it | 2642 | /* Create an AGP memory structure pointing at our pages, and bind it |
2476 | * into the GTT. | 2643 | * into the GTT. |
2477 | */ | 2644 | */ |
2478 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 2645 | obj_priv->agp_mem = drm_agp_bind_pages(dev, |
2479 | obj_priv->pages, | 2646 | obj_priv->pages, |
2480 | page_count, | 2647 | obj->size >> PAGE_SHIFT, |
2481 | obj_priv->gtt_offset, | 2648 | obj_priv->gtt_offset, |
2482 | obj_priv->agp_type); | 2649 | obj_priv->agp_type); |
2483 | if (obj_priv->agp_mem == NULL) { | 2650 | if (obj_priv->agp_mem == NULL) { |
2484 | i915_gem_object_put_pages(obj); | 2651 | i915_gem_object_put_pages(obj); |
2485 | drm_mm_put_block(obj_priv->gtt_space); | 2652 | drm_mm_put_block(obj_priv->gtt_space); |
2486 | obj_priv->gtt_space = NULL; | 2653 | obj_priv->gtt_space = NULL; |
2487 | return -ENOMEM; | 2654 | |
2655 | ret = i915_gem_evict_something(dev, obj->size); | ||
2656 | if (ret) | ||
2657 | return ret; | ||
2658 | |||
2659 | goto search_free; | ||
2488 | } | 2660 | } |
2489 | atomic_inc(&dev->gtt_count); | 2661 | atomic_inc(&dev->gtt_count); |
2490 | atomic_add(obj->size, &dev->gtt_memory); | 2662 | atomic_add(obj->size, &dev->gtt_memory); |
@@ -2496,6 +2668,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2496 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 2668 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); |
2497 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 2669 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); |
2498 | 2670 | ||
2671 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); | ||
2672 | |||
2499 | return 0; | 2673 | return 0; |
2500 | } | 2674 | } |
2501 | 2675 | ||
@@ -2511,15 +2685,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj) | |||
2511 | if (obj_priv->pages == NULL) | 2685 | if (obj_priv->pages == NULL) |
2512 | return; | 2686 | return; |
2513 | 2687 | ||
2514 | /* XXX: The 865 in particular appears to be weird in how it handles | 2688 | trace_i915_gem_object_clflush(obj); |
2515 | * cache flushing. We haven't figured it out, but the | ||
2516 | * clflush+agp_chipset_flush doesn't appear to successfully get the | ||
2517 | * data visible to the GPU, while wbinvd + agp_chipset_flush does. | ||
2518 | */ | ||
2519 | if (IS_I865G(obj->dev)) { | ||
2520 | wbinvd(); | ||
2521 | return; | ||
2522 | } | ||
2523 | 2689 | ||
2524 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 2690 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); |
2525 | } | 2691 | } |
@@ -2530,21 +2696,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
2530 | { | 2696 | { |
2531 | struct drm_device *dev = obj->dev; | 2697 | struct drm_device *dev = obj->dev; |
2532 | uint32_t seqno; | 2698 | uint32_t seqno; |
2699 | uint32_t old_write_domain; | ||
2533 | 2700 | ||
2534 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2701 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2535 | return; | 2702 | return; |
2536 | 2703 | ||
2537 | /* Queue the GPU write cache flushing we need. */ | 2704 | /* Queue the GPU write cache flushing we need. */ |
2705 | old_write_domain = obj->write_domain; | ||
2538 | i915_gem_flush(dev, 0, obj->write_domain); | 2706 | i915_gem_flush(dev, 0, obj->write_domain); |
2539 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2707 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2540 | obj->write_domain = 0; | 2708 | obj->write_domain = 0; |
2541 | i915_gem_object_move_to_active(obj, seqno); | 2709 | i915_gem_object_move_to_active(obj, seqno); |
2710 | |||
2711 | trace_i915_gem_object_change_domain(obj, | ||
2712 | obj->read_domains, | ||
2713 | old_write_domain); | ||
2542 | } | 2714 | } |
2543 | 2715 | ||
2544 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2716 | /** Flushes the GTT write domain for the object if it's dirty. */ |
2545 | static void | 2717 | static void |
2546 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 2718 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) |
2547 | { | 2719 | { |
2720 | uint32_t old_write_domain; | ||
2721 | |||
2548 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 2722 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) |
2549 | return; | 2723 | return; |
2550 | 2724 | ||
@@ -2552,7 +2726,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | |||
2552 | * to it immediately go to main memory as far as we know, so there's | 2726 | * to it immediately go to main memory as far as we know, so there's |
2553 | * no chipset flush. It also doesn't land in render cache. | 2727 | * no chipset flush. It also doesn't land in render cache. |
2554 | */ | 2728 | */ |
2729 | old_write_domain = obj->write_domain; | ||
2555 | obj->write_domain = 0; | 2730 | obj->write_domain = 0; |
2731 | |||
2732 | trace_i915_gem_object_change_domain(obj, | ||
2733 | obj->read_domains, | ||
2734 | old_write_domain); | ||
2556 | } | 2735 | } |
2557 | 2736 | ||
2558 | /** Flushes the CPU write domain for the object if it's dirty. */ | 2737 | /** Flushes the CPU write domain for the object if it's dirty. */ |
@@ -2560,13 +2739,19 @@ static void | |||
2560 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 2739 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) |
2561 | { | 2740 | { |
2562 | struct drm_device *dev = obj->dev; | 2741 | struct drm_device *dev = obj->dev; |
2742 | uint32_t old_write_domain; | ||
2563 | 2743 | ||
2564 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 2744 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) |
2565 | return; | 2745 | return; |
2566 | 2746 | ||
2567 | i915_gem_clflush_object(obj); | 2747 | i915_gem_clflush_object(obj); |
2568 | drm_agp_chipset_flush(dev); | 2748 | drm_agp_chipset_flush(dev); |
2749 | old_write_domain = obj->write_domain; | ||
2569 | obj->write_domain = 0; | 2750 | obj->write_domain = 0; |
2751 | |||
2752 | trace_i915_gem_object_change_domain(obj, | ||
2753 | obj->read_domains, | ||
2754 | old_write_domain); | ||
2570 | } | 2755 | } |
2571 | 2756 | ||
2572 | /** | 2757 | /** |
@@ -2579,6 +2764,7 @@ int | |||
2579 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2764 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) |
2580 | { | 2765 | { |
2581 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2766 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2767 | uint32_t old_write_domain, old_read_domains; | ||
2582 | int ret; | 2768 | int ret; |
2583 | 2769 | ||
2584 | /* Not valid to be called on unbound objects. */ | 2770 | /* Not valid to be called on unbound objects. */ |
@@ -2591,6 +2777,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2591 | if (ret != 0) | 2777 | if (ret != 0) |
2592 | return ret; | 2778 | return ret; |
2593 | 2779 | ||
2780 | old_write_domain = obj->write_domain; | ||
2781 | old_read_domains = obj->read_domains; | ||
2782 | |||
2594 | /* If we're writing through the GTT domain, then CPU and GPU caches | 2783 | /* If we're writing through the GTT domain, then CPU and GPU caches |
2595 | * will need to be invalidated at next use. | 2784 | * will need to be invalidated at next use. |
2596 | */ | 2785 | */ |
@@ -2609,6 +2798,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2609 | obj_priv->dirty = 1; | 2798 | obj_priv->dirty = 1; |
2610 | } | 2799 | } |
2611 | 2800 | ||
2801 | trace_i915_gem_object_change_domain(obj, | ||
2802 | old_read_domains, | ||
2803 | old_write_domain); | ||
2804 | |||
2612 | return 0; | 2805 | return 0; |
2613 | } | 2806 | } |
2614 | 2807 | ||
@@ -2621,6 +2814,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2621 | static int | 2814 | static int |
2622 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 2815 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) |
2623 | { | 2816 | { |
2817 | uint32_t old_write_domain, old_read_domains; | ||
2624 | int ret; | 2818 | int ret; |
2625 | 2819 | ||
2626 | i915_gem_object_flush_gpu_write_domain(obj); | 2820 | i915_gem_object_flush_gpu_write_domain(obj); |
@@ -2636,6 +2830,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2636 | */ | 2830 | */ |
2637 | i915_gem_object_set_to_full_cpu_read_domain(obj); | 2831 | i915_gem_object_set_to_full_cpu_read_domain(obj); |
2638 | 2832 | ||
2833 | old_write_domain = obj->write_domain; | ||
2834 | old_read_domains = obj->read_domains; | ||
2835 | |||
2639 | /* Flush the CPU cache if it's still invalid. */ | 2836 | /* Flush the CPU cache if it's still invalid. */ |
2640 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 2837 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
2641 | i915_gem_clflush_object(obj); | 2838 | i915_gem_clflush_object(obj); |
@@ -2656,6 +2853,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2656 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 2853 | obj->write_domain = I915_GEM_DOMAIN_CPU; |
2657 | } | 2854 | } |
2658 | 2855 | ||
2856 | trace_i915_gem_object_change_domain(obj, | ||
2857 | old_read_domains, | ||
2858 | old_write_domain); | ||
2859 | |||
2659 | return 0; | 2860 | return 0; |
2660 | } | 2861 | } |
2661 | 2862 | ||
@@ -2777,6 +2978,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2777 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2978 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2778 | uint32_t invalidate_domains = 0; | 2979 | uint32_t invalidate_domains = 0; |
2779 | uint32_t flush_domains = 0; | 2980 | uint32_t flush_domains = 0; |
2981 | uint32_t old_read_domains; | ||
2780 | 2982 | ||
2781 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); | 2983 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); |
2782 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); | 2984 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); |
@@ -2823,6 +3025,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2823 | i915_gem_clflush_object(obj); | 3025 | i915_gem_clflush_object(obj); |
2824 | } | 3026 | } |
2825 | 3027 | ||
3028 | old_read_domains = obj->read_domains; | ||
3029 | |||
2826 | /* The actual obj->write_domain will be updated with | 3030 | /* The actual obj->write_domain will be updated with |
2827 | * pending_write_domain after we emit the accumulated flush for all | 3031 | * pending_write_domain after we emit the accumulated flush for all |
2828 | * of our domain changes in execbuffers (which clears objects' | 3032 | * of our domain changes in execbuffers (which clears objects' |
@@ -2841,6 +3045,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2841 | obj->read_domains, obj->write_domain, | 3045 | obj->read_domains, obj->write_domain, |
2842 | dev->invalidate_domains, dev->flush_domains); | 3046 | dev->invalidate_domains, dev->flush_domains); |
2843 | #endif | 3047 | #endif |
3048 | |||
3049 | trace_i915_gem_object_change_domain(obj, | ||
3050 | old_read_domains, | ||
3051 | obj->write_domain); | ||
2844 | } | 3052 | } |
2845 | 3053 | ||
2846 | /** | 3054 | /** |
@@ -2893,6 +3101,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
2893 | uint64_t offset, uint64_t size) | 3101 | uint64_t offset, uint64_t size) |
2894 | { | 3102 | { |
2895 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3103 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
3104 | uint32_t old_read_domains; | ||
2896 | int i, ret; | 3105 | int i, ret; |
2897 | 3106 | ||
2898 | if (offset == 0 && size == obj->size) | 3107 | if (offset == 0 && size == obj->size) |
@@ -2939,8 +3148,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
2939 | */ | 3148 | */ |
2940 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3149 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
2941 | 3150 | ||
3151 | old_read_domains = obj->read_domains; | ||
2942 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3152 | obj->read_domains |= I915_GEM_DOMAIN_CPU; |
2943 | 3153 | ||
3154 | trace_i915_gem_object_change_domain(obj, | ||
3155 | old_read_domains, | ||
3156 | obj->write_domain); | ||
3157 | |||
2944 | return 0; | 3158 | return 0; |
2945 | } | 3159 | } |
2946 | 3160 | ||
@@ -2984,6 +3198,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2984 | } | 3198 | } |
2985 | target_obj_priv = target_obj->driver_private; | 3199 | target_obj_priv = target_obj->driver_private; |
2986 | 3200 | ||
3201 | #if WATCH_RELOC | ||
3202 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
3203 | "read %08x write %08x gtt %08x " | ||
3204 | "presumed %08x delta %08x\n", | ||
3205 | __func__, | ||
3206 | obj, | ||
3207 | (int) reloc->offset, | ||
3208 | (int) reloc->target_handle, | ||
3209 | (int) reloc->read_domains, | ||
3210 | (int) reloc->write_domain, | ||
3211 | (int) target_obj_priv->gtt_offset, | ||
3212 | (int) reloc->presumed_offset, | ||
3213 | reloc->delta); | ||
3214 | #endif | ||
3215 | |||
2987 | /* The target buffer should have appeared before us in the | 3216 | /* The target buffer should have appeared before us in the |
2988 | * exec_object list, so it should have a GTT space bound by now. | 3217 | * exec_object list, so it should have a GTT space bound by now. |
2989 | */ | 3218 | */ |
@@ -2995,25 +3224,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2995 | return -EINVAL; | 3224 | return -EINVAL; |
2996 | } | 3225 | } |
2997 | 3226 | ||
2998 | if (reloc->offset > obj->size - 4) { | 3227 | /* Validate that the target is in a valid r/w GPU domain */ |
2999 | DRM_ERROR("Relocation beyond object bounds: " | ||
3000 | "obj %p target %d offset %d size %d.\n", | ||
3001 | obj, reloc->target_handle, | ||
3002 | (int) reloc->offset, (int) obj->size); | ||
3003 | drm_gem_object_unreference(target_obj); | ||
3004 | i915_gem_object_unpin(obj); | ||
3005 | return -EINVAL; | ||
3006 | } | ||
3007 | if (reloc->offset & 3) { | ||
3008 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
3009 | "obj %p target %d offset %d.\n", | ||
3010 | obj, reloc->target_handle, | ||
3011 | (int) reloc->offset); | ||
3012 | drm_gem_object_unreference(target_obj); | ||
3013 | i915_gem_object_unpin(obj); | ||
3014 | return -EINVAL; | ||
3015 | } | ||
3016 | |||
3017 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3228 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
3018 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { | 3229 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { |
3019 | DRM_ERROR("reloc with read/write CPU domains: " | 3230 | DRM_ERROR("reloc with read/write CPU domains: " |
@@ -3027,7 +3238,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3027 | i915_gem_object_unpin(obj); | 3238 | i915_gem_object_unpin(obj); |
3028 | return -EINVAL; | 3239 | return -EINVAL; |
3029 | } | 3240 | } |
3030 | |||
3031 | if (reloc->write_domain && target_obj->pending_write_domain && | 3241 | if (reloc->write_domain && target_obj->pending_write_domain && |
3032 | reloc->write_domain != target_obj->pending_write_domain) { | 3242 | reloc->write_domain != target_obj->pending_write_domain) { |
3033 | DRM_ERROR("Write domain conflict: " | 3243 | DRM_ERROR("Write domain conflict: " |
@@ -3042,21 +3252,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3042 | return -EINVAL; | 3252 | return -EINVAL; |
3043 | } | 3253 | } |
3044 | 3254 | ||
3045 | #if WATCH_RELOC | ||
3046 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
3047 | "read %08x write %08x gtt %08x " | ||
3048 | "presumed %08x delta %08x\n", | ||
3049 | __func__, | ||
3050 | obj, | ||
3051 | (int) reloc->offset, | ||
3052 | (int) reloc->target_handle, | ||
3053 | (int) reloc->read_domains, | ||
3054 | (int) reloc->write_domain, | ||
3055 | (int) target_obj_priv->gtt_offset, | ||
3056 | (int) reloc->presumed_offset, | ||
3057 | reloc->delta); | ||
3058 | #endif | ||
3059 | |||
3060 | target_obj->pending_read_domains |= reloc->read_domains; | 3255 | target_obj->pending_read_domains |= reloc->read_domains; |
3061 | target_obj->pending_write_domain |= reloc->write_domain; | 3256 | target_obj->pending_write_domain |= reloc->write_domain; |
3062 | 3257 | ||
@@ -3068,6 +3263,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3068 | continue; | 3263 | continue; |
3069 | } | 3264 | } |
3070 | 3265 | ||
3266 | /* Check that the relocation address is valid... */ | ||
3267 | if (reloc->offset > obj->size - 4) { | ||
3268 | DRM_ERROR("Relocation beyond object bounds: " | ||
3269 | "obj %p target %d offset %d size %d.\n", | ||
3270 | obj, reloc->target_handle, | ||
3271 | (int) reloc->offset, (int) obj->size); | ||
3272 | drm_gem_object_unreference(target_obj); | ||
3273 | i915_gem_object_unpin(obj); | ||
3274 | return -EINVAL; | ||
3275 | } | ||
3276 | if (reloc->offset & 3) { | ||
3277 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
3278 | "obj %p target %d offset %d.\n", | ||
3279 | obj, reloc->target_handle, | ||
3280 | (int) reloc->offset); | ||
3281 | drm_gem_object_unreference(target_obj); | ||
3282 | i915_gem_object_unpin(obj); | ||
3283 | return -EINVAL; | ||
3284 | } | ||
3285 | |||
3286 | /* and points to somewhere within the target object. */ | ||
3287 | if (reloc->delta >= target_obj->size) { | ||
3288 | DRM_ERROR("Relocation beyond target object bounds: " | ||
3289 | "obj %p target %d delta %d size %d.\n", | ||
3290 | obj, reloc->target_handle, | ||
3291 | (int) reloc->delta, (int) target_obj->size); | ||
3292 | drm_gem_object_unreference(target_obj); | ||
3293 | i915_gem_object_unpin(obj); | ||
3294 | return -EINVAL; | ||
3295 | } | ||
3296 | |||
3071 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 3297 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
3072 | if (ret != 0) { | 3298 | if (ret != 0) { |
3073 | drm_gem_object_unreference(target_obj); | 3299 | drm_gem_object_unreference(target_obj); |
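The relocation checks moved above now run after the fast path that skips already-correct relocations, and validate three properties in order: the write offset must leave room for a 32-bit value inside the object, it must be dword aligned, and the delta must land inside the target object. A standalone sketch of the same predicate, with hypothetical stand-in types (this is not the driver's struct):

#include <stdbool.h>
#include <stdint.h>

/* Standalone sketch of the relocation sanity checks above; the fields
 * mirror drm_i915_gem_relocation_entry but this is not the driver's
 * struct.
 */
struct reloc_sketch {
	uint64_t offset;	/* where in the batch the address is written */
	uint32_t delta;		/* offset within the target object */
};

static bool reloc_is_valid(const struct reloc_sketch *r,
			   uint64_t obj_size, uint64_t target_size)
{
	if (r->offset > obj_size - 4)	/* must leave room for a 32-bit write */
		return false;
	if (r->offset & 3)		/* GPU expects dword alignment */
		return false;
	if (r->delta >= target_size)	/* must land inside the target */
		return false;
	return true;
}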
@@ -3126,6 +3352,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, | |||
3126 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 3352 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; |
3127 | exec_len = (uint32_t) exec->batch_len; | 3353 | exec_len = (uint32_t) exec->batch_len; |
3128 | 3354 | ||
3355 | trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno); | ||
3356 | |||
3129 | count = nbox ? nbox : 1; | 3357 | count = nbox ? nbox : 1; |
3130 | 3358 | ||
3131 | for (i = 0; i < count; i++) { | 3359 | for (i = 0; i < count; i++) { |
@@ -3363,7 +3591,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3363 | 3591 | ||
3364 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3592 | i915_verify_inactive(dev, __FILE__, __LINE__); |
3365 | 3593 | ||
3366 | if (dev_priv->mm.wedged) { | 3594 | if (atomic_read(&dev_priv->mm.wedged)) { |
3367 | DRM_ERROR("Execbuf while wedged\n"); | 3595 | DRM_ERROR("Execbuf while wedged\n"); |
3368 | mutex_unlock(&dev->struct_mutex); | 3596 | mutex_unlock(&dev->struct_mutex); |
3369 | ret = -EIO; | 3597 | ret = -EIO; |
@@ -3421,8 +3649,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3421 | 3649 | ||
3422 | /* error other than GTT full, or we've already tried again */ | 3650 | /* error other than GTT full, or we've already tried again */ |
3423 | if (ret != -ENOSPC || pin_tries >= 1) { | 3651 | if (ret != -ENOSPC || pin_tries >= 1) { |
3424 | if (ret != -ERESTARTSYS) | 3652 | if (ret != -ERESTARTSYS) { |
3425 | DRM_ERROR("Failed to pin buffers %d\n", ret); | 3653 | unsigned long long total_size = 0; |
3654 | for (i = 0; i < args->buffer_count; i++) | ||
3655 | total_size += object_list[i]->size; | ||
3656 | DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", | ||
3657 | pinned+1, args->buffer_count, | ||
3658 | total_size, ret); | ||
3659 | DRM_ERROR("%d objects [%d pinned], " | ||
3660 | "%d object bytes [%d pinned], " | ||
3661 | "%d/%d gtt bytes\n", | ||
3662 | atomic_read(&dev->object_count), | ||
3663 | atomic_read(&dev->pin_count), | ||
3664 | atomic_read(&dev->object_memory), | ||
3665 | atomic_read(&dev->pin_memory), | ||
3666 | atomic_read(&dev->gtt_memory), | ||
3667 | dev->gtt_total); | ||
3668 | } | ||
3426 | goto err; | 3669 | goto err; |
3427 | } | 3670 | } |
3428 | 3671 | ||
@@ -3433,7 +3676,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3433 | 3676 | ||
3434 | /* evict everyone we can from the aperture */ | 3677 | /* evict everyone we can from the aperture */ |
3435 | ret = i915_gem_evict_everything(dev); | 3678 | ret = i915_gem_evict_everything(dev); |
3436 | if (ret) | 3679 | if (ret && ret != -ENOSPC) |
3437 | goto err; | 3680 | goto err; |
3438 | } | 3681 | } |
3439 | 3682 | ||
@@ -3489,8 +3732,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3489 | 3732 | ||
3490 | for (i = 0; i < args->buffer_count; i++) { | 3733 | for (i = 0; i < args->buffer_count; i++) { |
3491 | struct drm_gem_object *obj = object_list[i]; | 3734 | struct drm_gem_object *obj = object_list[i]; |
3735 | uint32_t old_write_domain = obj->write_domain; | ||
3492 | 3736 | ||
3493 | obj->write_domain = obj->pending_write_domain; | 3737 | obj->write_domain = obj->pending_write_domain; |
3738 | trace_i915_gem_object_change_domain(obj, | ||
3739 | obj->read_domains, | ||
3740 | old_write_domain); | ||
3494 | } | 3741 | } |
3495 | 3742 | ||
3496 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3743 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -3607,11 +3854,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
3607 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3854 | i915_verify_inactive(dev, __FILE__, __LINE__); |
3608 | if (obj_priv->gtt_space == NULL) { | 3855 | if (obj_priv->gtt_space == NULL) { |
3609 | ret = i915_gem_object_bind_to_gtt(obj, alignment); | 3856 | ret = i915_gem_object_bind_to_gtt(obj, alignment); |
3610 | if (ret != 0) { | 3857 | if (ret) |
3611 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3612 | DRM_ERROR("Failure to bind: %d\n", ret); | ||
3613 | return ret; | 3858 | return ret; |
3614 | } | ||
3615 | } | 3859 | } |
3616 | /* | 3860 | /* |
3617 | * Pre-965 chips need a fence register set up in order to | 3861 | * Pre-965 chips need a fence register set up in order to |
@@ -3691,6 +3935,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
3691 | } | 3935 | } |
3692 | obj_priv = obj->driver_private; | 3936 | obj_priv = obj->driver_private; |
3693 | 3937 | ||
3938 | if (obj_priv->madv != I915_MADV_WILLNEED) { | ||
3939 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | ||
3940 | drm_gem_object_unreference(obj); | ||
3941 | mutex_unlock(&dev->struct_mutex); | ||
3942 | return -EINVAL; | ||
3943 | } | ||
3944 | |||
3694 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 3945 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { |
3695 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 3946 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
3696 | args->handle); | 3947 | args->handle); |
@@ -3803,6 +4054,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | |||
3803 | return i915_gem_ring_throttle(dev, file_priv); | 4054 | return i915_gem_ring_throttle(dev, file_priv); |
3804 | } | 4055 | } |
3805 | 4056 | ||
4057 | int | ||
4058 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | ||
4059 | struct drm_file *file_priv) | ||
4060 | { | ||
4061 | struct drm_i915_gem_madvise *args = data; | ||
4062 | struct drm_gem_object *obj; | ||
4063 | struct drm_i915_gem_object *obj_priv; | ||
4064 | |||
4065 | switch (args->madv) { | ||
4066 | case I915_MADV_DONTNEED: | ||
4067 | case I915_MADV_WILLNEED: | ||
4068 | break; | ||
4069 | default: | ||
4070 | return -EINVAL; | ||
4071 | } | ||
4072 | |||
4073 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
4074 | if (obj == NULL) { | ||
4075 | DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", | ||
4076 | args->handle); | ||
4077 | return -EBADF; | ||
4078 | } | ||
4079 | |||
4080 | mutex_lock(&dev->struct_mutex); | ||
4081 | obj_priv = obj->driver_private; | ||
4082 | |||
4083 | if (obj_priv->pin_count) { | ||
4084 | drm_gem_object_unreference(obj); | ||
4085 | mutex_unlock(&dev->struct_mutex); | ||
4086 | |||
4087 | DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); | ||
4088 | return -EINVAL; | ||
4089 | } | ||
4090 | |||
4091 | if (obj_priv->madv != __I915_MADV_PURGED) | ||
4092 | obj_priv->madv = args->madv; | ||
4093 | |||
4094 | /* if the object is no longer bound, discard its backing storage */ | ||
4095 | if (i915_gem_object_is_purgeable(obj_priv) && | ||
4096 | obj_priv->gtt_space == NULL) | ||
4097 | i915_gem_object_truncate(obj); | ||
4098 | |||
4099 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | ||
4100 | |||
4101 | drm_gem_object_unreference(obj); | ||
4102 | mutex_unlock(&dev->struct_mutex); | ||
4103 | |||
4104 | return 0; | ||
4105 | } | ||
4106 | |||
3806 | int i915_gem_init_object(struct drm_gem_object *obj) | 4107 | int i915_gem_init_object(struct drm_gem_object *obj) |
3807 | { | 4108 | { |
3808 | struct drm_i915_gem_object *obj_priv; | 4109 | struct drm_i915_gem_object *obj_priv; |
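Userspace reaches the new handler through DRM_IOCTL_I915_GEM_MADVISE. A hedged caller sketch, assuming the struct drm_i915_gem_madvise layout implied by the handler's use of args->handle, args->madv and args->retained:

#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>

/* Hypothetical userspace caller for the madvise ioctl above; the field
 * names follow the handler's use of args->handle, args->madv and
 * args->retained.
 */
static int mark_dontneed(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;

	/* retained == 0 means the kernel already purged the pages */
	return madv.retained;
}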
@@ -3827,6 +4128,9 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
3827 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 4128 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
3828 | INIT_LIST_HEAD(&obj_priv->list); | 4129 | INIT_LIST_HEAD(&obj_priv->list); |
3829 | INIT_LIST_HEAD(&obj_priv->fence_list); | 4130 | INIT_LIST_HEAD(&obj_priv->fence_list); |
4131 | obj_priv->madv = I915_MADV_WILLNEED; | ||
4132 | |||
4133 | trace_i915_gem_object_create(obj); | ||
3830 | 4134 | ||
3831 | return 0; | 4135 | return 0; |
3832 | } | 4136 | } |
@@ -3836,6 +4140,8 @@ void i915_gem_free_object(struct drm_gem_object *obj) | |||
3836 | struct drm_device *dev = obj->dev; | 4140 | struct drm_device *dev = obj->dev; |
3837 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4141 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
3838 | 4142 | ||
4143 | trace_i915_gem_object_destroy(obj); | ||
4144 | |||
3839 | while (obj_priv->pin_count > 0) | 4145 | while (obj_priv->pin_count > 0) |
3840 | i915_gem_object_unpin(obj); | 4146 | i915_gem_object_unpin(obj); |
3841 | 4147 | ||
@@ -3844,43 +4150,35 @@ void i915_gem_free_object(struct drm_gem_object *obj) | |||
3844 | 4150 | ||
3845 | i915_gem_object_unbind(obj); | 4151 | i915_gem_object_unbind(obj); |
3846 | 4152 | ||
3847 | i915_gem_free_mmap_offset(obj); | 4153 | if (obj_priv->mmap_offset) |
4154 | i915_gem_free_mmap_offset(obj); | ||
3848 | 4155 | ||
3849 | kfree(obj_priv->page_cpu_valid); | 4156 | kfree(obj_priv->page_cpu_valid); |
3850 | kfree(obj_priv->bit_17); | 4157 | kfree(obj_priv->bit_17); |
3851 | kfree(obj->driver_private); | 4158 | kfree(obj->driver_private); |
3852 | } | 4159 | } |
3853 | 4160 | ||
3854 | /** Unbinds all objects that are on the given buffer list. */ | 4161 | /** Unbinds all inactive objects. */ |
3855 | static int | 4162 | static int |
3856 | i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) | 4163 | i915_gem_evict_from_inactive_list(struct drm_device *dev) |
3857 | { | 4164 | { |
3858 | struct drm_gem_object *obj; | 4165 | drm_i915_private_t *dev_priv = dev->dev_private; |
3859 | struct drm_i915_gem_object *obj_priv; | ||
3860 | int ret; | ||
3861 | 4166 | ||
3862 | while (!list_empty(head)) { | 4167 | while (!list_empty(&dev_priv->mm.inactive_list)) { |
3863 | obj_priv = list_first_entry(head, | 4168 | struct drm_gem_object *obj; |
3864 | struct drm_i915_gem_object, | 4169 | int ret; |
3865 | list); | ||
3866 | obj = obj_priv->obj; | ||
3867 | 4170 | ||
3868 | if (obj_priv->pin_count != 0) { | 4171 | obj = list_first_entry(&dev_priv->mm.inactive_list, |
3869 | DRM_ERROR("Pinned object in unbind list\n"); | 4172 | struct drm_i915_gem_object, |
3870 | mutex_unlock(&dev->struct_mutex); | 4173 | list)->obj; |
3871 | return -EINVAL; | ||
3872 | } | ||
3873 | 4174 | ||
3874 | ret = i915_gem_object_unbind(obj); | 4175 | ret = i915_gem_object_unbind(obj); |
3875 | if (ret != 0) { | 4176 | if (ret != 0) { |
3876 | DRM_ERROR("Error unbinding object in LeaveVT: %d\n", | 4177 | DRM_ERROR("Error unbinding object: %d\n", ret); |
3877 | ret); | ||
3878 | mutex_unlock(&dev->struct_mutex); | ||
3879 | return ret; | 4178 | return ret; |
3880 | } | 4179 | } |
3881 | } | 4180 | } |
3882 | 4181 | ||
3883 | |||
3884 | return 0; | 4182 | return 0; |
3885 | } | 4183 | } |
3886 | 4184 | ||
@@ -3902,6 +4200,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3902 | * We need to replace this with a semaphore, or something. | 4200 | * We need to replace this with a semaphore, or something. |
3903 | */ | 4201 | */ |
3904 | dev_priv->mm.suspended = 1; | 4202 | dev_priv->mm.suspended = 1; |
4203 | del_timer(&dev_priv->hangcheck_timer); | ||
3905 | 4204 | ||
3906 | /* Cancel the retire work handler, wait for it to finish if running | 4205 | /* Cancel the retire work handler, wait for it to finish if running |
3907 | */ | 4206 | */ |
@@ -3931,7 +4230,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3931 | if (last_seqno == cur_seqno) { | 4230 | if (last_seqno == cur_seqno) { |
3932 | if (stuck++ > 100) { | 4231 | if (stuck++ > 100) { |
3933 | DRM_ERROR("hardware wedged\n"); | 4232 | DRM_ERROR("hardware wedged\n"); |
3934 | dev_priv->mm.wedged = 1; | 4233 | atomic_set(&dev_priv->mm.wedged, 1); |
3935 | DRM_WAKEUP(&dev_priv->irq_queue); | 4234 | DRM_WAKEUP(&dev_priv->irq_queue); |
3936 | break; | 4235 | break; |
3937 | } | 4236 | } |
@@ -3944,7 +4243,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3944 | i915_gem_retire_requests(dev); | 4243 | i915_gem_retire_requests(dev); |
3945 | 4244 | ||
3946 | spin_lock(&dev_priv->mm.active_list_lock); | 4245 | spin_lock(&dev_priv->mm.active_list_lock); |
3947 | if (!dev_priv->mm.wedged) { | 4246 | if (!atomic_read(&dev_priv->mm.wedged)) { |
3948 | /* Active and flushing should now be empty as we've | 4247 | /* Active and flushing should now be empty as we've |
3949 | * waited for a sequence higher than any pending execbuffer | 4248 | * waited for a sequence higher than any pending execbuffer |
3950 | */ | 4249 | */ |
@@ -3962,29 +4261,41 @@ i915_gem_idle(struct drm_device *dev) | |||
3962 | * the GPU domains and just stuff them onto inactive. | 4261 | * the GPU domains and just stuff them onto inactive. |
3963 | */ | 4262 | */ |
3964 | while (!list_empty(&dev_priv->mm.active_list)) { | 4263 | while (!list_empty(&dev_priv->mm.active_list)) { |
3965 | struct drm_i915_gem_object *obj_priv; | 4264 | struct drm_gem_object *obj; |
4265 | uint32_t old_write_domain; | ||
3966 | 4266 | ||
3967 | obj_priv = list_first_entry(&dev_priv->mm.active_list, | 4267 | obj = list_first_entry(&dev_priv->mm.active_list, |
3968 | struct drm_i915_gem_object, | 4268 | struct drm_i915_gem_object, |
3969 | list); | 4269 | list)->obj; |
3970 | obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | 4270 | old_write_domain = obj->write_domain; |
3971 | i915_gem_object_move_to_inactive(obj_priv->obj); | 4271 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; |
4272 | i915_gem_object_move_to_inactive(obj); | ||
4273 | |||
4274 | trace_i915_gem_object_change_domain(obj, | ||
4275 | obj->read_domains, | ||
4276 | old_write_domain); | ||
3972 | } | 4277 | } |
3973 | spin_unlock(&dev_priv->mm.active_list_lock); | 4278 | spin_unlock(&dev_priv->mm.active_list_lock); |
3974 | 4279 | ||
3975 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 4280 | while (!list_empty(&dev_priv->mm.flushing_list)) { |
3976 | struct drm_i915_gem_object *obj_priv; | 4281 | struct drm_gem_object *obj; |
4282 | uint32_t old_write_domain; | ||
3977 | 4283 | ||
3978 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 4284 | obj = list_first_entry(&dev_priv->mm.flushing_list, |
3979 | struct drm_i915_gem_object, | 4285 | struct drm_i915_gem_object, |
3980 | list); | 4286 | list)->obj; |
3981 | obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | 4287 | old_write_domain = obj->write_domain; |
3982 | i915_gem_object_move_to_inactive(obj_priv->obj); | 4288 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; |
4289 | i915_gem_object_move_to_inactive(obj); | ||
4290 | |||
4291 | trace_i915_gem_object_change_domain(obj, | ||
4292 | obj->read_domains, | ||
4293 | old_write_domain); | ||
3983 | } | 4294 | } |
3984 | 4295 | ||
3985 | 4296 | ||
3986 | /* Move all inactive buffers out of the GTT. */ | 4297 | /* Move all inactive buffers out of the GTT. */ |
3987 | ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); | 4298 | ret = i915_gem_evict_from_inactive_list(dev); |
3988 | WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); | 4299 | WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); |
3989 | if (ret) { | 4300 | if (ret) { |
3990 | mutex_unlock(&dev->struct_mutex); | 4301 | mutex_unlock(&dev->struct_mutex); |
@@ -4206,9 +4517,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4206 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 4517 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4207 | return 0; | 4518 | return 0; |
4208 | 4519 | ||
4209 | if (dev_priv->mm.wedged) { | 4520 | if (atomic_read(&dev_priv->mm.wedged)) { |
4210 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); | 4521 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); |
4211 | dev_priv->mm.wedged = 0; | 4522 | atomic_set(&dev_priv->mm.wedged, 0); |
4212 | } | 4523 | } |
4213 | 4524 | ||
4214 | mutex_lock(&dev->struct_mutex); | 4525 | mutex_lock(&dev->struct_mutex); |
@@ -4274,6 +4585,10 @@ i915_gem_load(struct drm_device *dev) | |||
4274 | i915_gem_retire_work_handler); | 4585 | i915_gem_retire_work_handler); |
4275 | dev_priv->mm.next_gem_seqno = 1; | 4586 | dev_priv->mm.next_gem_seqno = 1; |
4276 | 4587 | ||
4588 | spin_lock(&shrink_list_lock); | ||
4589 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | ||
4590 | spin_unlock(&shrink_list_lock); | ||
4591 | |||
4277 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 4592 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
4278 | dev_priv->fence_reg_start = 3; | 4593 | dev_priv->fence_reg_start = 3; |
4279 | 4594 | ||
@@ -4491,3 +4806,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) | |||
4491 | list_del_init(i915_file_priv->mm.request_list.next); | 4806 | list_del_init(i915_file_priv->mm.request_list.next); |
4492 | mutex_unlock(&dev->struct_mutex); | 4807 | mutex_unlock(&dev->struct_mutex); |
4493 | } | 4808 | } |
4809 | |||
4810 | static int | ||
4811 | i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) | ||
4812 | { | ||
4813 | drm_i915_private_t *dev_priv, *next_dev; | ||
4814 | struct drm_i915_gem_object *obj_priv, *next_obj; | ||
4815 | int cnt = 0; | ||
4816 | int would_deadlock = 1; | ||
4817 | |||
4818 | /* "fast-path" to count number of available objects */ | ||
4819 | if (nr_to_scan == 0) { | ||
4820 | spin_lock(&shrink_list_lock); | ||
4821 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | ||
4822 | struct drm_device *dev = dev_priv->dev; | ||
4823 | |||
4824 | if (mutex_trylock(&dev->struct_mutex)) { | ||
4825 | list_for_each_entry(obj_priv, | ||
4826 | &dev_priv->mm.inactive_list, | ||
4827 | list) | ||
4828 | cnt++; | ||
4829 | mutex_unlock(&dev->struct_mutex); | ||
4830 | } | ||
4831 | } | ||
4832 | spin_unlock(&shrink_list_lock); | ||
4833 | |||
4834 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
4835 | } | ||
4836 | |||
4837 | spin_lock(&shrink_list_lock); | ||
4838 | |||
4839 | /* first scan for clean buffers */ | ||
4840 | list_for_each_entry_safe(dev_priv, next_dev, | ||
4841 | &shrink_list, mm.shrink_list) { | ||
4842 | struct drm_device *dev = dev_priv->dev; | ||
4843 | |||
4844 | if (!mutex_trylock(&dev->struct_mutex)) | ||
4845 | continue; | ||
4846 | |||
4847 | spin_unlock(&shrink_list_lock); | ||
4848 | |||
4849 | i915_gem_retire_requests(dev); | ||
4850 | |||
4851 | list_for_each_entry_safe(obj_priv, next_obj, | ||
4852 | &dev_priv->mm.inactive_list, | ||
4853 | list) { | ||
4854 | if (i915_gem_object_is_purgeable(obj_priv)) { | ||
4855 | i915_gem_object_unbind(obj_priv->obj); | ||
4856 | if (--nr_to_scan <= 0) | ||
4857 | break; | ||
4858 | } | ||
4859 | } | ||
4860 | |||
4861 | spin_lock(&shrink_list_lock); | ||
4862 | mutex_unlock(&dev->struct_mutex); | ||
4863 | |||
4864 | would_deadlock = 0; | ||
4865 | |||
4866 | if (nr_to_scan <= 0) | ||
4867 | break; | ||
4868 | } | ||
4869 | |||
4870 | /* second pass, evict/count anything still on the inactive list */ | ||
4871 | list_for_each_entry_safe(dev_priv, next_dev, | ||
4872 | &shrink_list, mm.shrink_list) { | ||
4873 | struct drm_device *dev = dev_priv->dev; | ||
4874 | |||
4875 | if (!mutex_trylock(&dev->struct_mutex)) | ||
4876 | continue; | ||
4877 | |||
4878 | spin_unlock(&shrink_list_lock); | ||
4879 | |||
4880 | list_for_each_entry_safe(obj_priv, next_obj, | ||
4881 | &dev_priv->mm.inactive_list, | ||
4882 | list) { | ||
4883 | if (nr_to_scan > 0) { | ||
4884 | i915_gem_object_unbind(obj_priv->obj); | ||
4885 | nr_to_scan--; | ||
4886 | } else | ||
4887 | cnt++; | ||
4888 | } | ||
4889 | |||
4890 | spin_lock(&shrink_list_lock); | ||
4891 | mutex_unlock(&dev->struct_mutex); | ||
4892 | |||
4893 | would_deadlock = 0; | ||
4894 | } | ||
4895 | |||
4896 | spin_unlock(&shrink_list_lock); | ||
4897 | |||
4898 | if (would_deadlock) | ||
4899 | return -1; | ||
4900 | else if (cnt > 0) | ||
4901 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
4902 | else | ||
4903 | return 0; | ||
4904 | } | ||
4905 | |||
4906 | static struct shrinker shrinker = { | ||
4907 | .shrink = i915_gem_shrink, | ||
4908 | .seeks = DEFAULT_SEEKS, | ||
4909 | }; | ||
4910 | |||
4911 | __init void | ||
4912 | i915_gem_shrinker_init(void) | ||
4913 | { | ||
4914 | register_shrinker(&shrinker); | ||
4915 | } | ||
4916 | |||
4917 | __exit void | ||
4918 | i915_gem_shrinker_exit(void) | ||
4919 | { | ||
4920 | unregister_shrinker(&shrinker); | ||
4921 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6c89f2ff2495..4dfeec7cdd42 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drm.h" | 31 | #include "drm.h" |
32 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
33 | #include "i915_drv.h" | 33 | #include "i915_drv.h" |
34 | #include "i915_trace.h" | ||
34 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
35 | 36 | ||
36 | #define MAX_NOPID ((u32)~0) | 37 | #define MAX_NOPID ((u32)~0) |
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | |||
279 | } | 280 | } |
280 | 281 | ||
281 | if (gt_iir & GT_USER_INTERRUPT) { | 282 | if (gt_iir & GT_USER_INTERRUPT) { |
282 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | 283 | u32 seqno = i915_get_gem_seqno(dev); |
284 | dev_priv->mm.irq_gem_seqno = seqno; | ||
285 | trace_i915_gem_request_complete(dev, seqno); | ||
283 | DRM_WAKEUP(&dev_priv->irq_queue); | 286 | DRM_WAKEUP(&dev_priv->irq_queue); |
284 | } | 287 | } |
285 | 288 | ||
@@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work) | |||
302 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 305 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
303 | error_work); | 306 | error_work); |
304 | struct drm_device *dev = dev_priv->dev; | 307 | struct drm_device *dev = dev_priv->dev; |
305 | char *event_string = "ERROR=1"; | 308 | char *error_event[] = { "ERROR=1", NULL }; |
306 | char *envp[] = { event_string, NULL }; | 309 | char *reset_event[] = { "RESET=1", NULL }; |
310 | char *reset_done_event[] = { "ERROR=0", NULL }; | ||
307 | 311 | ||
308 | DRM_DEBUG("generating error event\n"); | 312 | DRM_DEBUG("generating error event\n"); |
309 | 313 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | |
310 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); | 314 | |
315 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
316 | if (IS_I965G(dev)) { | ||
317 | DRM_DEBUG("resetting chip\n"); | ||
318 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | ||
319 | if (!i965_reset(dev, GDRST_RENDER)) { | ||
320 | atomic_set(&dev_priv->mm.wedged, 0); | ||
321 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | ||
322 | } | ||
323 | } else { | ||
324 | printk(KERN_ERR "reboot required\n"); | ||
325 | } | ||
326 | } | ||
311 | } | 327 | } |
312 | 328 | ||
313 | /** | 329 | /** |
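The ERROR=1 / RESET=1 / ERROR=0 strings above arrive in userspace as KOBJ_CHANGE uevents on the DRM device. A hedged sketch of a listener on the kernel uevent netlink multicast group; it is not part of the patch, and a real consumer would also filter on the device path:

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);

		if (len <= 0)
			break;
		buf[len] = '\0';
		/* a uevent is a NUL-separated list of KEY=VALUE strings */
		for (char *p = buf; p < buf + len; p += strlen(p) + 1)
			if (!strncmp(p, "ERROR=", 6) || !strncmp(p, "RESET=", 6))
				printf("gpu event: %s\n", p);
	}
	close(fd);
	return 0;
}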
@@ -372,7 +388,7 @@ out: | |||
372 | * so userspace knows something bad happened (should trigger collection | 388 | * so userspace knows something bad happened (should trigger collection |
373 | * of a ring dump etc.). | 389 | * of a ring dump etc.). |
374 | */ | 390 | */ |
375 | static void i915_handle_error(struct drm_device *dev) | 391 | static void i915_handle_error(struct drm_device *dev, bool wedged) |
376 | { | 392 | { |
377 | struct drm_i915_private *dev_priv = dev->dev_private; | 393 | struct drm_i915_private *dev_priv = dev->dev_private; |
378 | u32 eir = I915_READ(EIR); | 394 | u32 eir = I915_READ(EIR); |
@@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev) | |||
482 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 498 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
483 | } | 499 | } |
484 | 500 | ||
501 | if (wedged) { | ||
502 | atomic_set(&dev_priv->mm.wedged, 1); | ||
503 | |||
504 | /* | ||
505 | * Wake up waiting processes so they don't hang | ||
506 | */ | ||
507 | printk(KERN_ERR "i915: Waking up sleeping processes\n"); | ||
508 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
509 | } | ||
510 | |||
485 | queue_work(dev_priv->wq, &dev_priv->error_work); | 511 | queue_work(dev_priv->wq, &dev_priv->error_work); |
486 | } | 512 | } |
487 | 513 | ||
@@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
527 | pipeb_stats = I915_READ(PIPEBSTAT); | 553 | pipeb_stats = I915_READ(PIPEBSTAT); |
528 | 554 | ||
529 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 555 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
530 | i915_handle_error(dev); | 556 | i915_handle_error(dev, false); |
531 | 557 | ||
532 | /* | 558 | /* |
533 | * Clear the PIPE(A|B)STAT regs before the IIR | 559 | * Clear the PIPE(A|B)STAT regs before the IIR |
@@ -599,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
599 | } | 625 | } |
600 | 626 | ||
601 | if (iir & I915_USER_INTERRUPT) { | 627 | if (iir & I915_USER_INTERRUPT) { |
602 | dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | 628 | u32 seqno = i915_get_gem_seqno(dev); |
629 | dev_priv->mm.irq_gem_seqno = seqno; | ||
630 | trace_i915_gem_request_complete(dev, seqno); | ||
603 | DRM_WAKEUP(&dev_priv->irq_queue); | 631 | DRM_WAKEUP(&dev_priv->irq_queue); |
632 | dev_priv->hangcheck_count = 0; | ||
633 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
604 | } | 634 | } |
605 | 635 | ||
606 | if (pipea_stats & vblank_status) { | 636 | if (pipea_stats & vblank_status) { |
@@ -880,6 +910,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
880 | return -EINVAL; | 910 | return -EINVAL; |
881 | } | 911 | } |
882 | 912 | ||
913 | struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { | ||
914 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
915 | return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); | ||
916 | } | ||
917 | |||
918 | /** | ||
919 | * This is called when the chip hasn't reported back with completed | ||
920 | * batchbuffers in a long time. The first time this is called we simply record | ||
921 | * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses | ||
922 | * again, we assume the chip is wedged and try to fix it. | ||
923 | */ | ||
924 | void i915_hangcheck_elapsed(unsigned long data) | ||
925 | { | ||
926 | struct drm_device *dev = (struct drm_device *)data; | ||
927 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
928 | uint32_t acthd; | ||
929 | |||
930 | if (!IS_I965G(dev)) | ||
931 | acthd = I915_READ(ACTHD); | ||
932 | else | ||
933 | acthd = I915_READ(ACTHD_I965); | ||
934 | |||
935 | /* If all work is done then ACTHD clearly hasn't advanced. */ | ||
936 | if (list_empty(&dev_priv->mm.request_list) || | ||
937 | i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { | ||
938 | dev_priv->hangcheck_count = 0; | ||
939 | return; | ||
940 | } | ||
941 | |||
942 | if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { | ||
943 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | ||
944 | i915_handle_error(dev, true); | ||
945 | return; | ||
946 | } | ||
947 | |||
948 | /* Reset timer in case the chip hangs without another request being added */ | ||
949 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
950 | |||
951 | if (acthd != dev_priv->last_acthd) | ||
952 | dev_priv->hangcheck_count = 0; | ||
953 | else | ||
954 | dev_priv->hangcheck_count++; | ||
955 | |||
956 | dev_priv->last_acthd = acthd; | ||
957 | } | ||
958 | |||
883 | /* drm_dma.h hooks | 959 | /* drm_dma.h hooks |
884 | */ | 960 | */ |
885 | static void igdng_irq_preinstall(struct drm_device *dev) | 961 | static void igdng_irq_preinstall(struct drm_device *dev) |
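i915_hangcheck_elapsed() runs off dev_priv->hangcheck_timer, which is re-armed both here and from the user-interrupt handler above and cancelled in i915_gem_idle(). A sketch of how such a timer would be initialized at load time with the timer API of this era; the init function name is hypothetical:

#include <linux/jiffies.h>
#include <linux/timer.h>

#include "i915_drv.h"

/* Hypothetical init for the hangcheck timer used above, written against
 * the pre-timer_setup() API of this era: the callback receives the
 * drm_device pointer through the timer's unsigned long data argument.
 */
static void demo_hangcheck_init(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->hangcheck_timer,
		    i915_hangcheck_elapsed, (unsigned long)dev);
	/* arm the first check one period out */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + DRM_I915_HANGCHECK_PERIOD);
}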
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index e4b4e8898e39..2d5193556d3f 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
148 | struct drm_i915_private *dev_priv = dev->dev_private; | 148 | struct drm_i915_private *dev_priv = dev->dev_private; |
149 | struct opregion_asle *asle = dev_priv->opregion.asle; | 149 | struct opregion_asle *asle = dev_priv->opregion.asle; |
150 | u32 blc_pwm_ctl, blc_pwm_ctl2; | 150 | u32 blc_pwm_ctl, blc_pwm_ctl2; |
151 | u32 max_backlight, level, shift; | ||
151 | 152 | ||
152 | if (!(bclp & ASLE_BCLP_VALID)) | 153 | if (!(bclp & ASLE_BCLP_VALID)) |
153 | return ASLE_BACKLIGHT_FAIL; | 154 | return ASLE_BACKLIGHT_FAIL; |
@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
157 | return ASLE_BACKLIGHT_FAIL; | 158 | return ASLE_BACKLIGHT_FAIL; |
158 | 159 | ||
159 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL); | 160 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL); |
160 | blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
161 | blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); | 161 | blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); |
162 | 162 | ||
163 | if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) | 163 | if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) |
164 | pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); | 164 | pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); |
165 | else | 165 | else { |
166 | I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); | 166 | if (IS_IGD(dev)) { |
167 | 167 | blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | |
168 | max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> | ||
169 | BACKLIGHT_MODULATION_FREQ_SHIFT; | ||
170 | shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; | ||
171 | } else { | ||
172 | blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
173 | max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> | ||
174 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; | ||
175 | shift = BACKLIGHT_DUTY_CYCLE_SHIFT; | ||
176 | } | ||
177 | level = (bclp * max_backlight) / 255; | ||
178 | I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); | ||
179 | } | ||
168 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; | 180 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; |
169 | 181 | ||
170 | return 0; | 182 | return 0; |
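The rewritten non-combination-mode path scales the requested brightness (bclp, 0-255) into the controller's duty-cycle range rather than writing it raw: level = bclp * max_backlight / 255, shifted into the duty-cycle field. A worked example under assumed register values:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the backlight scaling above, with assumed register
 * contents: max_backlight as decoded from BLC_PWM_CTL's modulation-
 * frequency field, shift as BACKLIGHT_DUTY_CYCLE_SHIFT on a non-IGD part.
 */
int main(void)
{
	uint32_t bclp = 128;			/* ~50% brightness request */
	uint32_t max_backlight = 0x1fff;	/* assumed decoded PWM maximum */
	uint32_t shift = 0;			/* duty cycle lives in the low bits */
	uint32_t level = (bclp * max_backlight) / 255;

	printf("duty cycle field = 0x%x\n", level << shift);
	return 0;
}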
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3f7963553464..0466ddbeba32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -86,6 +86,10 @@ | |||
86 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) | 86 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) |
87 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) | 87 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) |
88 | #define LBB 0xf4 | 88 | #define LBB 0xf4 |
89 | #define GDRST 0xc0 | ||
90 | #define GDRST_FULL (0<<2) | ||
91 | #define GDRST_RENDER (1<<2) | ||
92 | #define GDRST_MEDIA (3<<2) | ||
89 | 93 | ||
90 | /* VGA stuff */ | 94 | /* VGA stuff */ |
91 | 95 | ||
@@ -344,9 +348,37 @@ | |||
344 | #define FBC_CTL_PLANEA (0<<0) | 348 | #define FBC_CTL_PLANEA (0<<0) |
345 | #define FBC_CTL_PLANEB (1<<0) | 349 | #define FBC_CTL_PLANEB (1<<0) |
346 | #define FBC_FENCE_OFF 0x0321b | 350 | #define FBC_FENCE_OFF 0x0321b |
351 | #define FBC_TAG 0x03300 | ||
347 | 352 | ||
348 | #define FBC_LL_SIZE (1536) | 353 | #define FBC_LL_SIZE (1536) |
349 | 354 | ||
355 | /* Framebuffer compression for GM45+ */ | ||
356 | #define DPFC_CB_BASE 0x3200 | ||
357 | #define DPFC_CONTROL 0x3208 | ||
358 | #define DPFC_CTL_EN (1<<31) | ||
359 | #define DPFC_CTL_PLANEA (0<<30) | ||
360 | #define DPFC_CTL_PLANEB (1<<30) | ||
361 | #define DPFC_CTL_FENCE_EN (1<<29) | ||
362 | #define DPFC_SR_EN (1<<10) | ||
363 | #define DPFC_CTL_LIMIT_1X (0<<6) | ||
364 | #define DPFC_CTL_LIMIT_2X (1<<6) | ||
365 | #define DPFC_CTL_LIMIT_4X (2<<6) | ||
366 | #define DPFC_RECOMP_CTL 0x320c | ||
367 | #define DPFC_RECOMP_STALL_EN (1<<27) | ||
368 | #define DPFC_RECOMP_STALL_WM_SHIFT (16) | ||
369 | #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) | ||
370 | #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) | ||
371 | #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) | ||
372 | #define DPFC_STATUS 0x3210 | ||
373 | #define DPFC_INVAL_SEG_SHIFT (16) | ||
374 | #define DPFC_INVAL_SEG_MASK (0x07ff0000) | ||
375 | #define DPFC_COMP_SEG_SHIFT (0) | ||
376 | #define DPFC_COMP_SEG_MASK (0x000003ff) | ||
377 | #define DPFC_STATUS2 0x3214 | ||
378 | #define DPFC_FENCE_YOFF 0x3218 | ||
379 | #define DPFC_CHICKEN 0x3224 | ||
380 | #define DPFC_HT_MODIFY (1<<31) | ||
381 | |||
350 | /* | 382 | /* |
351 | * GPIO regs | 383 | * GPIO regs |
352 | */ | 384 | */ |
@@ -2000,6 +2032,8 @@ | |||
2000 | #define PF_ENABLE (1<<31) | 2032 | #define PF_ENABLE (1<<31) |
2001 | #define PFA_WIN_SZ 0x68074 | 2033 | #define PFA_WIN_SZ 0x68074 |
2002 | #define PFB_WIN_SZ 0x68874 | 2034 | #define PFB_WIN_SZ 0x68874 |
2035 | #define PFA_WIN_POS 0x68070 | ||
2036 | #define PFB_WIN_POS 0x68870 | ||
2003 | 2037 | ||
2004 | /* legacy palette */ | 2038 | /* legacy palette */ |
2005 | #define LGC_PALETTE_A 0x4a000 | 2039 | #define LGC_PALETTE_A 0x4a000 |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 20d4d19f5568..bd6d8d91ca9f 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -228,6 +228,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
228 | 228 | ||
229 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 229 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
230 | return; | 230 | return; |
231 | |||
231 | /* Pipe & plane A info */ | 232 | /* Pipe & plane A info */ |
232 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 233 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); |
233 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 234 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); |
@@ -285,6 +286,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
285 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); | 286 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); |
286 | return; | 287 | return; |
287 | } | 288 | } |
289 | |||
288 | static void i915_restore_modeset_reg(struct drm_device *dev) | 290 | static void i915_restore_modeset_reg(struct drm_device *dev) |
289 | { | 291 | { |
290 | struct drm_i915_private *dev_priv = dev->dev_private; | 292 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -379,19 +381,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
379 | 381 | ||
380 | return; | 382 | return; |
381 | } | 383 | } |
382 | int i915_save_state(struct drm_device *dev) | 384 | |
385 | void i915_save_display(struct drm_device *dev) | ||
383 | { | 386 | { |
384 | struct drm_i915_private *dev_priv = dev->dev_private; | 387 | struct drm_i915_private *dev_priv = dev->dev_private; |
385 | int i; | ||
386 | |||
387 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | ||
388 | |||
389 | /* Render Standby */ | ||
390 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
391 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
392 | |||
393 | /* Hardware status page */ | ||
394 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
395 | 388 | ||
396 | /* Display arbitration control */ | 389 | /* Display arbitration control */ |
397 | dev_priv->saveDSPARB = I915_READ(DSPARB); | 390 | dev_priv->saveDSPARB = I915_READ(DSPARB); |
@@ -399,6 +392,7 @@ int i915_save_state(struct drm_device *dev) | |||
399 | /* This is only meaningful in non-KMS mode */ | 392 | /* This is only meaningful in non-KMS mode */ |
400 | /* Don't save them in KMS mode */ | 393 | /* Don't save them in KMS mode */ |
401 | i915_save_modeset_reg(dev); | 394 | i915_save_modeset_reg(dev); |
395 | |||
402 | /* Cursor state */ | 396 | /* Cursor state */ |
403 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); | 397 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); |
404 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); | 398 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); |
@@ -448,81 +442,22 @@ int i915_save_state(struct drm_device *dev) | |||
448 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); | 442 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); |
449 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); | 443 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); |
450 | 444 | ||
451 | /* Interrupt state */ | ||
452 | dev_priv->saveIIR = I915_READ(IIR); | ||
453 | dev_priv->saveIER = I915_READ(IER); | ||
454 | dev_priv->saveIMR = I915_READ(IMR); | ||
455 | |||
456 | /* VGA state */ | 445 | /* VGA state */ |
457 | dev_priv->saveVGA0 = I915_READ(VGA0); | 446 | dev_priv->saveVGA0 = I915_READ(VGA0); |
458 | dev_priv->saveVGA1 = I915_READ(VGA1); | 447 | dev_priv->saveVGA1 = I915_READ(VGA1); |
459 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); | 448 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); |
460 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | 449 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); |
461 | 450 | ||
462 | /* Clock gating state */ | ||
463 | dev_priv->saveD_STATE = I915_READ(D_STATE); | ||
464 | dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); | ||
465 | |||
466 | /* Cache mode state */ | ||
467 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | ||
468 | |||
469 | /* Memory Arbitration state */ | ||
470 | dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); | ||
471 | |||
472 | /* Scratch space */ | ||
473 | for (i = 0; i < 16; i++) { | ||
474 | dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); | ||
475 | dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); | ||
476 | } | ||
477 | for (i = 0; i < 3; i++) | ||
478 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | ||
479 | |||
480 | /* Fences */ | ||
481 | if (IS_I965G(dev)) { | ||
482 | for (i = 0; i < 16; i++) | ||
483 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
484 | } else { | ||
485 | for (i = 0; i < 8; i++) | ||
486 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
487 | |||
488 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
489 | for (i = 0; i < 8; i++) | ||
490 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
491 | } | ||
492 | i915_save_vga(dev); | 451 | i915_save_vga(dev); |
493 | |||
494 | return 0; | ||
495 | } | 452 | } |
496 | 453 | ||
497 | int i915_restore_state(struct drm_device *dev) | 454 | void i915_restore_display(struct drm_device *dev) |
498 | { | 455 | { |
499 | struct drm_i915_private *dev_priv = dev->dev_private; | 456 | struct drm_i915_private *dev_priv = dev->dev_private; |
500 | int i; | ||
501 | |||
502 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | ||
503 | |||
504 | /* Render Standby */ | ||
505 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
506 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
507 | |||
508 | /* Hardware status page */ | ||
509 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | ||
510 | 457 | ||
511 | /* Display arbitration */ | 458 | /* Display arbitration */ |
512 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); | 459 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); |
513 | 460 | ||
514 | /* Fences */ | ||
515 | if (IS_I965G(dev)) { | ||
516 | for (i = 0; i < 16; i++) | ||
517 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
518 | } else { | ||
519 | for (i = 0; i < 8; i++) | ||
520 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
521 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
522 | for (i = 0; i < 8; i++) | ||
523 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
524 | } | ||
525 | |||
526 | /* Display port ratios (must be done before clock is set) */ | 461 | /* Display port ratios (must be done before clock is set) */ |
527 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 462 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
528 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); | 463 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); |
@@ -534,9 +469,11 @@ int i915_restore_state(struct drm_device *dev) | |||
534 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); | 469 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); |
535 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); | 470 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); |
536 | } | 471 | } |
472 | |||
537 | /* This is only meaningful in non-KMS mode */ | 473 | /* This is only meaningful in non-KMS mode */ |
538 | /* Don't restore them in KMS mode */ | 474 | /* Don't restore them in KMS mode */ |
539 | i915_restore_modeset_reg(dev); | 475 | i915_restore_modeset_reg(dev); |
476 | |||
540 | /* Cursor state */ | 477 | /* Cursor state */ |
541 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); | 478 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); |
542 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); | 479 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); |
@@ -586,6 +523,95 @@ int i915_restore_state(struct drm_device *dev) | |||
586 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); | 523 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); |
587 | DRM_UDELAY(150); | 524 | DRM_UDELAY(150); |
588 | 525 | ||
526 | i915_restore_vga(dev); | ||
527 | } | ||
528 | |||
529 | int i915_save_state(struct drm_device *dev) | ||
530 | { | ||
531 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
532 | int i; | ||
533 | |||
534 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | ||
535 | |||
536 | /* Render Standby */ | ||
537 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
538 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
539 | |||
540 | /* Hardware status page */ | ||
541 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
542 | |||
543 | i915_save_display(dev); | ||
544 | |||
545 | /* Interrupt state */ | ||
546 | dev_priv->saveIER = I915_READ(IER); | ||
547 | dev_priv->saveIMR = I915_READ(IMR); | ||
548 | |||
549 | /* Clock gating state */ | ||
550 | dev_priv->saveD_STATE = I915_READ(D_STATE); | ||
551 | dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */ | ||
552 | |||
553 | /* Cache mode state */ | ||
554 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | ||
555 | |||
556 | /* Memory Arbitration state */ | ||
557 | dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); | ||
558 | |||
559 | /* Scratch space */ | ||
560 | for (i = 0; i < 16; i++) { | ||
561 | dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); | ||
562 | dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); | ||
563 | } | ||
564 | for (i = 0; i < 3; i++) | ||
565 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | ||
566 | |||
567 | /* Fences */ | ||
568 | if (IS_I965G(dev)) { | ||
569 | for (i = 0; i < 16; i++) | ||
570 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
571 | } else { | ||
572 | for (i = 0; i < 8; i++) | ||
573 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
574 | |||
575 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
576 | for (i = 0; i < 8; i++) | ||
577 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
578 | } | ||
579 | |||
580 | return 0; | ||
581 | } | ||
582 | |||
583 | int i915_restore_state(struct drm_device *dev) | ||
584 | { | ||
585 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
586 | int i; | ||
587 | |||
588 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | ||
589 | |||
590 | /* Render Standby */ | ||
591 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
592 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
593 | |||
594 | /* Hardware status page */ | ||
595 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | ||
596 | |||
597 | /* Fences */ | ||
598 | if (IS_I965G(dev)) { | ||
599 | for (i = 0; i < 16; i++) | ||
600 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
601 | } else { | ||
602 | for (i = 0; i < 8; i++) | ||
603 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
604 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
605 | for (i = 0; i < 8; i++) | ||
606 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
607 | } | ||
608 | |||
609 | i915_restore_display(dev); | ||
610 | |||
611 | /* Interrupt state */ | ||
612 | I915_WRITE (IER, dev_priv->saveIER); | ||
613 | I915_WRITE (IMR, dev_priv->saveIMR); | ||
614 | |||
589 | /* Clock gating state */ | 615 | /* Clock gating state */ |
590 | I915_WRITE (D_STATE, dev_priv->saveD_STATE); | 616 | I915_WRITE (D_STATE, dev_priv->saveD_STATE); |
591 | I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); | 617 | I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); |
@@ -603,8 +629,6 @@ int i915_restore_state(struct drm_device *dev) | |||
603 | for (i = 0; i < 3; i++) | 629 | for (i = 0; i < 3; i++) |
604 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | 630 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); |
605 | 631 | ||
606 | i915_restore_vga(dev); | ||
607 | |||
608 | return 0; | 632 | return 0; |
609 | } | 633 | } |
610 | 634 | ||
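Splitting the display save/restore out of i915_save_state()/i915_restore_state() lets the GPU reset path snapshot display state around a GDRST cycle without touching fences or interrupt state. A hedged outline of how a reset routine could use the new pair; the actual i965_reset() lives elsewhere in this series, and the GDRST sequencing below is illustrative only:

#include <linux/delay.h>

#include "i915_drv.h"

/* Illustrative outline of a render-core reset using the split-out
 * helpers; the real i965_reset() is not shown in this diff and the
 * GDRST bit handling here is a sketch, not the verified procedure.
 */
static int demo_reset_render(struct drm_device *dev)
{
	u8 gdrst;

	i915_save_display(dev);		/* preserve display config across reset */

	pci_read_config_byte(dev->pdev, GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, GDRST, gdrst | GDRST_RENDER | 0x1);
	msleep(1);			/* give the render core time to reset */
	pci_write_config_byte(dev->pdev, GDRST, gdrst & ~0x1);

	i915_restore_display(dev);
	return 0;
}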
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h new file mode 100644 index 000000000000..5567a40816f3 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -0,0 +1,315 @@ | |||
1 | #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _I915_TRACE_H_ | ||
3 | |||
4 | #include <linux/stringify.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #include <drm/drmP.h> | ||
9 | |||
10 | #undef TRACE_SYSTEM | ||
11 | #define TRACE_SYSTEM i915 | ||
12 | #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) | ||
13 | #define TRACE_INCLUDE_FILE i915_trace | ||
14 | |||
15 | /* object tracking */ | ||
16 | |||
17 | TRACE_EVENT(i915_gem_object_create, | ||
18 | |||
19 | TP_PROTO(struct drm_gem_object *obj), | ||
20 | |||
21 | TP_ARGS(obj), | ||
22 | |||
23 | TP_STRUCT__entry( | ||
24 | __field(struct drm_gem_object *, obj) | ||
25 | __field(u32, size) | ||
26 | ), | ||
27 | |||
28 | TP_fast_assign( | ||
29 | __entry->obj = obj; | ||
30 | __entry->size = obj->size; | ||
31 | ), | ||
32 | |||
33 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) | ||
34 | ); | ||
35 | |||
36 | TRACE_EVENT(i915_gem_object_bind, | ||
37 | |||
38 | TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), | ||
39 | |||
40 | TP_ARGS(obj, gtt_offset), | ||
41 | |||
42 | TP_STRUCT__entry( | ||
43 | __field(struct drm_gem_object *, obj) | ||
44 | __field(u32, gtt_offset) | ||
45 | ), | ||
46 | |||
47 | TP_fast_assign( | ||
48 | __entry->obj = obj; | ||
49 | __entry->gtt_offset = gtt_offset; | ||
50 | ), | ||
51 | |||
52 | TP_printk("obj=%p, gtt_offset=%08x", | ||
53 | __entry->obj, __entry->gtt_offset) | ||
54 | ); | ||
55 | |||
56 | TRACE_EVENT(i915_gem_object_clflush, | ||
57 | |||
58 | TP_PROTO(struct drm_gem_object *obj), | ||
59 | |||
60 | TP_ARGS(obj), | ||
61 | |||
62 | TP_STRUCT__entry( | ||
63 | __field(struct drm_gem_object *, obj) | ||
64 | ), | ||
65 | |||
66 | TP_fast_assign( | ||
67 | __entry->obj = obj; | ||
68 | ), | ||
69 | |||
70 | TP_printk("obj=%p", __entry->obj) | ||
71 | ); | ||
72 | |||
73 | TRACE_EVENT(i915_gem_object_change_domain, | ||
74 | |||
75 | TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | ||
76 | |||
77 | TP_ARGS(obj, old_read_domains, old_write_domain), | ||
78 | |||
79 | TP_STRUCT__entry( | ||
80 | __field(struct drm_gem_object *, obj) | ||
81 | __field(u32, read_domains) | ||
82 | __field(u32, write_domain) | ||
83 | ), | ||
84 | |||
85 | TP_fast_assign( | ||
86 | __entry->obj = obj; | ||
87 | __entry->read_domains = obj->read_domains | (old_read_domains << 16); | ||
88 | __entry->write_domain = obj->write_domain | (old_write_domain << 16); | ||
89 | ), | ||
90 | |||
91 | TP_printk("obj=%p, read=%04x, write=%04x", | ||
92 | __entry->obj, | ||
93 | __entry->read_domains, __entry->write_domain) | ||
94 | ); | ||
95 | |||
96 | TRACE_EVENT(i915_gem_object_get_fence, | ||
97 | |||
98 | TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), | ||
99 | |||
100 | TP_ARGS(obj, fence, tiling_mode), | ||
101 | |||
102 | TP_STRUCT__entry( | ||
103 | __field(struct drm_gem_object *, obj) | ||
104 | __field(int, fence) | ||
105 | __field(int, tiling_mode) | ||
106 | ), | ||
107 | |||
108 | TP_fast_assign( | ||
109 | __entry->obj = obj; | ||
110 | __entry->fence = fence; | ||
111 | __entry->tiling_mode = tiling_mode; | ||
112 | ), | ||
113 | |||
114 | TP_printk("obj=%p, fence=%d, tiling=%d", | ||
115 | __entry->obj, __entry->fence, __entry->tiling_mode) | ||
116 | ); | ||
117 | |||
118 | TRACE_EVENT(i915_gem_object_unbind, | ||
119 | |||
120 | TP_PROTO(struct drm_gem_object *obj), | ||
121 | |||
122 | TP_ARGS(obj), | ||
123 | |||
124 | TP_STRUCT__entry( | ||
125 | __field(struct drm_gem_object *, obj) | ||
126 | ), | ||
127 | |||
128 | TP_fast_assign( | ||
129 | __entry->obj = obj; | ||
130 | ), | ||
131 | |||
132 | TP_printk("obj=%p", __entry->obj) | ||
133 | ); | ||
134 | |||
135 | TRACE_EVENT(i915_gem_object_destroy, | ||
136 | |||
137 | TP_PROTO(struct drm_gem_object *obj), | ||
138 | |||
139 | TP_ARGS(obj), | ||
140 | |||
141 | TP_STRUCT__entry( | ||
142 | __field(struct drm_gem_object *, obj) | ||
143 | ), | ||
144 | |||
145 | TP_fast_assign( | ||
146 | __entry->obj = obj; | ||
147 | ), | ||
148 | |||
149 | TP_printk("obj=%p", __entry->obj) | ||
150 | ); | ||
151 | |||
152 | /* batch tracing */ | ||
153 | |||
154 | TRACE_EVENT(i915_gem_request_submit, | ||
155 | |||
156 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
157 | |||
158 | TP_ARGS(dev, seqno), | ||
159 | |||
160 | TP_STRUCT__entry( | ||
161 | __field(struct drm_device *, dev) | ||
162 | __field(u32, seqno) | ||
163 | ), | ||
164 | |||
165 | TP_fast_assign( | ||
166 | __entry->dev = dev; | ||
167 | __entry->seqno = seqno; | ||
168 | ), | ||
169 | |||
170 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
171 | ); | ||
172 | |||
173 | TRACE_EVENT(i915_gem_request_flush, | ||
174 | |||
175 | TP_PROTO(struct drm_device *dev, u32 seqno, | ||
176 | u32 flush_domains, u32 invalidate_domains), | ||
177 | |||
178 | TP_ARGS(dev, seqno, flush_domains, invalidate_domains), | ||
179 | |||
180 | TP_STRUCT__entry( | ||
181 | __field(struct drm_device *, dev) | ||
182 | __field(u32, seqno) | ||
183 | __field(u32, flush_domains) | ||
184 | __field(u32, invalidate_domains) | ||
185 | ), | ||
186 | |||
187 | TP_fast_assign( | ||
188 | __entry->dev = dev; | ||
189 | __entry->seqno = seqno; | ||
190 | __entry->flush_domains = flush_domains; | ||
191 | __entry->invalidate_domains = invalidate_domains; | ||
192 | ), | ||
193 | |||
194 | TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x", | ||
195 | __entry->dev, __entry->seqno, | ||
196 | __entry->flush_domains, __entry->invalidate_domains) | ||
197 | ); | ||
198 | |||
199 | |||
200 | TRACE_EVENT(i915_gem_request_complete, | ||
201 | |||
202 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
203 | |||
204 | TP_ARGS(dev, seqno), | ||
205 | |||
206 | TP_STRUCT__entry( | ||
207 | __field(struct drm_device *, dev) | ||
208 | __field(u32, seqno) | ||
209 | ), | ||
210 | |||
211 | TP_fast_assign( | ||
212 | __entry->dev = dev; | ||
213 | __entry->seqno = seqno; | ||
214 | ), | ||
215 | |||
216 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
217 | ); | ||
218 | |||
219 | TRACE_EVENT(i915_gem_request_retire, | ||
220 | |||
221 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
222 | |||
223 | TP_ARGS(dev, seqno), | ||
224 | |||
225 | TP_STRUCT__entry( | ||
226 | __field(struct drm_device *, dev) | ||
227 | __field(u32, seqno) | ||
228 | ), | ||
229 | |||
230 | TP_fast_assign( | ||
231 | __entry->dev = dev; | ||
232 | __entry->seqno = seqno; | ||
233 | ), | ||
234 | |||
235 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
236 | ); | ||
237 | |||
238 | TRACE_EVENT(i915_gem_request_wait_begin, | ||
239 | |||
240 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
241 | |||
242 | TP_ARGS(dev, seqno), | ||
243 | |||
244 | TP_STRUCT__entry( | ||
245 | __field(struct drm_device *, dev) | ||
246 | __field(u32, seqno) | ||
247 | ), | ||
248 | |||
249 | TP_fast_assign( | ||
250 | __entry->dev = dev; | ||
251 | __entry->seqno = seqno; | ||
252 | ), | ||
253 | |||
254 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
255 | ); | ||
256 | |||
257 | TRACE_EVENT(i915_gem_request_wait_end, | ||
258 | |||
259 | TP_PROTO(struct drm_device *dev, u32 seqno), | ||
260 | |||
261 | TP_ARGS(dev, seqno), | ||
262 | |||
263 | TP_STRUCT__entry( | ||
264 | __field(struct drm_device *, dev) | ||
265 | __field(u32, seqno) | ||
266 | ), | ||
267 | |||
268 | TP_fast_assign( | ||
269 | __entry->dev = dev; | ||
270 | __entry->seqno = seqno; | ||
271 | ), | ||
272 | |||
273 | TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno) | ||
274 | ); | ||
275 | |||
276 | TRACE_EVENT(i915_ring_wait_begin, | ||
277 | |||
278 | TP_PROTO(struct drm_device *dev), | ||
279 | |||
280 | TP_ARGS(dev), | ||
281 | |||
282 | TP_STRUCT__entry( | ||
283 | __field(struct drm_device *, dev) | ||
284 | ), | ||
285 | |||
286 | TP_fast_assign( | ||
287 | __entry->dev = dev; | ||
288 | ), | ||
289 | |||
290 | TP_printk("dev=%p", __entry->dev) | ||
291 | ); | ||
292 | |||
293 | TRACE_EVENT(i915_ring_wait_end, | ||
294 | |||
295 | TP_PROTO(struct drm_device *dev), | ||
296 | |||
297 | TP_ARGS(dev), | ||
298 | |||
299 | TP_STRUCT__entry( | ||
300 | __field(struct drm_device *, dev) | ||
301 | ), | ||
302 | |||
303 | TP_fast_assign( | ||
304 | __entry->dev = dev; | ||
305 | ), | ||
306 | |||
307 | TP_printk("dev=%p", __entry->dev) | ||
308 | ); | ||
309 | |||
310 | #endif /* _I915_TRACE_H_ */ | ||
311 | |||
312 | /* This part must be outside protection */ | ||
313 | #undef TRACE_INCLUDE_PATH | ||
314 | #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 | ||
315 | #include <trace/define_trace.h> | ||
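Note: each TRACE_EVENT(name, ...) above expands into a trace_name() inline that the driver calls at the instrumented site, plus the ftrace glue exposing the event through debugfs (under events/i915/, assuming TRACE_SYSTEM is defined as i915 above this excerpt). A minimal sketch of emitting two of the request tracepoints; the wrapper functions here are illustrative, not part of the patch:

	/*
	 * Illustrative only: trace_i915_gem_request_submit() and
	 * trace_i915_gem_request_flush() are generated from the
	 * TRACE_EVENT() definitions above.
	 */
	#include "i915_drv.h"
	#include "i915_trace.h"

	static void example_note_submit(struct drm_device *dev, u32 seqno)
	{
		trace_i915_gem_request_submit(dev, seqno);
	}

	static void example_note_flush(struct drm_device *dev, u32 seqno,
				       u32 flush, u32 invalidate)
	{
		trace_i915_gem_request_flush(dev, seqno, flush, invalidate);
	}

With the event enabled at runtime, each call logs the corresponding TP_printk format above ("dev=%p, seqno=%u", and so on) into the trace ring buffer.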
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c new file mode 100644 index 000000000000..ead876eb6ea0 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace_points.c | |||
@@ -0,0 +1,11 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 Intel Corporation | ||
3 | * | ||
4 | * Authors: | ||
5 | * Chris Wilson <chris@chris-wilson.co.uk> | ||
6 | */ | ||
7 | |||
8 | #include "i915_drv.h" | ||
9 | |||
10 | #define CREATE_TRACE_POINTS | ||
11 | #include "i915_trace.h" | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1e28c1652fd0..4337414846b6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
217 | if (IS_I85X(dev_priv->dev)) | 217 | if (IS_I85X(dev_priv->dev)) |
218 | dev_priv->lvds_ssc_freq = | 218 | dev_priv->lvds_ssc_freq = |
219 | general->ssc_freq ? 66 : 48; | 219 | general->ssc_freq ? 66 : 48; |
220 | else if (IS_IGDNG(dev_priv->dev)) | ||
221 | dev_priv->lvds_ssc_freq = | ||
222 | general->ssc_freq ? 100 : 120; | ||
220 | else | 223 | else |
221 | dev_priv->lvds_ssc_freq = | 224 | dev_priv->lvds_ssc_freq = |
222 | general->ssc_freq ? 100 : 96; | 225 | general->ssc_freq ? 100 : 96; |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 88814fa2dfd2..212e22740fc1 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -179,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
179 | { | 179 | { |
180 | struct drm_device *dev = connector->dev; | 180 | struct drm_device *dev = connector->dev; |
181 | struct drm_i915_private *dev_priv = dev->dev_private; | 181 | struct drm_i915_private *dev_priv = dev->dev_private; |
182 | u32 adpa, temp; | 182 | u32 adpa; |
183 | bool ret; | 183 | bool ret; |
184 | 184 | ||
185 | temp = adpa = I915_READ(PCH_ADPA); | 185 | adpa = I915_READ(PCH_ADPA); |
186 | |||
187 | adpa &= ~ADPA_DAC_ENABLE; | ||
188 | I915_WRITE(PCH_ADPA, adpa); | ||
189 | 186 | ||
190 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 187 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
191 | 188 | ||
@@ -212,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
212 | else | 209 | else |
213 | ret = false; | 210 | ret = false; |
214 | 211 | ||
215 | /* restore origin register */ | ||
216 | I915_WRITE(PCH_ADPA, temp); | ||
217 | return ret; | 212 | return ret; |
218 | } | 213 | } |
219 | 214 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0227b1652906..93ff6c03733e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -24,6 +24,8 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/input.h> | ||
27 | #include <linux/i2c.h> | 29 | #include <linux/i2c.h> |
28 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
29 | #include "drmP.h" | 31 | #include "drmP.h" |
@@ -875,7 +877,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
875 | refclk, best_clock); | 877 | refclk, best_clock); |
876 | 878 | ||
877 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 879 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
878 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 880 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == |
879 | LVDS_CLKB_POWER_UP) | 881 | LVDS_CLKB_POWER_UP) |
880 | clock.p2 = limit->p2.p2_fast; | 882 | clock.p2 = limit->p2.p2_fast; |
881 | else | 883 | else |
@@ -952,6 +954,241 @@ intel_wait_for_vblank(struct drm_device *dev) | |||
952 | mdelay(20); | 954 | mdelay(20); |
953 | } | 955 | } |
954 | 956 | ||
957 | /* Parameters have changed, update FBC info */ | ||
958 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
959 | { | ||
960 | struct drm_device *dev = crtc->dev; | ||
961 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
962 | struct drm_framebuffer *fb = crtc->fb; | ||
963 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
964 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | ||
965 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
966 | int plane, i; | ||
967 | u32 fbc_ctl, fbc_ctl2; | ||
968 | |||
969 | dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; | ||
970 | |||
971 | if (fb->pitch < dev_priv->cfb_pitch) | ||
972 | dev_priv->cfb_pitch = fb->pitch; | ||
973 | |||
974 | /* FBC_CTL wants 64B units */ | ||
975 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
976 | dev_priv->cfb_fence = obj_priv->fence_reg; | ||
977 | dev_priv->cfb_plane = intel_crtc->plane; | ||
978 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | ||
979 | |||
980 | /* Clear old tags */ | ||
981 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | ||
982 | I915_WRITE(FBC_TAG + (i * 4), 0); | ||
983 | |||
984 | /* Set it up... */ | ||
985 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | ||
986 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
987 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | ||
988 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | ||
989 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | ||
990 | |||
991 | /* enable it... */ | ||
992 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | ||
993 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | ||
994 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | ||
995 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
996 | fbc_ctl |= dev_priv->cfb_fence; | ||
997 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
998 | |||
999 | DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ", | ||
1000 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); | ||
1001 | } | ||
1002 | |||
1003 | void i8xx_disable_fbc(struct drm_device *dev) | ||
1004 | { | ||
1005 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1006 | u32 fbc_ctl; | ||
1007 | |||
1008 | if (!I915_HAS_FBC(dev)) | ||
1009 | return; | ||
1010 | |||
1011 | /* Disable compression */ | ||
1012 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
1013 | fbc_ctl &= ~FBC_CTL_EN; | ||
1014 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
1015 | |||
1016 | /* Wait for compressing bit to clear */ | ||
1017 | while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) | ||
1018 | ; /* nothing */ | ||
1019 | |||
1020 | intel_wait_for_vblank(dev); | ||
1021 | |||
1022 | DRM_DEBUG("disabled FBC\n"); | ||
1023 | } | ||
1024 | |||
1025 | static bool i8xx_fbc_enabled(struct drm_crtc *crtc) | ||
1026 | { | ||
1027 | struct drm_device *dev = crtc->dev; | ||
1028 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1029 | |||
1030 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | ||
1031 | } | ||
1032 | |||
1033 | static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
1034 | { | ||
1035 | struct drm_device *dev = crtc->dev; | ||
1036 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1037 | struct drm_framebuffer *fb = crtc->fb; | ||
1038 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
1039 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | ||
1040 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1041 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : | ||
1042 | DPFC_CTL_PLANEB); | ||
1043 | unsigned long stall_watermark = 200; | ||
1044 | u32 dpfc_ctl; | ||
1045 | |||
1046 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
1047 | dev_priv->cfb_fence = obj_priv->fence_reg; | ||
1048 | dev_priv->cfb_plane = intel_crtc->plane; | ||
1049 | |||
1050 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | ||
1051 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1052 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | ||
1053 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | ||
1054 | } else { | ||
1055 | I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); | ||
1056 | } | ||
1057 | |||
1058 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1059 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | ||
1060 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | ||
1061 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | ||
1062 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); | ||
1063 | |||
1064 | /* enable it... */ | ||
1065 | I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); | ||
1066 | |||
1067 | DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); | ||
1068 | } | ||
1069 | |||
1070 | void g4x_disable_fbc(struct drm_device *dev) | ||
1071 | { | ||
1072 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1073 | u32 dpfc_ctl; | ||
1074 | |||
1075 | /* Disable compression */ | ||
1076 | dpfc_ctl = I915_READ(DPFC_CONTROL); | ||
1077 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
1078 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1079 | intel_wait_for_vblank(dev); | ||
1080 | |||
1081 | DRM_DEBUG("disabled FBC\n"); | ||
1082 | } | ||
1083 | |||
1084 | static bool g4x_fbc_enabled(struct drm_crtc *crtc) | ||
1085 | { | ||
1086 | struct drm_device *dev = crtc->dev; | ||
1087 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1088 | |||
1089 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | ||
1090 | } | ||
1091 | |||
1092 | /** | ||
1093 | * intel_update_fbc - enable/disable FBC as needed | ||
1094 | * @crtc: CRTC to point the compressor at | ||
1095 | * @mode: mode in use | ||
1096 | * | ||
1097 | * Set up the framebuffer compression hardware at mode set time. We | ||
1098 | * enable it if possible: | ||
1099 | * - plane A only (on pre-965) | ||
1100 | * - no pixel multiply/line duplication | ||
1101 | * - no alpha buffer discard | ||
1102 | * - no dual wide | ||
1103 | * - framebuffer <= 2048 in width, 1536 in height | ||
1104 | * | ||
1105 | * We can't assume that any compression will take place (worst case), | ||
1106 | * so the compressed buffer has to be the same size as the uncompressed | ||
1107 | * one. It also must reside (along with the line length buffer) in | ||
1108 | * stolen memory. | ||
1109 | * | ||
1110 | * We need to enable/disable FBC on a global basis. | ||
1111 | */ | ||
1112 | static void intel_update_fbc(struct drm_crtc *crtc, | ||
1113 | struct drm_display_mode *mode) | ||
1114 | { | ||
1115 | struct drm_device *dev = crtc->dev; | ||
1116 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1117 | struct drm_framebuffer *fb = crtc->fb; | ||
1118 | struct intel_framebuffer *intel_fb; | ||
1119 | struct drm_i915_gem_object *obj_priv; | ||
1120 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1121 | int plane = intel_crtc->plane; | ||
1122 | |||
1123 | if (!i915_powersave) | ||
1124 | return; | ||
1125 | |||
1126 | if (!dev_priv->display.fbc_enabled || | ||
1127 | !dev_priv->display.enable_fbc || | ||
1128 | !dev_priv->display.disable_fbc) | ||
1129 | return; | ||
1130 | |||
1131 | if (!crtc->fb) | ||
1132 | return; | ||
1133 | |||
1134 | intel_fb = to_intel_framebuffer(fb); | ||
1135 | obj_priv = intel_fb->obj->driver_private; | ||
1136 | |||
1137 | /* | ||
1138 | * If FBC is already on, we just have to verify that we can | ||
1139 | * keep it that way... | ||
1140 | * Need to disable if: | ||
1141 | * - changing FBC params (stride, fence, mode) | ||
1142 | * - new fb is too large to fit in compressed buffer | ||
1143 | * - going to an unsupported config (interlace, pixel multiply, etc.) | ||
1144 | */ | ||
1145 | if (intel_fb->obj->size > dev_priv->cfb_size) { | ||
1146 | DRM_DEBUG("framebuffer too large, disabling compression\n"); | ||
1147 | goto out_disable; | ||
1148 | } | ||
1149 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | ||
1150 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | ||
1151 | DRM_DEBUG("mode incompatible with compression, disabling\n"); | ||
1152 | goto out_disable; | ||
1153 | } | ||
1154 | if ((mode->hdisplay > 2048) || | ||
1155 | (mode->vdisplay > 1536)) { | ||
1156 | DRM_DEBUG("mode too large for compression, disabling\n"); | ||
1157 | goto out_disable; | ||
1158 | } | ||
1159 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { | ||
1160 | DRM_DEBUG("plane not 0, disabling compression\n"); | ||
1161 | goto out_disable; | ||
1162 | } | ||
1163 | if (obj_priv->tiling_mode != I915_TILING_X) { | ||
1164 | DRM_DEBUG("framebuffer not tiled, disabling compression\n"); | ||
1165 | goto out_disable; | ||
1166 | } | ||
1167 | |||
1168 | if (dev_priv->display.fbc_enabled(crtc)) { | ||
1169 | /* We can re-enable it in this case, but need to update pitch */ | ||
1170 | if (fb->pitch > dev_priv->cfb_pitch) | ||
1171 | dev_priv->display.disable_fbc(dev); | ||
1172 | if (obj_priv->fence_reg != dev_priv->cfb_fence) | ||
1173 | dev_priv->display.disable_fbc(dev); | ||
1174 | if (plane != dev_priv->cfb_plane) | ||
1175 | dev_priv->display.disable_fbc(dev); | ||
1176 | } | ||
1177 | |||
1178 | if (!dev_priv->display.fbc_enabled(crtc)) { | ||
1179 | /* Now try to turn it back on if possible */ | ||
1180 | dev_priv->display.enable_fbc(crtc, 500); | ||
1181 | } | ||
1182 | |||
1183 | return; | ||
1184 | |||
1185 | out_disable: | ||
1186 | DRM_DEBUG("unsupported config, disabling FBC\n"); | ||
1187 | /* Multiple disables should be harmless */ | ||
1188 | if (dev_priv->display.fbc_enabled(crtc)) | ||
1189 | dev_priv->display.disable_fbc(dev); | ||
1190 | } | ||
1191 | |||
955 | static int | 1192 | static int |
956 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | 1193 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
957 | struct drm_framebuffer *old_fb) | 1194 | struct drm_framebuffer *old_fb) |
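Note on the FBC hunk above: FBC_CTL takes the compressed-buffer pitch in 64-byte units, minus one, in an 8-bit field, which is why i8xx_enable_fbc() divides by 64 before programming the register. A worked check with an illustrative pitch value (standalone sketch, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned long pitch = 4096;               /* example pitch, in bytes */
		unsigned long encoded = (pitch / 64) - 1; /* 63, fits in 8 bits */

		/* The stride field is 8 bits wide, so the largest
		 * encodable pitch is 256 * 64 = 16384 bytes. */
		assert(encoded <= 0xff);
		return 0;
	}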
@@ -964,12 +1201,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
964 | struct drm_i915_gem_object *obj_priv; | 1201 | struct drm_i915_gem_object *obj_priv; |
965 | struct drm_gem_object *obj; | 1202 | struct drm_gem_object *obj; |
966 | int pipe = intel_crtc->pipe; | 1203 | int pipe = intel_crtc->pipe; |
1204 | int plane = intel_crtc->plane; | ||
967 | unsigned long Start, Offset; | 1205 | unsigned long Start, Offset; |
968 | int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); | 1206 | int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); |
969 | int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); | 1207 | int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); |
970 | int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; | 1208 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; |
971 | int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); | 1209 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); |
972 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | 1210 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
973 | u32 dspcntr, alignment; | 1211 | u32 dspcntr, alignment; |
974 | int ret; | 1212 | int ret; |
975 | 1213 | ||
@@ -979,12 +1217,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
979 | return 0; | 1217 | return 0; |
980 | } | 1218 | } |
981 | 1219 | ||
982 | switch (pipe) { | 1220 | switch (plane) { |
983 | case 0: | 1221 | case 0: |
984 | case 1: | 1222 | case 1: |
985 | break; | 1223 | break; |
986 | default: | 1224 | default: |
987 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | 1225 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
988 | return -EINVAL; | 1226 | return -EINVAL; |
989 | } | 1227 | } |
990 | 1228 | ||
@@ -1086,6 +1324,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1086 | I915_READ(dspbase); | 1324 | I915_READ(dspbase); |
1087 | } | 1325 | } |
1088 | 1326 | ||
1327 | if ((IS_I965G(dev) || plane == 0)) | ||
1328 | intel_update_fbc(crtc, &crtc->mode); | ||
1329 | |||
1089 | intel_wait_for_vblank(dev); | 1330 | intel_wait_for_vblank(dev); |
1090 | 1331 | ||
1091 | if (old_fb) { | 1332 | if (old_fb) { |
@@ -1217,6 +1458,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1217 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | 1458 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; |
1218 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; | 1459 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; |
1219 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; | 1460 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; |
1461 | int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS; | ||
1220 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 1462 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
1221 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 1463 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
1222 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | 1464 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; |
@@ -1268,6 +1510,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1268 | } | 1510 | } |
1269 | } | 1511 | } |
1270 | 1512 | ||
1513 | /* Enable panel fitting for LVDS */ | ||
1514 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
1515 | temp = I915_READ(pf_ctl_reg); | ||
1516 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE); | ||
1517 | |||
1518 | /* currently full aspect */ | ||
1519 | I915_WRITE(pf_win_pos, 0); | ||
1520 | |||
1521 | I915_WRITE(pf_win_size, | ||
1522 | (dev_priv->panel_fixed_mode->hdisplay << 16) | | ||
1523 | (dev_priv->panel_fixed_mode->vdisplay)); | ||
1524 | } | ||
1525 | |||
1271 | /* Enable CPU pipe */ | 1526 | /* Enable CPU pipe */ |
1272 | temp = I915_READ(pipeconf_reg); | 1527 | temp = I915_READ(pipeconf_reg); |
1273 | if ((temp & PIPEACONF_ENABLE) == 0) { | 1528 | if ((temp & PIPEACONF_ENABLE) == 0) { |
@@ -1532,9 +1787,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1532 | struct drm_i915_private *dev_priv = dev->dev_private; | 1787 | struct drm_i915_private *dev_priv = dev->dev_private; |
1533 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1788 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1534 | int pipe = intel_crtc->pipe; | 1789 | int pipe = intel_crtc->pipe; |
1790 | int plane = intel_crtc->plane; | ||
1535 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 1791 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; |
1536 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | 1792 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
1537 | int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; | 1793 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; |
1538 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 1794 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
1539 | u32 temp; | 1795 | u32 temp; |
1540 | 1796 | ||
@@ -1577,6 +1833,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1577 | 1833 | ||
1578 | intel_crtc_load_lut(crtc); | 1834 | intel_crtc_load_lut(crtc); |
1579 | 1835 | ||
1836 | if ((IS_I965G(dev) || plane == 0)) | ||
1837 | intel_update_fbc(crtc, &crtc->mode); | ||
1838 | |||
1580 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | 1839 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
1581 | //intel_crtc_dpms_video(crtc, true); TODO | 1840 | //intel_crtc_dpms_video(crtc, true); TODO |
1582 | intel_update_watermarks(dev); | 1841 | intel_update_watermarks(dev); |
@@ -1586,6 +1845,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1586 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | 1845 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
1587 | //intel_crtc_dpms_video(crtc, FALSE); TODO | 1846 | //intel_crtc_dpms_video(crtc, FALSE); TODO |
1588 | 1847 | ||
1848 | if (dev_priv->cfb_plane == plane && | ||
1849 | dev_priv->display.disable_fbc) | ||
1850 | dev_priv->display.disable_fbc(dev); | ||
1851 | |||
1589 | /* Disable the VGA plane that we never use */ | 1852 | /* Disable the VGA plane that we never use */ |
1590 | i915_disable_vga(dev); | 1853 | i915_disable_vga(dev); |
1591 | 1854 | ||
@@ -1634,15 +1897,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1634 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | 1897 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) |
1635 | { | 1898 | { |
1636 | struct drm_device *dev = crtc->dev; | 1899 | struct drm_device *dev = crtc->dev; |
1900 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1637 | struct drm_i915_master_private *master_priv; | 1901 | struct drm_i915_master_private *master_priv; |
1638 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1902 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1639 | int pipe = intel_crtc->pipe; | 1903 | int pipe = intel_crtc->pipe; |
1640 | bool enabled; | 1904 | bool enabled; |
1641 | 1905 | ||
1642 | if (IS_IGDNG(dev)) | 1906 | dev_priv->display.dpms(crtc, mode); |
1643 | igdng_crtc_dpms(crtc, mode); | ||
1644 | else | ||
1645 | i9xx_crtc_dpms(crtc, mode); | ||
1646 | 1907 | ||
1647 | intel_crtc->dpms_mode = mode; | 1908 | intel_crtc->dpms_mode = mode; |
1648 | 1909 | ||
@@ -1709,56 +1970,68 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
1709 | return true; | 1970 | return true; |
1710 | } | 1971 | } |
1711 | 1972 | ||
1973 | static int i945_get_display_clock_speed(struct drm_device *dev) | ||
1974 | { | ||
1975 | return 400000; | ||
1976 | } | ||
1712 | 1977 | ||
1713 | /** Returns the core display clock speed for i830 - i945 */ | 1978 | static int i915_get_display_clock_speed(struct drm_device *dev) |
1714 | static int intel_get_core_clock_speed(struct drm_device *dev) | ||
1715 | { | 1979 | { |
1980 | return 333000; | ||
1981 | } | ||
1716 | 1982 | ||
1717 | /* Core clock values taken from the published datasheets. | 1983 | static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) |
1718 | * The 830 may go up to 166 Mhz, which we should check. | 1984 | { |
1719 | */ | 1985 | return 200000; |
1720 | if (IS_I945G(dev)) | 1986 | } |
1721 | return 400000; | ||
1722 | else if (IS_I915G(dev)) | ||
1723 | return 333000; | ||
1724 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) | ||
1725 | return 200000; | ||
1726 | else if (IS_I915GM(dev)) { | ||
1727 | u16 gcfgc = 0; | ||
1728 | 1987 | ||
1729 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | 1988 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
1989 | { | ||
1990 | u16 gcfgc = 0; | ||
1730 | 1991 | ||
1731 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) | 1992 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
1732 | return 133000; | 1993 | |
1733 | else { | 1994 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) |
1734 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { | 1995 | return 133000; |
1735 | case GC_DISPLAY_CLOCK_333_MHZ: | 1996 | else { |
1736 | return 333000; | 1997 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { |
1737 | default: | 1998 | case GC_DISPLAY_CLOCK_333_MHZ: |
1738 | case GC_DISPLAY_CLOCK_190_200_MHZ: | 1999 | return 333000; |
1739 | return 190000; | 2000 | default: |
1740 | } | 2001 | case GC_DISPLAY_CLOCK_190_200_MHZ: |
1741 | } | 2002 | return 190000; |
1742 | } else if (IS_I865G(dev)) | ||
1743 | return 266000; | ||
1744 | else if (IS_I855(dev)) { | ||
1745 | u16 hpllcc = 0; | ||
1746 | /* Assume that the hardware is in the high speed state. This | ||
1747 | * should be the default. | ||
1748 | */ | ||
1749 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | ||
1750 | case GC_CLOCK_133_200: | ||
1751 | case GC_CLOCK_100_200: | ||
1752 | return 200000; | ||
1753 | case GC_CLOCK_166_250: | ||
1754 | return 250000; | ||
1755 | case GC_CLOCK_100_133: | ||
1756 | return 133000; | ||
1757 | } | 2003 | } |
1758 | } else /* 852, 830 */ | 2004 | } |
2005 | } | ||
2006 | |||
2007 | static int i865_get_display_clock_speed(struct drm_device *dev) | ||
2008 | { | ||
2009 | return 266000; | ||
2010 | } | ||
2011 | |||
2012 | static int i855_get_display_clock_speed(struct drm_device *dev) | ||
2013 | { | ||
2014 | u16 hpllcc = 0; | ||
2015 | /* Assume that the hardware is in the high speed state. This | ||
2016 | * should be the default. | ||
2017 | */ | ||
2018 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | ||
2019 | case GC_CLOCK_133_200: | ||
2020 | case GC_CLOCK_100_200: | ||
2021 | return 200000; | ||
2022 | case GC_CLOCK_166_250: | ||
2023 | return 250000; | ||
2024 | case GC_CLOCK_100_133: | ||
1759 | return 133000; | 2025 | return 133000; |
2026 | } | ||
2027 | |||
2028 | /* Shouldn't happen */ | ||
2029 | return 0; | ||
2030 | } | ||
1760 | 2031 | ||
1761 | return 0; /* Silence gcc warning */ | 2032 | static int i830_get_display_clock_speed(struct drm_device *dev) |
2033 | { | ||
2034 | return 133000; | ||
1762 | } | 2035 | } |
1763 | 2036 | ||
1764 | /** | 2037 | /** |
@@ -1921,7 +2194,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
1921 | { | 2194 | { |
1922 | long entries_required, wm_size; | 2195 | long entries_required, wm_size; |
1923 | 2196 | ||
1924 | entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; | 2197 | /* |
2198 | * Note: we need to make sure we don't overflow for various clock & | ||
2199 | * latency values. | ||
2200 | * clocks go from a few thousand to several hundred thousand. | ||
2201 | * latency is usually a few thousand | ||
2202 | */ | ||
2203 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / | ||
2204 | 1000; | ||
1925 | entries_required /= wm->cacheline_size; | 2205 | entries_required /= wm->cacheline_size; |
1926 | 2206 | ||
1927 | DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); | 2207 | DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); |
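Note: the reordered expression above is an overflow fix, not a cosmetic change. On 32-bit builds a long is 32 bits, and the old clock_in_khz * pixel_size * latency_ns product exceeds 2^31 for realistic inputs. A worked example with illustrative values:

	/* clock_in_khz = 400000, pixel_size = 4, latency_ns = 5000:
	 * old order: 400000 * 4 * 5000 = 8,000,000,000  -> exceeds 2^31
	 * new order: (400000 / 1000) * 4 * 5000 / 1000 = 8000 entries, safe
	 */
	static long wm_entries(long clock_in_khz, long pixel_size, long latency_ns)
	{
		return ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
	}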
@@ -1986,14 +2266,13 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, | |||
1986 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { | 2266 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { |
1987 | latency = &cxsr_latency_table[i]; | 2267 | latency = &cxsr_latency_table[i]; |
1988 | if (is_desktop == latency->is_desktop && | 2268 | if (is_desktop == latency->is_desktop && |
1989 | fsb == latency->fsb_freq && mem == latency->mem_freq) | 2269 | fsb == latency->fsb_freq && mem == latency->mem_freq) |
1990 | break; | 2270 | return latency; |
1991 | } | 2271 | } |
1992 | if (i >= ARRAY_SIZE(cxsr_latency_table)) { | 2272 | |
1993 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); | 2273 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); |
1994 | return NULL; | 2274 | |
1995 | } | 2275 | return NULL; |
1996 | return latency; | ||
1997 | } | 2276 | } |
1998 | 2277 | ||
1999 | static void igd_disable_cxsr(struct drm_device *dev) | 2278 | static void igd_disable_cxsr(struct drm_device *dev) |
@@ -2084,32 +2363,36 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2084 | */ | 2363 | */ |
2085 | const static int latency_ns = 5000; | 2364 | const static int latency_ns = 5000; |
2086 | 2365 | ||
2087 | static int intel_get_fifo_size(struct drm_device *dev, int plane) | 2366 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
2088 | { | 2367 | { |
2089 | struct drm_i915_private *dev_priv = dev->dev_private; | 2368 | struct drm_i915_private *dev_priv = dev->dev_private; |
2090 | uint32_t dsparb = I915_READ(DSPARB); | 2369 | uint32_t dsparb = I915_READ(DSPARB); |
2091 | int size; | 2370 | int size; |
2092 | 2371 | ||
2093 | if (IS_I9XX(dev)) { | 2372 | if (plane == 0) |
2094 | if (plane == 0) | ||
2095 | size = dsparb & 0x7f; | ||
2096 | else | ||
2097 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | ||
2098 | (dsparb & 0x7f); | ||
2099 | } else if (IS_I85X(dev)) { | ||
2100 | if (plane == 0) | ||
2101 | size = dsparb & 0x1ff; | ||
2102 | else | ||
2103 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2104 | (dsparb & 0x1ff); | ||
2105 | size >>= 1; /* Convert to cachelines */ | ||
2106 | } else if (IS_845G(dev)) { | ||
2107 | size = dsparb & 0x7f; | 2373 | size = dsparb & 0x7f; |
2108 | size >>= 2; /* Convert to cachelines */ | 2374 | else |
2109 | } else { | 2375 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - |
2110 | size = dsparb & 0x7f; | 2376 | (dsparb & 0x7f); |
2111 | size >>= 1; /* Convert to cachelines */ | 2377 | |
2112 | } | 2378 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", |
2379 | size); | ||
2380 | |||
2381 | return size; | ||
2382 | } | ||
2383 | |||
2384 | static int i85x_get_fifo_size(struct drm_device *dev, int plane) | ||
2385 | { | ||
2386 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2387 | uint32_t dsparb = I915_READ(DSPARB); | ||
2388 | int size; | ||
2389 | |||
2390 | if (plane == 0) | ||
2391 | size = dsparb & 0x1ff; | ||
2392 | else | ||
2393 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2394 | (dsparb & 0x1ff); | ||
2395 | size >>= 1; /* Convert to cachelines */ | ||
2113 | 2396 | ||
2114 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | 2397 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", |
2115 | size); | 2398 | size); |
@@ -2117,7 +2400,38 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane) | |||
2117 | return size; | 2400 | return size; |
2118 | } | 2401 | } |
2119 | 2402 | ||
2120 | static void g4x_update_wm(struct drm_device *dev) | 2403 | static int i845_get_fifo_size(struct drm_device *dev, int plane) |
2404 | { | ||
2405 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2406 | uint32_t dsparb = I915_READ(DSPARB); | ||
2407 | int size; | ||
2408 | |||
2409 | size = dsparb & 0x7f; | ||
2410 | size >>= 2; /* Convert to cachelines */ | ||
2411 | |||
2412 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | ||
2413 | size); | ||
2414 | |||
2415 | return size; | ||
2416 | } | ||
2417 | |||
2418 | static int i830_get_fifo_size(struct drm_device *dev, int plane) | ||
2419 | { | ||
2420 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2421 | uint32_t dsparb = I915_READ(DSPARB); | ||
2422 | int size; | ||
2423 | |||
2424 | size = dsparb & 0x7f; | ||
2425 | size >>= 1; /* Convert to cachelines */ | ||
2426 | |||
2427 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | ||
2428 | size); | ||
2429 | |||
2430 | return size; | ||
2431 | } | ||
2432 | |||
2433 | static void g4x_update_wm(struct drm_device *dev, int unused, int unused2, | ||
2434 | int unused3, int unused4) | ||
2121 | { | 2435 | { |
2122 | struct drm_i915_private *dev_priv = dev->dev_private; | 2436 | struct drm_i915_private *dev_priv = dev->dev_private; |
2123 | u32 fw_blc_self = I915_READ(FW_BLC_SELF); | 2437 | u32 fw_blc_self = I915_READ(FW_BLC_SELF); |
@@ -2129,7 +2443,8 @@ static void g4x_update_wm(struct drm_device *dev) | |||
2129 | I915_WRITE(FW_BLC_SELF, fw_blc_self); | 2443 | I915_WRITE(FW_BLC_SELF, fw_blc_self); |
2130 | } | 2444 | } |
2131 | 2445 | ||
2132 | static void i965_update_wm(struct drm_device *dev) | 2446 | static void i965_update_wm(struct drm_device *dev, int unused, int unused2, |
2447 | int unused3, int unused4) | ||
2133 | { | 2448 | { |
2134 | struct drm_i915_private *dev_priv = dev->dev_private; | 2449 | struct drm_i915_private *dev_priv = dev->dev_private; |
2135 | 2450 | ||
@@ -2165,8 +2480,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2165 | cacheline_size = planea_params.cacheline_size; | 2480 | cacheline_size = planea_params.cacheline_size; |
2166 | 2481 | ||
2167 | /* Update per-plane FIFO sizes */ | 2482 | /* Update per-plane FIFO sizes */ |
2168 | planea_params.fifo_size = intel_get_fifo_size(dev, 0); | 2483 | planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
2169 | planeb_params.fifo_size = intel_get_fifo_size(dev, 1); | 2484 | planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
2170 | 2485 | ||
2171 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, | 2486 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, |
2172 | pixel_size, latency_ns); | 2487 | pixel_size, latency_ns); |
@@ -2213,14 +2528,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2213 | I915_WRITE(FW_BLC2, fwater_hi); | 2528 | I915_WRITE(FW_BLC2, fwater_hi); |
2214 | } | 2529 | } |
2215 | 2530 | ||
2216 | static void i830_update_wm(struct drm_device *dev, int planea_clock, | 2531 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, |
2217 | int pixel_size) | 2532 | int unused2, int pixel_size) |
2218 | { | 2533 | { |
2219 | struct drm_i915_private *dev_priv = dev->dev_private; | 2534 | struct drm_i915_private *dev_priv = dev->dev_private; |
2220 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 2535 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
2221 | int planea_wm; | 2536 | int planea_wm; |
2222 | 2537 | ||
2223 | i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); | 2538 | i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
2224 | 2539 | ||
2225 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, | 2540 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, |
2226 | pixel_size, latency_ns); | 2541 | pixel_size, latency_ns); |
@@ -2264,6 +2579,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, | |||
2264 | */ | 2579 | */ |
2265 | static void intel_update_watermarks(struct drm_device *dev) | 2580 | static void intel_update_watermarks(struct drm_device *dev) |
2266 | { | 2581 | { |
2582 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2267 | struct drm_crtc *crtc; | 2583 | struct drm_crtc *crtc; |
2268 | struct intel_crtc *intel_crtc; | 2584 | struct intel_crtc *intel_crtc; |
2269 | int sr_hdisplay = 0; | 2585 | int sr_hdisplay = 0; |
@@ -2302,15 +2618,8 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
2302 | else if (IS_IGD(dev)) | 2618 | else if (IS_IGD(dev)) |
2303 | igd_disable_cxsr(dev); | 2619 | igd_disable_cxsr(dev); |
2304 | 2620 | ||
2305 | if (IS_G4X(dev)) | 2621 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, |
2306 | g4x_update_wm(dev); | 2622 | sr_hdisplay, pixel_size); |
2307 | else if (IS_I965G(dev)) | ||
2308 | i965_update_wm(dev); | ||
2309 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) | ||
2310 | i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, | ||
2311 | pixel_size); | ||
2312 | else | ||
2313 | i830_update_wm(dev, planea_clock, pixel_size); | ||
2314 | } | 2623 | } |
2315 | 2624 | ||
2316 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 2625 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
@@ -2323,10 +2632,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2323 | struct drm_i915_private *dev_priv = dev->dev_private; | 2632 | struct drm_i915_private *dev_priv = dev->dev_private; |
2324 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2633 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2325 | int pipe = intel_crtc->pipe; | 2634 | int pipe = intel_crtc->pipe; |
2635 | int plane = intel_crtc->plane; | ||
2326 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; | 2636 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; |
2327 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 2637 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; |
2328 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; | 2638 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; |
2329 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | 2639 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
2330 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 2640 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
2331 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 2641 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
2332 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 2642 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
@@ -2334,8 +2644,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2334 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | 2644 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; |
2335 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | 2645 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; |
2336 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | 2646 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; |
2337 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; | 2647 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; |
2338 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; | 2648 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; |
2339 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | 2649 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; |
2340 | int refclk, num_outputs = 0; | 2650 | int refclk, num_outputs = 0; |
2341 | intel_clock_t clock, reduced_clock; | 2651 | intel_clock_t clock, reduced_clock; |
@@ -2568,7 +2878,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2568 | enable color space conversion */ | 2878 | enable color space conversion */ |
2569 | if (!IS_IGDNG(dev)) { | 2879 | if (!IS_IGDNG(dev)) { |
2570 | if (pipe == 0) | 2880 | if (pipe == 0) |
2571 | dspcntr |= DISPPLANE_SEL_PIPE_A; | 2881 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
2572 | else | 2882 | else |
2573 | dspcntr |= DISPPLANE_SEL_PIPE_B; | 2883 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
2574 | } | 2884 | } |
@@ -2580,7 +2890,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2580 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the | 2890 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the |
2581 | * pipe == 0 check? | 2891 | * pipe == 0 check? |
2582 | */ | 2892 | */ |
2583 | if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10) | 2893 | if (mode->clock > |
2894 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) | ||
2584 | pipeconf |= PIPEACONF_DOUBLE_WIDE; | 2895 | pipeconf |= PIPEACONF_DOUBLE_WIDE; |
2585 | else | 2896 | else |
2586 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | 2897 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; |
@@ -2652,9 +2963,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2652 | udelay(150); | 2963 | udelay(150); |
2653 | 2964 | ||
2654 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | 2965 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { |
2655 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | 2966 | if (is_sdvo) { |
2656 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | 2967 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; |
2968 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
2657 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | 2969 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); |
2970 | } else | ||
2971 | I915_WRITE(dpll_md_reg, 0); | ||
2658 | } else { | 2972 | } else { |
2659 | /* write it again -- the BIOS does, after all */ | 2973 | /* write it again -- the BIOS does, after all */ |
2660 | I915_WRITE(dpll_reg, dpll); | 2974 | I915_WRITE(dpll_reg, dpll); |
@@ -2734,6 +3048,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2734 | /* Flush the plane changes */ | 3048 | /* Flush the plane changes */ |
2735 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 3049 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
2736 | 3050 | ||
3051 | if ((IS_I965G(dev) || plane == 0)) | ||
3052 | intel_update_fbc(crtc, &crtc->mode); | ||
3053 | |||
2737 | intel_update_watermarks(dev); | 3054 | intel_update_watermarks(dev); |
2738 | 3055 | ||
2739 | drm_vblank_post_modeset(dev, pipe); | 3056 | drm_vblank_post_modeset(dev, pipe); |
@@ -2778,6 +3095,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
2778 | struct drm_gem_object *bo; | 3095 | struct drm_gem_object *bo; |
2779 | struct drm_i915_gem_object *obj_priv; | 3096 | struct drm_i915_gem_object *obj_priv; |
2780 | int pipe = intel_crtc->pipe; | 3097 | int pipe = intel_crtc->pipe; |
3098 | int plane = intel_crtc->plane; | ||
2781 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; | 3099 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; |
2782 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; | 3100 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; |
2783 | uint32_t temp = I915_READ(control); | 3101 | uint32_t temp = I915_READ(control); |
@@ -2863,6 +3181,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
2863 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 3181 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
2864 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 3182 | drm_gem_object_unreference(intel_crtc->cursor_bo); |
2865 | } | 3183 | } |
3184 | |||
3185 | if ((IS_I965G(dev) || plane == 0)) | ||
3186 | intel_update_fbc(crtc, &crtc->mode); | ||
3187 | |||
2866 | mutex_unlock(&dev->struct_mutex); | 3188 | mutex_unlock(&dev->struct_mutex); |
2867 | 3189 | ||
2868 | intel_crtc->cursor_addr = addr; | 3190 | intel_crtc->cursor_addr = addr; |
@@ -3544,6 +3866,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
3544 | intel_crtc->lut_b[i] = i; | 3866 | intel_crtc->lut_b[i] = i; |
3545 | } | 3867 | } |
3546 | 3868 | ||
3869 | /* Swap pipes & planes for FBC on pre-965 */ | ||
3870 | intel_crtc->pipe = pipe; | ||
3871 | intel_crtc->plane = pipe; | ||
3872 | if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { | ||
3873 | DRM_DEBUG("swapping pipes & planes for FBC\n"); | ||
3874 | intel_crtc->plane = ((pipe == 0) ? 1 : 0); | ||
3875 | } | ||
3876 | |||
3547 | intel_crtc->cursor_addr = 0; | 3877 | intel_crtc->cursor_addr = 0; |
3548 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; | 3878 | intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; |
3549 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 3879 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
@@ -3826,6 +4156,73 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
3826 | } | 4156 | } |
3827 | } | 4157 | } |
3828 | 4158 | ||
4159 | /* Set up chip-specific display functions */ | ||
4160 | static void intel_init_display(struct drm_device *dev) | ||
4161 | { | ||
4162 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4163 | |||
4164 | /* We always want a DPMS function */ | ||
4165 | if (IS_IGDNG(dev)) | ||
4166 | dev_priv->display.dpms = igdng_crtc_dpms; | ||
4167 | else | ||
4168 | dev_priv->display.dpms = i9xx_crtc_dpms; | ||
4169 | |||
4170 | /* Only mobile has FBC, leave pointers NULL for other chips */ | ||
4171 | if (IS_MOBILE(dev)) { | ||
4172 | if (IS_GM45(dev)) { | ||
4173 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | ||
4174 | dev_priv->display.enable_fbc = g4x_enable_fbc; | ||
4175 | dev_priv->display.disable_fbc = g4x_disable_fbc; | ||
4176 | } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { | ||
4177 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | ||
4178 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | ||
4179 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | ||
4180 | } | ||
4181 | /* 855GM needs testing */ | ||
4182 | } | ||
4183 | |||
4184 | /* Returns the core display clock speed */ | ||
4185 | if (IS_I945G(dev)) | ||
4186 | dev_priv->display.get_display_clock_speed = | ||
4187 | i945_get_display_clock_speed; | ||
4188 | else if (IS_I915G(dev)) | ||
4189 | dev_priv->display.get_display_clock_speed = | ||
4190 | i915_get_display_clock_speed; | ||
4191 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) | ||
4192 | dev_priv->display.get_display_clock_speed = | ||
4193 | i9xx_misc_get_display_clock_speed; | ||
4194 | else if (IS_I915GM(dev)) | ||
4195 | dev_priv->display.get_display_clock_speed = | ||
4196 | i915gm_get_display_clock_speed; | ||
4197 | else if (IS_I865G(dev)) | ||
4198 | dev_priv->display.get_display_clock_speed = | ||
4199 | i865_get_display_clock_speed; | ||
4200 | else if (IS_I855(dev)) | ||
4201 | dev_priv->display.get_display_clock_speed = | ||
4202 | i855_get_display_clock_speed; | ||
4203 | else /* 852, 830 */ | ||
4204 | dev_priv->display.get_display_clock_speed = | ||
4205 | i830_get_display_clock_speed; | ||
4206 | |||
4207 | /* For FIFO watermark updates */ | ||
4208 | if (IS_G4X(dev)) | ||
4209 | dev_priv->display.update_wm = g4x_update_wm; | ||
4210 | else if (IS_I965G(dev)) | ||
4211 | dev_priv->display.update_wm = i965_update_wm; | ||
4212 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) { | ||
4213 | dev_priv->display.update_wm = i9xx_update_wm; | ||
4214 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | ||
4215 | } else { | ||
4216 | if (IS_I85X(dev)) | ||
4217 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | ||
4218 | else if (IS_845G(dev)) | ||
4219 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | ||
4220 | else | ||
4221 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | ||
4222 | dev_priv->display.update_wm = i830_update_wm; | ||
4223 | } | ||
4224 | } | ||
4225 | |||
3829 | void intel_modeset_init(struct drm_device *dev) | 4226 | void intel_modeset_init(struct drm_device *dev) |
3830 | { | 4227 | { |
3831 | struct drm_i915_private *dev_priv = dev->dev_private; | 4228 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3839,6 +4236,8 @@ void intel_modeset_init(struct drm_device *dev) | |||
3839 | 4236 | ||
3840 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | 4237 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
3841 | 4238 | ||
4239 | intel_init_display(dev); | ||
4240 | |||
3842 | if (IS_I965G(dev)) { | 4241 | if (IS_I965G(dev)) { |
3843 | dev->mode_config.max_width = 8192; | 4242 | dev->mode_config.max_width = 8192; |
3844 | dev->mode_config.max_height = 8192; | 4243 | dev->mode_config.max_height = 8192; |
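Note: intel_init_display() above replaces scattered IS_*() chains with a table of per-chip hooks hung off dev_priv->display. The struct itself is added to i915_drv.h, outside this excerpt; a sketch reconstructed from the call sites shown here (member names match the assignments above, the exact declaration is inferred and assumes the kernel types from i915_drv.h):

	struct drm_i915_display_funcs {
		void (*dpms)(struct drm_crtc *crtc, int mode);
		bool (*fbc_enabled)(struct drm_crtc *crtc);
		void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
		void (*disable_fbc)(struct drm_device *dev);
		int (*get_display_clock_speed)(struct drm_device *dev);
		int (*get_fifo_size)(struct drm_device *dev, int plane);
		void (*update_wm)(struct drm_device *dev, int planea_clock,
				  int planeb_clock, int sr_hdisplay,
				  int pixel_size);
	};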
@@ -3904,6 +4303,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
3904 | 4303 | ||
3905 | mutex_unlock(&dev->struct_mutex); | 4304 | mutex_unlock(&dev->struct_mutex); |
3906 | 4305 | ||
4306 | if (dev_priv->display.disable_fbc) | ||
4307 | dev_priv->display.disable_fbc(dev); | ||
4308 | |||
3907 | drm_mode_config_cleanup(dev); | 4309 | drm_mode_config_cleanup(dev); |
3908 | } | 4310 | } |
3909 | 4311 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3ebbbabfe59b..8aa4b7f30daa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
29 | #include <linux/i2c-id.h> | 29 | #include <linux/i2c-id.h> |
30 | #include <linux/i2c-algo-bit.h> | 30 | #include <linux/i2c-algo-bit.h> |
31 | #include "i915_drv.h" | ||
31 | #include "drm_crtc.h" | 32 | #include "drm_crtc.h" |
32 | 33 | ||
33 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
@@ -111,8 +112,8 @@ struct intel_output { | |||
111 | 112 | ||
112 | struct intel_crtc { | 113 | struct intel_crtc { |
113 | struct drm_crtc base; | 114 | struct drm_crtc base; |
114 | int pipe; | 115 | enum pipe pipe; |
115 | int plane; | 116 | enum plane plane; |
116 | struct drm_gem_object *cursor_bo; | 117 | struct drm_gem_object *cursor_bo; |
117 | uint32_t cursor_addr; | 118 | uint32_t cursor_addr; |
118 | u8 lut_r[256], lut_g[256], lut_b[256]; | 119 | u8 lut_r[256], lut_g[256], lut_b[256]; |
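Note: typing pipe and plane as enums rather than int documents the pre-965 pipe/plane swap performed in intel_crtc_init() above. The enum definitions land in i915_drv.h (not shown in this excerpt); a plausible sketch, assuming the conventional two-entry layout:

	enum pipe {
		PIPE_A = 0,
		PIPE_B,
	};

	enum plane {
		PLANE_A = 0,
		PLANE_B,
	};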
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7ba4a232a97f..e85d7e9eed7d 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -110,6 +110,7 @@ EXPORT_SYMBOL(intelfb_resize); | |||
110 | static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | 110 | static int intelfb_create(struct drm_device *dev, uint32_t fb_width, |
111 | uint32_t fb_height, uint32_t surface_width, | 111 | uint32_t fb_height, uint32_t surface_width, |
112 | uint32_t surface_height, | 112 | uint32_t surface_height, |
113 | uint32_t surface_depth, uint32_t surface_bpp, | ||
113 | struct drm_framebuffer **fb_p) | 114 | struct drm_framebuffer **fb_p) |
114 | { | 115 | { |
115 | struct fb_info *info; | 116 | struct fb_info *info; |
@@ -125,9 +126,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
125 | mode_cmd.width = surface_width; | 126 | mode_cmd.width = surface_width; |
126 | mode_cmd.height = surface_height; | 127 | mode_cmd.height = surface_height; |
127 | 128 | ||
128 | mode_cmd.bpp = 32; | 129 | mode_cmd.bpp = surface_bpp; |
129 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); | 130 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); |
130 | mode_cmd.depth = 24; | 131 | mode_cmd.depth = surface_depth; |
131 | 132 | ||
132 | size = mode_cmd.pitch * mode_cmd.height; | 133 | size = mode_cmd.pitch * mode_cmd.height; |
133 | size = ALIGN(size, PAGE_SIZE); | 134 | size = ALIGN(size, PAGE_SIZE); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index dafc0da1c256..98ae3d73577e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * Jesse Barnes <jesse.barnes@intel.com> | 27 | * Jesse Barnes <jesse.barnes@intel.com> |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <acpi/button.h> | ||
30 | #include <linux/dmi.h> | 31 | #include <linux/dmi.h> |
31 | #include <linux/i2c.h> | 32 | #include <linux/i2c.h> |
32 | #include "drmP.h" | 33 | #include "drmP.h" |
@@ -295,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
295 | goto out; | 296 | goto out; |
296 | } | 297 | } |
297 | 298 | ||
299 | /* full screen scale for now */ | ||
300 | if (IS_IGDNG(dev)) | ||
301 | goto out; | ||
302 | |||
298 | /* 965+ wants fuzzy fitting */ | 303 | /* 965+ wants fuzzy fitting */ |
299 | if (IS_I965G(dev)) | 304 | if (IS_I965G(dev)) |
300 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | | 305 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | |
@@ -322,8 +327,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
322 | * to register description and PRM. | 327 | * to register description and PRM. |
323 | * Change the value here to see the borders for debugging | 328 | * Change the value here to see the borders for debugging |
324 | */ | 329 | */ |
325 | I915_WRITE(BCLRPAT_A, 0); | 330 | if (!IS_IGDNG(dev)) { |
326 | I915_WRITE(BCLRPAT_B, 0); | 331 | I915_WRITE(BCLRPAT_A, 0); |
332 | I915_WRITE(BCLRPAT_B, 0); | ||
333 | } | ||
327 | 334 | ||
328 | switch (lvds_priv->fitting_mode) { | 335 | switch (lvds_priv->fitting_mode) { |
329 | case DRM_MODE_SCALE_CENTER: | 336 | case DRM_MODE_SCALE_CENTER: |
@@ -572,7 +579,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
572 | * settings. | 579 | * settings. |
573 | */ | 580 | */ |
574 | 581 | ||
575 | /* No panel fitting yet, fixme */ | ||
576 | if (IS_IGDNG(dev)) | 582 | if (IS_IGDNG(dev)) |
577 | return; | 583 | return; |
578 | 584 | ||
@@ -585,15 +591,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
585 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); | 591 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); |
586 | } | 592 | } |
587 | 593 | ||
594 | /* Some lid devices report incorrect lid status, so assume they're connected */ | ||
595 | static const struct dmi_system_id bad_lid_status[] = { | ||
596 | { | ||
597 | .ident = "Aspire One", | ||
598 | .matches = { | ||
599 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
600 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | ||
601 | }, | ||
602 | }, | ||
603 | { } | ||
604 | }; | ||
605 | |||
588 | /** | 606 | /** |
589 | * Detect the LVDS connection. | 607 | * Detect the LVDS connection. |
590 | * | 608 | * |
591 | * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have | 609 | * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means |
592 | * been set up if the LVDS was actually connected anyway. | 610 | * connected and closed means disconnected. We also send hotplug events as |
611 | needed, using lid status notifications from the ACPI button driver. | ||
593 | */ | 612 | */ |
594 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) | 613 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) |
595 | { | 614 | { |
596 | return connector_status_connected; | 615 | enum drm_connector_status status = connector_status_connected; |
616 | |||
617 | if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) | ||
618 | status = connector_status_disconnected; | ||
619 | |||
620 | return status; | ||
597 | } | 621 | } |
598 | 622 | ||
599 | /** | 623 | /** |
@@ -632,6 +656,24 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
632 | return 0; | 656 | return 0; |
633 | } | 657 | } |
634 | 658 | ||
659 | static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | ||
660 | void *unused) | ||
661 | { | ||
662 | struct drm_i915_private *dev_priv = | ||
663 | container_of(nb, struct drm_i915_private, lid_notifier); | ||
664 | struct drm_device *dev = dev_priv->dev; | ||
665 | |||
666 | if (acpi_lid_open() && !dev_priv->suspended) { | ||
667 | mutex_lock(&dev->mode_config.mutex); | ||
668 | drm_helper_resume_force_mode(dev); | ||
669 | mutex_unlock(&dev->mode_config.mutex); | ||
670 | } | ||
671 | |||
672 | drm_sysfs_hotplug_event(dev_priv->dev); | ||
673 | |||
674 | return NOTIFY_OK; | ||
675 | } | ||
676 | |||
635 | /** | 677 | /** |
636 | * intel_lvds_destroy - unregister and free LVDS structures | 678 | * intel_lvds_destroy - unregister and free LVDS structures |
637 | * @connector: connector to free | 679 | * @connector: connector to free |
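
The notifier callback recovers its drm_i915_private from the embedded notifier_block via container_of(), i.e. by subtracting the member offset from the member pointer. A minimal self-contained model of that idiom (the struct layouts here are stand-ins, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct notifier_block {
            int (*notifier_call)(struct notifier_block *, unsigned long, void *);
    };

    struct dev_priv {
            int suspended;
            struct notifier_block lid_notifier;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static int lid_notify(struct notifier_block *nb, unsigned long val, void *unused)
    {
            struct dev_priv *priv = container_of(nb, struct dev_priv, lid_notifier);
            (void)val; (void)unused;
            return priv->suspended; /* would be NOTIFY_OK in the kernel */
    }

    int main(void)
    {
            struct dev_priv p = { .suspended = 0 };

            p.lid_notifier.notifier_call = lid_notify;
            printf("%d\n", p.lid_notifier.notifier_call(&p.lid_notifier, 0, NULL));
            return 0;
    }
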
@@ -641,10 +683,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
641 | */ | 683 | */ |
642 | static void intel_lvds_destroy(struct drm_connector *connector) | 684 | static void intel_lvds_destroy(struct drm_connector *connector) |
643 | { | 685 | { |
686 | struct drm_device *dev = connector->dev; | ||
644 | struct intel_output *intel_output = to_intel_output(connector); | 687 | struct intel_output *intel_output = to_intel_output(connector); |
688 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
645 | 689 | ||
646 | if (intel_output->ddc_bus) | 690 | if (intel_output->ddc_bus) |
647 | intel_i2c_destroy(intel_output->ddc_bus); | 691 | intel_i2c_destroy(intel_output->ddc_bus); |
692 | if (dev_priv->lid_notifier.notifier_call) | ||
693 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | ||
648 | drm_sysfs_connector_remove(connector); | 694 | drm_sysfs_connector_remove(connector); |
649 | drm_connector_cleanup(connector); | 695 | drm_connector_cleanup(connector); |
650 | kfree(connector); | 696 | kfree(connector); |
@@ -1011,6 +1057,11 @@ out: | |||
1011 | pwm |= PWM_PCH_ENABLE; | 1057 | pwm |= PWM_PCH_ENABLE; |
1012 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); | 1058 | I915_WRITE(BLC_PWM_PCH_CTL1, pwm); |
1013 | } | 1059 | } |
1060 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; | ||
1061 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { | ||
1062 | DRM_DEBUG("lid notifier registration failed\n"); | ||
1063 | dev_priv->lid_notifier.notifier_call = NULL; | ||
1064 | } | ||
1014 | drm_sysfs_connector_add(connector); | 1065 | drm_sysfs_connector_add(connector); |
1015 | return; | 1066 | return; |
1016 | 1067 | ||
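
Registration above is best-effort: on failure the callback pointer is cleared, and intel_lvds_destroy later uses that same pointer as the "was registered" flag before unregistering. A compact model of the idiom, with fake_register() standing in for acpi_lid_notifier_register():

    #include <stdio.h>

    struct notifier_block { int (*notifier_call)(void); };

    static int fake_register(struct notifier_block *nb)
    {
            (void)nb;
            return -1; /* pretend registration failed */
    }

    static int lid_notify(void) { return 0; }

    int main(void)
    {
            struct notifier_block lid = { .notifier_call = lid_notify };

            if (fake_register(&lid)) {
                    printf("lid notifier registration failed\n");
                    lid.notifier_call = NULL; /* destroy path will skip unregister */
            }
            return 0;
    }
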
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 0bf28efcf2c1..083bec2e50f9 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -135,6 +135,30 @@ struct intel_sdvo_priv { | |||
135 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; | 135 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; |
136 | struct intel_sdvo_dtd save_output_dtd[16]; | 136 | struct intel_sdvo_dtd save_output_dtd[16]; |
137 | u32 save_SDVOX; | 137 | u32 save_SDVOX; |
138 | /* add the properties for SDVO-TV */ | ||
139 | struct drm_property *left_property; | ||
140 | struct drm_property *right_property; | ||
141 | struct drm_property *top_property; | ||
142 | struct drm_property *bottom_property; | ||
143 | struct drm_property *hpos_property; | ||
144 | struct drm_property *vpos_property; | ||
145 | |||
146 | /* add the properties for SDVO-TV/LVDS */ | ||
147 | struct drm_property *brightness_property; | ||
148 | struct drm_property *contrast_property; | ||
149 | struct drm_property *saturation_property; | ||
150 | struct drm_property *hue_property; | ||
151 | |||
152 | /* Variables recording the current settings of the above properties */ | ||
153 | u32 left_margin, right_margin, top_margin, bottom_margin; | ||
154 | /* the available ranges for the margins */ | ||
155 | u32 max_hscan, max_vscan; | ||
156 | u32 max_hpos, cur_hpos; | ||
157 | u32 max_vpos, cur_vpos; | ||
158 | u32 cur_brightness, max_brightness; | ||
159 | u32 cur_contrast, max_contrast; | ||
160 | u32 cur_saturation, max_saturation; | ||
161 | u32 cur_hue, max_hue; | ||
138 | }; | 162 | }; |
139 | 163 | ||
140 | static bool | 164 | static bool |
@@ -281,6 +305,31 @@ static const struct _sdvo_cmd_name { | |||
281 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), | 305 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), |
282 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), | 306 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), |
283 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), | 307 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), |
308 | /* Op codes for the SDVO enhancements */ | ||
309 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H), | ||
310 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H), | ||
311 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H), | ||
312 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V), | ||
313 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V), | ||
314 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V), | ||
315 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), | ||
316 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), | ||
317 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), | ||
318 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), | ||
319 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), | ||
320 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), | ||
321 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), | ||
322 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), | ||
323 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), | ||
324 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), | ||
325 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), | ||
326 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), | ||
327 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), | ||
328 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), | ||
329 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), | ||
330 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), | ||
331 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), | ||
332 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), | ||
284 | /* HDMI op code */ | 333 | /* HDMI op code */ |
285 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), | 334 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), |
286 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), | 335 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), |
@@ -981,7 +1030,7 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) | |||
981 | 1030 | ||
982 | status = intel_sdvo_read_response(output, NULL, 0); | 1031 | status = intel_sdvo_read_response(output, NULL, 0); |
983 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1032 | if (status != SDVO_CMD_STATUS_SUCCESS) |
984 | DRM_DEBUG("%s: Failed to set TV format\n", | 1033 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", |
985 | SDVO_NAME(sdvo_priv)); | 1034 | SDVO_NAME(sdvo_priv)); |
986 | } | 1035 | } |
987 | 1036 | ||
@@ -1792,6 +1841,45 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
1792 | return 1; | 1841 | return 1; |
1793 | } | 1842 | } |
1794 | 1843 | ||
1844 | static | ||
1845 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | ||
1846 | { | ||
1847 | struct intel_output *intel_output = to_intel_output(connector); | ||
1848 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1849 | struct drm_device *dev = connector->dev; | ||
1850 | |||
1851 | if (sdvo_priv->is_tv) { | ||
1852 | if (sdvo_priv->left_property) | ||
1853 | drm_property_destroy(dev, sdvo_priv->left_property); | ||
1854 | if (sdvo_priv->right_property) | ||
1855 | drm_property_destroy(dev, sdvo_priv->right_property); | ||
1856 | if (sdvo_priv->top_property) | ||
1857 | drm_property_destroy(dev, sdvo_priv->top_property); | ||
1858 | if (sdvo_priv->bottom_property) | ||
1859 | drm_property_destroy(dev, sdvo_priv->bottom_property); | ||
1860 | if (sdvo_priv->hpos_property) | ||
1861 | drm_property_destroy(dev, sdvo_priv->hpos_property); | ||
1862 | if (sdvo_priv->vpos_property) | ||
1863 | drm_property_destroy(dev, sdvo_priv->vpos_property); | ||
1864 | } | ||
1865 | if (sdvo_priv->is_tv) { | ||
1866 | if (sdvo_priv->saturation_property) | ||
1867 | drm_property_destroy(dev, | ||
1868 | sdvo_priv->saturation_property); | ||
1869 | if (sdvo_priv->contrast_property) | ||
1870 | drm_property_destroy(dev, | ||
1871 | sdvo_priv->contrast_property); | ||
1872 | if (sdvo_priv->hue_property) | ||
1873 | drm_property_destroy(dev, sdvo_priv->hue_property); | ||
1874 | } | ||
1875 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | ||
1876 | if (sdvo_priv->brightness_property) | ||
1877 | drm_property_destroy(dev, | ||
1878 | sdvo_priv->brightness_property); | ||
1879 | } | ||
1880 | return; | ||
1881 | } | ||
1882 | |||
1795 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1883 | static void intel_sdvo_destroy(struct drm_connector *connector) |
1796 | { | 1884 | { |
1797 | struct intel_output *intel_output = to_intel_output(connector); | 1885 | struct intel_output *intel_output = to_intel_output(connector); |
@@ -1812,6 +1900,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
1812 | drm_property_destroy(connector->dev, | 1900 | drm_property_destroy(connector->dev, |
1813 | sdvo_priv->tv_format_property); | 1901 | sdvo_priv->tv_format_property); |
1814 | 1902 | ||
1903 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) | ||
1904 | intel_sdvo_destroy_enhance_property(connector); | ||
1905 | |||
1815 | drm_sysfs_connector_remove(connector); | 1906 | drm_sysfs_connector_remove(connector); |
1816 | drm_connector_cleanup(connector); | 1907 | drm_connector_cleanup(connector); |
1817 | 1908 | ||
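
The teardown above relies on every property pointer starting out NULL and only becoming non-NULL once drm_property_create() succeeds, so each destroy is guarded by a pointer test (drm_property_destroy(), unlike free(), is not NULL-safe). A sketch of the idiom with free() standing in for drm_property_destroy():

    #include <stdlib.h>

    struct enhance_props { int *left, *right, *brightness; };

    static void destroy_enhance_props(struct enhance_props *p)
    {
            if (p->left)
                    free(p->left);
            if (p->right)
                    free(p->right);
            if (p->brightness)
                    free(p->brightness);
    }

    int main(void)
    {
            /* only one property was ever created; the guards make
             * partial initialization safe to unwind */
            struct enhance_props p = { .left = malloc(sizeof(int)) };

            destroy_enhance_props(&p);
            return 0;
    }
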
@@ -1829,6 +1920,8 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1829 | struct drm_crtc *crtc = encoder->crtc; | 1920 | struct drm_crtc *crtc = encoder->crtc; |
1830 | int ret = 0; | 1921 | int ret = 0; |
1831 | bool changed = false; | 1922 | bool changed = false; |
1923 | uint8_t cmd, status; | ||
1924 | uint16_t temp_value; | ||
1832 | 1925 | ||
1833 | ret = drm_connector_property_set_value(connector, property, val); | 1926 | ret = drm_connector_property_set_value(connector, property, val); |
1834 | if (ret < 0) | 1927 | if (ret < 0) |
@@ -1845,11 +1938,102 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1845 | 1938 | ||
1846 | sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; | 1939 | sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; |
1847 | changed = true; | 1940 | changed = true; |
1848 | } else { | ||
1849 | ret = -EINVAL; | ||
1850 | goto out; | ||
1851 | } | 1941 | } |
1852 | 1942 | ||
1943 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | ||
1944 | cmd = 0; | ||
1945 | temp_value = val; | ||
1946 | if (sdvo_priv->left_property == property) { | ||
1947 | drm_connector_property_set_value(connector, | ||
1948 | sdvo_priv->right_property, val); | ||
1949 | if (sdvo_priv->left_margin == temp_value) | ||
1950 | goto out; | ||
1951 | |||
1952 | sdvo_priv->left_margin = temp_value; | ||
1953 | sdvo_priv->right_margin = temp_value; | ||
1954 | temp_value = sdvo_priv->max_hscan - | ||
1955 | sdvo_priv->left_margin; | ||
1956 | cmd = SDVO_CMD_SET_OVERSCAN_H; | ||
1957 | } else if (sdvo_priv->right_property == property) { | ||
1958 | drm_connector_property_set_value(connector, | ||
1959 | sdvo_priv->left_property, val); | ||
1960 | if (sdvo_priv->right_margin == temp_value) | ||
1961 | goto out; | ||
1962 | |||
1963 | sdvo_priv->left_margin = temp_value; | ||
1964 | sdvo_priv->right_margin = temp_value; | ||
1965 | temp_value = sdvo_priv->max_hscan - | ||
1966 | sdvo_priv->left_margin; | ||
1967 | cmd = SDVO_CMD_SET_OVERSCAN_H; | ||
1968 | } else if (sdvo_priv->top_property == property) { | ||
1969 | drm_connector_property_set_value(connector, | ||
1970 | sdvo_priv->bottom_property, val); | ||
1971 | if (sdvo_priv->top_margin == temp_value) | ||
1972 | goto out; | ||
1973 | |||
1974 | sdvo_priv->top_margin = temp_value; | ||
1975 | sdvo_priv->bottom_margin = temp_value; | ||
1976 | temp_value = sdvo_priv->max_vscan - | ||
1977 | sdvo_priv->top_margin; | ||
1978 | cmd = SDVO_CMD_SET_OVERSCAN_V; | ||
1979 | } else if (sdvo_priv->bottom_property == property) { | ||
1980 | drm_connector_property_set_value(connector, | ||
1981 | sdvo_priv->top_property, val); | ||
1982 | if (sdvo_priv->bottom_margin == temp_value) | ||
1983 | goto out; | ||
1984 | sdvo_priv->top_margin = temp_value; | ||
1985 | sdvo_priv->bottom_margin = temp_value; | ||
1986 | temp_value = sdvo_priv->max_vscan - | ||
1987 | sdvo_priv->top_margin; | ||
1988 | cmd = SDVO_CMD_SET_OVERSCAN_V; | ||
1989 | } else if (sdvo_priv->hpos_property == property) { | ||
1990 | if (sdvo_priv->cur_hpos == temp_value) | ||
1991 | goto out; | ||
1992 | |||
1993 | cmd = SDVO_CMD_SET_POSITION_H; | ||
1994 | sdvo_priv->cur_hpos = temp_value; | ||
1995 | } else if (sdvo_priv->vpos_property == property) { | ||
1996 | if (sdvo_priv->cur_vpos == temp_value) | ||
1997 | goto out; | ||
1998 | |||
1999 | cmd = SDVO_CMD_SET_POSITION_V; | ||
2000 | sdvo_priv->cur_vpos = temp_value; | ||
2001 | } else if (sdvo_priv->saturation_property == property) { | ||
2002 | if (sdvo_priv->cur_saturation == temp_value) | ||
2003 | goto out; | ||
2004 | |||
2005 | cmd = SDVO_CMD_SET_SATURATION; | ||
2006 | sdvo_priv->cur_saturation = temp_value; | ||
2007 | } else if (sdvo_priv->contrast_property == property) { | ||
2008 | if (sdvo_priv->cur_contrast == temp_value) | ||
2009 | goto out; | ||
2010 | |||
2011 | cmd = SDVO_CMD_SET_CONTRAST; | ||
2012 | sdvo_priv->cur_contrast = temp_value; | ||
2013 | } else if (sdvo_priv->hue_property == property) { | ||
2014 | if (sdvo_priv->cur_hue == temp_value) | ||
2015 | goto out; | ||
2016 | |||
2017 | cmd = SDVO_CMD_SET_HUE; | ||
2018 | sdvo_priv->cur_hue = temp_value; | ||
2019 | } else if (sdvo_priv->brightness_property == property) { | ||
2020 | if (sdvo_priv->cur_brightness == temp_value) | ||
2021 | goto out; | ||
2022 | |||
2023 | cmd = SDVO_CMD_SET_BRIGHTNESS; | ||
2024 | sdvo_priv->cur_brightness = temp_value; | ||
2025 | } | ||
2026 | if (cmd) { | ||
2027 | intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); | ||
2028 | status = intel_sdvo_read_response(intel_output, | ||
2029 | NULL, 0); | ||
2030 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2031 | DRM_DEBUG_KMS("Incorrect SDVO command\n"); | ||
2032 | return -EINVAL; | ||
2033 | } | ||
2034 | changed = true; | ||
2035 | } | ||
2036 | } | ||
1853 | if (changed && crtc) | 2037 | if (changed && crtc) |
1854 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, | 2038 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, |
1855 | crtc->y, crtc->fb); | 2039 | crtc->y, crtc->fb); |
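
Worked example of the margin arithmetic used for SDVO_CMD_SET_OVERSCAN_H/V above: left/right (and top/bottom) margins are forced symmetric, and the value sent to the encoder is the remaining scan size, max - margin, rather than the margin itself. The numbers below are purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t max_hscan = 100;    /* from SDVO_CMD_GET_MAX_OVERSCAN_H */
            uint16_t left_margin = 30;   /* user writes left_margin = 30 ... */
            uint16_t right_margin = left_margin;           /* ... right mirrors it */
            uint16_t overscan_h = max_hscan - left_margin; /* 70 goes to the encoder */

            printf("SET_OVERSCAN_H = %u (right_margin mirrored to %u)\n",
                   overscan_h, right_margin);
            return 0;
    }
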
@@ -2090,6 +2274,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2090 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | 2274 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; |
2091 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2275 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2092 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2276 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2277 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
2278 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
2093 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2279 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2094 | 2280 | ||
2095 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2281 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
@@ -2176,6 +2362,310 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) | |||
2176 | 2362 | ||
2177 | } | 2363 | } |
2178 | 2364 | ||
2365 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | ||
2366 | { | ||
2367 | struct intel_output *intel_output = to_intel_output(connector); | ||
2368 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
2369 | struct intel_sdvo_enhancements_reply sdvo_data; | ||
2370 | struct drm_device *dev = connector->dev; | ||
2371 | uint8_t status; | ||
2372 | uint16_t response, data_value[2]; | ||
2373 | |||
2374 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | ||
2375 | NULL, 0); | ||
2376 | status = intel_sdvo_read_response(intel_output, &sdvo_data, | ||
2377 | sizeof(sdvo_data)); | ||
2378 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2379 | DRM_DEBUG_KMS("incorrect response returned\n"); | ||
2380 | return; | ||
2381 | } | ||
2382 | response = *((uint16_t *)&sdvo_data); | ||
2383 | if (!response) { | ||
2384 | DRM_DEBUG_KMS("No enhancement is supported\n"); | ||
2385 | return; | ||
2386 | } | ||
2387 | if (sdvo_priv->is_tv) { | ||
2388 | /* when horizontal overscan is supported, add the left/right | ||
2389 | * properties | ||
2390 | */ | ||
2391 | if (sdvo_data.overscan_h) { | ||
2392 | intel_sdvo_write_cmd(intel_output, | ||
2393 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); | ||
2394 | status = intel_sdvo_read_response(intel_output, | ||
2395 | &data_value, 4); | ||
2396 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2397 | DRM_DEBUG_KMS("Incorrect SDVO max " | ||
2398 | "h_overscan\n"); | ||
2399 | return; | ||
2400 | } | ||
2401 | intel_sdvo_write_cmd(intel_output, | ||
2402 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); | ||
2403 | status = intel_sdvo_read_response(intel_output, | ||
2404 | &response, 2); | ||
2405 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2406 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); | ||
2407 | return; | ||
2408 | } | ||
2409 | sdvo_priv->max_hscan = data_value[0]; | ||
2410 | sdvo_priv->left_margin = data_value[0] - response; | ||
2411 | sdvo_priv->right_margin = sdvo_priv->left_margin; | ||
2412 | sdvo_priv->left_property = | ||
2413 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2414 | "left_margin", 2); | ||
2415 | sdvo_priv->left_property->values[0] = 0; | ||
2416 | sdvo_priv->left_property->values[1] = data_value[0]; | ||
2417 | drm_connector_attach_property(connector, | ||
2418 | sdvo_priv->left_property, | ||
2419 | sdvo_priv->left_margin); | ||
2420 | sdvo_priv->right_property = | ||
2421 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2422 | "right_margin", 2); | ||
2423 | sdvo_priv->right_property->values[0] = 0; | ||
2424 | sdvo_priv->right_property->values[1] = data_value[0]; | ||
2425 | drm_connector_attach_property(connector, | ||
2426 | sdvo_priv->right_property, | ||
2427 | sdvo_priv->right_margin); | ||
2428 | DRM_DEBUG_KMS("h_overscan: max %d, " | ||
2429 | "default %d, current %d\n", | ||
2430 | data_value[0], data_value[1], response); | ||
2431 | } | ||
2432 | if (sdvo_data.overscan_v) { | ||
2433 | intel_sdvo_write_cmd(intel_output, | ||
2434 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); | ||
2435 | status = intel_sdvo_read_response(intel_output, | ||
2436 | &data_value, 4); | ||
2437 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2438 | DRM_DEBUG_KMS("Incorrect SDVO max " | ||
2439 | "v_overscan\n"); | ||
2440 | return; | ||
2441 | } | ||
2442 | intel_sdvo_write_cmd(intel_output, | ||
2443 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); | ||
2444 | status = intel_sdvo_read_response(intel_output, | ||
2445 | &response, 2); | ||
2446 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2447 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); | ||
2448 | return; | ||
2449 | } | ||
2450 | sdvo_priv->max_vscan = data_value[0]; | ||
2451 | sdvo_priv->top_margin = data_value[0] - response; | ||
2452 | sdvo_priv->bottom_margin = sdvo_priv->top_margin; | ||
2453 | sdvo_priv->top_property = | ||
2454 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2455 | "top_margin", 2); | ||
2456 | sdvo_priv->top_property->values[0] = 0; | ||
2457 | sdvo_priv->top_property->values[1] = data_value[0]; | ||
2458 | drm_connector_attach_property(connector, | ||
2459 | sdvo_priv->top_property, | ||
2460 | sdvo_priv->top_margin); | ||
2461 | sdvo_priv->bottom_property = | ||
2462 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2463 | "bottom_margin", 2); | ||
2464 | sdvo_priv->bottom_property->values[0] = 0; | ||
2465 | sdvo_priv->bottom_property->values[1] = data_value[0]; | ||
2466 | drm_connector_attach_property(connector, | ||
2467 | sdvo_priv->bottom_property, | ||
2468 | sdvo_priv->bottom_margin); | ||
2469 | DRM_DEBUG_KMS("v_overscan: max %d, " | ||
2470 | "default %d, current %d\n", | ||
2471 | data_value[0], data_value[1], response); | ||
2472 | } | ||
2473 | if (sdvo_data.position_h) { | ||
2474 | intel_sdvo_write_cmd(intel_output, | ||
2475 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); | ||
2476 | status = intel_sdvo_read_response(intel_output, | ||
2477 | &data_value, 4); | ||
2478 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2479 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); | ||
2480 | return; | ||
2481 | } | ||
2482 | intel_sdvo_write_cmd(intel_output, | ||
2483 | SDVO_CMD_GET_POSITION_H, NULL, 0); | ||
2484 | status = intel_sdvo_read_response(intel_output, | ||
2485 | &response, 2); | ||
2486 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2487 | DRM_DEBUG_KMS("Incorrect SDVO get h_position\n"); | ||
2488 | return; | ||
2489 | } | ||
2490 | sdvo_priv->max_hpos = data_value[0]; | ||
2491 | sdvo_priv->cur_hpos = response; | ||
2492 | sdvo_priv->hpos_property = | ||
2493 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2494 | "hpos", 2); | ||
2495 | sdvo_priv->hpos_property->values[0] = 0; | ||
2496 | sdvo_priv->hpos_property->values[1] = data_value[0]; | ||
2497 | drm_connector_attach_property(connector, | ||
2498 | sdvo_priv->hpos_property, | ||
2499 | sdvo_priv->cur_hpos); | ||
2500 | DRM_DEBUG_KMS("h_position: max %d, " | ||
2501 | "default %d, current %d\n", | ||
2502 | data_value[0], data_value[1], response); | ||
2503 | } | ||
2504 | if (sdvo_data.position_v) { | ||
2505 | intel_sdvo_write_cmd(intel_output, | ||
2506 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); | ||
2507 | status = intel_sdvo_read_response(intel_output, | ||
2508 | &data_value, 4); | ||
2509 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2510 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); | ||
2511 | return; | ||
2512 | } | ||
2513 | intel_sdvo_write_cmd(intel_output, | ||
2514 | SDVO_CMD_GET_POSITION_V, NULL, 0); | ||
2515 | status = intel_sdvo_read_response(intel_output, | ||
2516 | &response, 2); | ||
2517 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2518 | DRM_DEBUG_KMS("Incorrect SDVO get v_position\n"); | ||
2519 | return; | ||
2520 | } | ||
2521 | sdvo_priv->max_vpos = data_value[0]; | ||
2522 | sdvo_priv->cur_vpos = response; | ||
2523 | sdvo_priv->vpos_property = | ||
2524 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2525 | "vpos", 2); | ||
2526 | sdvo_priv->vpos_property->values[0] = 0; | ||
2527 | sdvo_priv->vpos_property->values[1] = data_value[0]; | ||
2528 | drm_connector_attach_property(connector, | ||
2529 | sdvo_priv->vpos_property, | ||
2530 | sdvo_priv->cur_vpos); | ||
2531 | DRM_DEBUG_KMS("v_position: max %d, " | ||
2532 | "default %d, current %d\n", | ||
2533 | data_value[0], data_value[1], response); | ||
2534 | } | ||
2535 | } | ||
2536 | if (sdvo_priv->is_tv) { | ||
2537 | if (sdvo_data.saturation) { | ||
2538 | intel_sdvo_write_cmd(intel_output, | ||
2539 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); | ||
2540 | status = intel_sdvo_read_response(intel_output, | ||
2541 | &data_value, 4); | ||
2542 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2543 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); | ||
2544 | return; | ||
2545 | } | ||
2546 | intel_sdvo_write_cmd(intel_output, | ||
2547 | SDVO_CMD_GET_SATURATION, NULL, 0); | ||
2548 | status = intel_sdvo_read_response(intel_output, | ||
2549 | &response, 2); | ||
2550 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2551 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); | ||
2552 | return; | ||
2553 | } | ||
2554 | sdvo_priv->max_saturation = data_value[0]; | ||
2555 | sdvo_priv->cur_saturation = response; | ||
2556 | sdvo_priv->saturation_property = | ||
2557 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2558 | "saturation", 2); | ||
2559 | sdvo_priv->saturation_property->values[0] = 0; | ||
2560 | sdvo_priv->saturation_property->values[1] = | ||
2561 | data_value[0]; | ||
2562 | drm_connector_attach_property(connector, | ||
2563 | sdvo_priv->saturation_property, | ||
2564 | sdvo_priv->cur_saturation); | ||
2565 | DRM_DEBUG_KMS("saturation: max %d, " | ||
2566 | "default %d, current %d\n", | ||
2567 | data_value[0], data_value[1], response); | ||
2568 | } | ||
2569 | if (sdvo_data.contrast) { | ||
2570 | intel_sdvo_write_cmd(intel_output, | ||
2571 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); | ||
2572 | status = intel_sdvo_read_response(intel_output, | ||
2573 | &data_value, 4); | ||
2574 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2575 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); | ||
2576 | return; | ||
2577 | } | ||
2578 | intel_sdvo_write_cmd(intel_output, | ||
2579 | SDVO_CMD_GET_CONTRAST, NULL, 0); | ||
2580 | status = intel_sdvo_read_response(intel_output, | ||
2581 | &response, 2); | ||
2582 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2583 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); | ||
2584 | return; | ||
2585 | } | ||
2586 | sdvo_priv->max_contrast = data_value[0]; | ||
2587 | sdvo_priv->cur_contrast = response; | ||
2588 | sdvo_priv->contrast_property = | ||
2589 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2590 | "contrast", 2); | ||
2591 | sdvo_priv->contrast_property->values[0] = 0; | ||
2592 | sdvo_priv->contrast_property->values[1] = data_value[0]; | ||
2593 | drm_connector_attach_property(connector, | ||
2594 | sdvo_priv->contrast_property, | ||
2595 | sdvo_priv->cur_contrast); | ||
2596 | DRM_DEBUG_KMS("contrast: max %d, " | ||
2597 | "default %d, current %d\n", | ||
2598 | data_value[0], data_value[1], response); | ||
2599 | } | ||
2600 | if (sdvo_data.hue) { | ||
2601 | intel_sdvo_write_cmd(intel_output, | ||
2602 | SDVO_CMD_GET_MAX_HUE, NULL, 0); | ||
2603 | status = intel_sdvo_read_response(intel_output, | ||
2604 | &data_value, 4); | ||
2605 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2606 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); | ||
2607 | return; | ||
2608 | } | ||
2609 | intel_sdvo_write_cmd(intel_output, | ||
2610 | SDVO_CMD_GET_HUE, NULL, 0); | ||
2611 | status = intel_sdvo_read_response(intel_output, | ||
2612 | &response, 2); | ||
2613 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2614 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); | ||
2615 | return; | ||
2616 | } | ||
2617 | sdvo_priv->max_hue = data_value[0]; | ||
2618 | sdvo_priv->cur_hue = response; | ||
2619 | sdvo_priv->hue_property = | ||
2620 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2621 | "hue", 2); | ||
2622 | sdvo_priv->hue_property->values[0] = 0; | ||
2623 | sdvo_priv->hue_property->values[1] = | ||
2624 | data_value[0]; | ||
2625 | drm_connector_attach_property(connector, | ||
2626 | sdvo_priv->hue_property, | ||
2627 | sdvo_priv->cur_hue); | ||
2628 | DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n", | ||
2629 | data_value[0], data_value[1], response); | ||
2630 | } | ||
2631 | } | ||
2632 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | ||
2633 | if (sdvo_data.brightness) { | ||
2634 | intel_sdvo_write_cmd(intel_output, | ||
2635 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); | ||
2636 | status = intel_sdvo_read_response(intel_output, | ||
2637 | &data_value, 4); | ||
2638 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2639 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); | ||
2640 | return; | ||
2641 | } | ||
2642 | intel_sdvo_write_cmd(intel_output, | ||
2643 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); | ||
2644 | status = intel_sdvo_read_response(intel_output, | ||
2645 | &response, 2); | ||
2646 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2647 | DRM_DEBUG_KMS("Incorrect SDVO get brightness\n"); | ||
2648 | return; | ||
2649 | } | ||
2650 | sdvo_priv->max_brightness = data_value[0]; | ||
2651 | sdvo_priv->cur_brightness = response; | ||
2652 | sdvo_priv->brightness_property = | ||
2653 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2654 | "brightness", 2); | ||
2655 | sdvo_priv->brightness_property->values[0] = 0; | ||
2656 | sdvo_priv->brightness_property->values[1] = | ||
2657 | data_value[0]; | ||
2658 | drm_connector_attach_property(connector, | ||
2659 | sdvo_priv->brightness_property, | ||
2660 | sdvo_priv->cur_brightness); | ||
2661 | DRM_DEBUG_KMS("brightness: max %d, " | ||
2662 | "default %d, current %d\n", | ||
2663 | data_value[0], data_value[1], response); | ||
2664 | } | ||
2665 | } | ||
2666 | return; | ||
2667 | } | ||
2668 | |||
2179 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2669 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
2180 | { | 2670 | { |
2181 | struct drm_connector *connector; | 2671 | struct drm_connector *connector; |
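
Every enhancement above repeats the same four steps: query GET_MAX_* for the range, query GET_* for the current value, create a DRM_MODE_PROP_RANGE property over [0, max], and attach it to the connector. A self-contained sketch of that recipe factored into one helper (setup_range_prop() and its types are hypothetical, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    struct range_prop { const char *name; uint16_t max, cur; };

    /* hypothetical helper mirroring the repeated sequence above */
    static void setup_range_prop(struct range_prop *p, const char *name,
                                 uint16_t queried_max, uint16_t queried_cur)
    {
            p->name = name;
            p->max = queried_max; /* result of the GET_MAX_* SDVO command */
            p->cur = queried_cur; /* result of the matching GET_* command */
            printf("%s: range [0, %u], current %u\n", p->name, p->max, p->cur);
    }

    int main(void)
    {
            struct range_prop saturation;

            setup_range_prop(&saturation, "saturation", 255, 128);
            return 0;
    }
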
@@ -2264,6 +2754,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2264 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 2754 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); |
2265 | if (sdvo_priv->is_tv) | 2755 | if (sdvo_priv->is_tv) |
2266 | intel_sdvo_tv_create_property(connector); | 2756 | intel_sdvo_tv_create_property(connector); |
2757 | |||
2758 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) | ||
2759 | intel_sdvo_create_enhance_property(connector); | ||
2760 | |||
2267 | drm_sysfs_connector_add(connector); | 2761 | drm_sysfs_connector_add(connector); |
2268 | 2762 | ||
2269 | intel_sdvo_select_ddc_bus(sdvo_priv); | 2763 | intel_sdvo_select_ddc_bus(sdvo_priv); |
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore new file mode 100644 index 000000000000..403eb3a5891f --- /dev/null +++ b/drivers/gpu/drm/radeon/.gitignore | |||
@@ -0,0 +1,3 @@ | |||
1 | mkregtable | ||
2 | *_reg_safe.h | ||
3 | |||
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h index e2b92c445bab..d4e6e6e4a938 100644 --- a/drivers/gpu/drm/radeon/avivod.h +++ b/drivers/gpu/drm/radeon/avivod.h | |||
@@ -57,13 +57,4 @@ | |||
57 | #define VGA_RENDER_CONTROL 0x0300 | 57 | #define VGA_RENDER_CONTROL 0x0300 |
58 | #define VGA_VSTATUS_CNTL_MASK 0x00030000 | 58 | #define VGA_VSTATUS_CNTL_MASK 0x00030000 |
59 | 59 | ||
60 | /* AVIVO disable VGA rendering */ | ||
61 | static inline void radeon_avivo_vga_render_disable(struct radeon_device *rdev) | ||
62 | { | ||
63 | u32 vga_render; | ||
64 | vga_render = RREG32(VGA_RENDER_CONTROL); | ||
65 | vga_render &= ~VGA_VSTATUS_CNTL_MASK; | ||
66 | WREG32(VGA_RENDER_CONTROL, vga_render); | ||
67 | } | ||
68 | |||
69 | #endif | 60 | #endif |
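
The deleted helper was a straightforward read-modify-write on VGA_RENDER_CONTROL: read the register, clear only the VSTATUS control field, write it back. Self-contained model with a plain variable standing in for the RREG32/WREG32 MMIO accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define VGA_VSTATUS_CNTL_MASK 0x00030000u

    int main(void)
    {
            uint32_t vga_render = 0xdeadbeef;     /* pretend RREG32(VGA_RENDER_CONTROL) */

            vga_render &= ~VGA_VSTATUS_CNTL_MASK; /* clear only the VSTATUS field */
            printf("0x%08x\n", vga_render);       /* pretend WREG32(VGA_RENDER_CONTROL, ...) */
            return 0;
    }
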
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index be51c5f7d0f6..e6cce24de802 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p, | |||
863 | void r100_cs_dump_packet(struct radeon_cs_parser *p, | 863 | void r100_cs_dump_packet(struct radeon_cs_parser *p, |
864 | struct radeon_cs_packet *pkt) | 864 | struct radeon_cs_packet *pkt) |
865 | { | 865 | { |
866 | struct radeon_cs_chunk *ib_chunk; | ||
867 | volatile uint32_t *ib; | 866 | volatile uint32_t *ib; |
868 | unsigned i; | 867 | unsigned i; |
869 | unsigned idx; | 868 | unsigned idx; |
870 | 869 | ||
871 | ib = p->ib->ptr; | 870 | ib = p->ib->ptr; |
872 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
873 | idx = pkt->idx; | 871 | idx = pkt->idx; |
874 | for (i = 0; i <= (pkt->count + 1); i++, idx++) { | 872 | for (i = 0; i <= (pkt->count + 1); i++, idx++) { |
875 | DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); | 873 | DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); |
@@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
896 | idx, ib_chunk->length_dw); | 894 | idx, ib_chunk->length_dw); |
897 | return -EINVAL; | 895 | return -EINVAL; |
898 | } | 896 | } |
899 | header = ib_chunk->kdata[idx]; | 897 | header = radeon_get_ib_value(p, idx); |
900 | pkt->idx = idx; | 898 | pkt->idx = idx; |
901 | pkt->type = CP_PACKET_GET_TYPE(header); | 899 | pkt->type = CP_PACKET_GET_TYPE(header); |
902 | pkt->count = CP_PACKET_GET_COUNT(header); | 900 | pkt->count = CP_PACKET_GET_COUNT(header); |
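
From this point on, the checkers read command-stream words through radeon_get_ib_value() instead of dereferencing ib_chunk->kdata directly, so there is a single accessor that can hide how the IB chunk is actually mapped. A self-contained sketch of what such an accessor centralizes (the types and the bounds check here are illustrative, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    struct cs_chunk { uint32_t *kdata; unsigned length_dw; };
    struct cs_parser { struct cs_chunk ib; };

    static uint32_t get_ib_value(struct cs_parser *p, unsigned idx)
    {
            /* one place to bounds-check, or to fault user pages in
             * on demand instead of requiring a full kernel copy */
            return idx < p->ib.length_dw ? p->ib.kdata[idx] : 0;
    }

    int main(void)
    {
            uint32_t words[] = { 0xc0001234, 0x00000042 };
            struct cs_parser p = { { words, 2 } };

            printf("0x%08x\n", get_ib_value(&p, 1));
            return 0;
    }
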
@@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
939 | */ | 937 | */ |
940 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | 938 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) |
941 | { | 939 | { |
942 | struct radeon_cs_chunk *ib_chunk; | ||
943 | struct drm_mode_object *obj; | 940 | struct drm_mode_object *obj; |
944 | struct drm_crtc *crtc; | 941 | struct drm_crtc *crtc; |
945 | struct radeon_crtc *radeon_crtc; | 942 | struct radeon_crtc *radeon_crtc; |
@@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
947 | int crtc_id; | 944 | int crtc_id; |
948 | int r; | 945 | int r; |
949 | uint32_t header, h_idx, reg; | 946 | uint32_t header, h_idx, reg; |
947 | volatile uint32_t *ib; | ||
950 | 948 | ||
951 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 949 | ib = p->ib->ptr; |
952 | 950 | ||
953 | /* parse the wait until */ | 951 | /* parse the wait until */ |
954 | r = r100_cs_packet_parse(p, &waitreloc, p->idx); | 952 | r = r100_cs_packet_parse(p, &waitreloc, p->idx); |
@@ -963,24 +961,24 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
963 | return r; | 961 | return r; |
964 | } | 962 | } |
965 | 963 | ||
966 | if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { | 964 | if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { |
967 | DRM_ERROR("vline wait had illegal wait until\n"); | 965 | DRM_ERROR("vline wait had illegal wait until\n"); |
968 | r = -EINVAL; | 966 | r = -EINVAL; |
969 | return r; | 967 | return r; |
970 | } | 968 | } |
971 | 969 | ||
972 | /* jump over the NOP */ | 970 | /* jump over the NOP */ |
973 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); | 971 | r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2); |
974 | if (r) | 972 | if (r) |
975 | return r; | 973 | return r; |
976 | 974 | ||
977 | h_idx = p->idx - 2; | 975 | h_idx = p->idx - 2; |
978 | p->idx += waitreloc.count; | 976 | p->idx += waitreloc.count + 2; |
979 | p->idx += p3reloc.count; | 977 | p->idx += p3reloc.count + 2; |
980 | 978 | ||
981 | header = ib_chunk->kdata[h_idx]; | 979 | header = radeon_get_ib_value(p, h_idx); |
982 | crtc_id = ib_chunk->kdata[h_idx + 5]; | 980 | crtc_id = radeon_get_ib_value(p, h_idx + 5); |
983 | reg = ib_chunk->kdata[h_idx] >> 2; | 981 | reg = header >> 2; |
984 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | 982 | mutex_lock(&p->rdev->ddev->mode_config.mutex); |
985 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | 983 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
986 | if (!obj) { | 984 | if (!obj) { |
@@ -994,16 +992,16 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
994 | 992 | ||
995 | if (!crtc->enabled) { | 993 | if (!crtc->enabled) { |
996 | /* if the CRTC isn't enabled - we need to nop out the wait until */ | 994 | /* if the CRTC isn't enabled - we need to nop out the wait until */ |
997 | ib_chunk->kdata[h_idx + 2] = PACKET2(0); | 995 | ib[h_idx + 2] = PACKET2(0); |
998 | ib_chunk->kdata[h_idx + 3] = PACKET2(0); | 996 | ib[h_idx + 3] = PACKET2(0); |
999 | } else if (crtc_id == 1) { | 997 | } else if (crtc_id == 1) { |
1000 | switch (reg) { | 998 | switch (reg) { |
1001 | case AVIVO_D1MODE_VLINE_START_END: | 999 | case AVIVO_D1MODE_VLINE_START_END: |
1002 | header &= R300_CP_PACKET0_REG_MASK; | 1000 | header &= ~R300_CP_PACKET0_REG_MASK; |
1003 | header |= AVIVO_D2MODE_VLINE_START_END >> 2; | 1001 | header |= AVIVO_D2MODE_VLINE_START_END >> 2; |
1004 | break; | 1002 | break; |
1005 | case RADEON_CRTC_GUI_TRIG_VLINE: | 1003 | case RADEON_CRTC_GUI_TRIG_VLINE: |
1006 | header &= R300_CP_PACKET0_REG_MASK; | 1004 | header &= ~R300_CP_PACKET0_REG_MASK; |
1007 | header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; | 1005 | header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; |
1008 | break; | 1006 | break; |
1009 | default: | 1007 | default: |
@@ -1011,8 +1009,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | |||
1011 | r = -EINVAL; | 1009 | r = -EINVAL; |
1012 | goto out; | 1010 | goto out; |
1013 | } | 1011 | } |
1014 | ib_chunk->kdata[h_idx] = header; | 1012 | ib[h_idx] = header; |
1015 | ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | 1013 | ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
1016 | } | 1014 | } |
1017 | out: | 1015 | out: |
1018 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | 1016 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); |
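
Note the mask fix in this hunk: the old code did header &= R300_CP_PACKET0_REG_MASK, which keeps only the register field, while the intent is to clear that field before OR-ing in the CRTC2 register offset; that requires &= ~mask. Worked example (the REG_MASK value is illustrative, not the real constant):

    #include <stdint.h>
    #include <stdio.h>

    #define REG_MASK 0x00001fffu /* stand-in for R300_CP_PACKET0_REG_MASK */

    int main(void)
    {
            uint32_t header = 0xc0001234u;
            uint32_t wrong  = header & REG_MASK;  /* 0x00001234: opcode bits lost */
            uint32_t right  = header & ~REG_MASK; /* 0xc0000000: field cleared,
                                                     ready for the new offset */

            printf("wrong=0x%08x right=0x%08x\n", wrong, right);
            return 0;
    }
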
@@ -1033,7 +1031,6 @@ out: | |||
1033 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | 1031 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, |
1034 | struct radeon_cs_reloc **cs_reloc) | 1032 | struct radeon_cs_reloc **cs_reloc) |
1035 | { | 1033 | { |
1036 | struct radeon_cs_chunk *ib_chunk; | ||
1037 | struct radeon_cs_chunk *relocs_chunk; | 1034 | struct radeon_cs_chunk *relocs_chunk; |
1038 | struct radeon_cs_packet p3reloc; | 1035 | struct radeon_cs_packet p3reloc; |
1039 | unsigned idx; | 1036 | unsigned idx; |
@@ -1044,7 +1041,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | |||
1044 | return -EINVAL; | 1041 | return -EINVAL; |
1045 | } | 1042 | } |
1046 | *cs_reloc = NULL; | 1043 | *cs_reloc = NULL; |
1047 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1048 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 1044 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
1049 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); | 1045 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); |
1050 | if (r) { | 1046 | if (r) { |
@@ -1057,7 +1053,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | |||
1057 | r100_cs_dump_packet(p, &p3reloc); | 1053 | r100_cs_dump_packet(p, &p3reloc); |
1058 | return -EINVAL; | 1054 | return -EINVAL; |
1059 | } | 1055 | } |
1060 | idx = ib_chunk->kdata[p3reloc.idx + 1]; | 1056 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); |
1061 | if (idx >= relocs_chunk->length_dw) { | 1057 | if (idx >= relocs_chunk->length_dw) { |
1062 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | 1058 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", |
1063 | idx, relocs_chunk->length_dw); | 1059 | idx, relocs_chunk->length_dw); |
@@ -1126,7 +1122,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1126 | struct radeon_cs_packet *pkt, | 1122 | struct radeon_cs_packet *pkt, |
1127 | unsigned idx, unsigned reg) | 1123 | unsigned idx, unsigned reg) |
1128 | { | 1124 | { |
1129 | struct radeon_cs_chunk *ib_chunk; | ||
1130 | struct radeon_cs_reloc *reloc; | 1125 | struct radeon_cs_reloc *reloc; |
1131 | struct r100_cs_track *track; | 1126 | struct r100_cs_track *track; |
1132 | volatile uint32_t *ib; | 1127 | volatile uint32_t *ib; |
@@ -1134,11 +1129,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1134 | int r; | 1129 | int r; |
1135 | int i, face; | 1130 | int i, face; |
1136 | u32 tile_flags = 0; | 1131 | u32 tile_flags = 0; |
1132 | u32 idx_value; | ||
1137 | 1133 | ||
1138 | ib = p->ib->ptr; | 1134 | ib = p->ib->ptr; |
1139 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1140 | track = (struct r100_cs_track *)p->track; | 1135 | track = (struct r100_cs_track *)p->track; |
1141 | 1136 | ||
1137 | idx_value = radeon_get_ib_value(p, idx); | ||
1138 | |||
1142 | switch (reg) { | 1139 | switch (reg) { |
1143 | case RADEON_CRTC_GUI_TRIG_VLINE: | 1140 | case RADEON_CRTC_GUI_TRIG_VLINE: |
1144 | r = r100_cs_packet_parse_vline(p); | 1141 | r = r100_cs_packet_parse_vline(p); |
@@ -1166,8 +1163,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1166 | return r; | 1163 | return r; |
1167 | } | 1164 | } |
1168 | track->zb.robj = reloc->robj; | 1165 | track->zb.robj = reloc->robj; |
1169 | track->zb.offset = ib_chunk->kdata[idx]; | 1166 | track->zb.offset = idx_value; |
1170 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1167 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1171 | break; | 1168 | break; |
1172 | case RADEON_RB3D_COLOROFFSET: | 1169 | case RADEON_RB3D_COLOROFFSET: |
1173 | r = r100_cs_packet_next_reloc(p, &reloc); | 1170 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1178,8 +1175,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1178 | return r; | 1175 | return r; |
1179 | } | 1176 | } |
1180 | track->cb[0].robj = reloc->robj; | 1177 | track->cb[0].robj = reloc->robj; |
1181 | track->cb[0].offset = ib_chunk->kdata[idx]; | 1178 | track->cb[0].offset = idx_value; |
1182 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1179 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1183 | break; | 1180 | break; |
1184 | case RADEON_PP_TXOFFSET_0: | 1181 | case RADEON_PP_TXOFFSET_0: |
1185 | case RADEON_PP_TXOFFSET_1: | 1182 | case RADEON_PP_TXOFFSET_1: |
@@ -1192,7 +1189,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1192 | r100_cs_dump_packet(p, pkt); | 1189 | r100_cs_dump_packet(p, pkt); |
1193 | return r; | 1190 | return r; |
1194 | } | 1191 | } |
1195 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1192 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1196 | track->textures[i].robj = reloc->robj; | 1193 | track->textures[i].robj = reloc->robj; |
1197 | break; | 1194 | break; |
1198 | case RADEON_PP_CUBIC_OFFSET_T0_0: | 1195 | case RADEON_PP_CUBIC_OFFSET_T0_0: |
@@ -1208,8 +1205,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1208 | r100_cs_dump_packet(p, pkt); | 1205 | r100_cs_dump_packet(p, pkt); |
1209 | return r; | 1206 | return r; |
1210 | } | 1207 | } |
1211 | track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; | 1208 | track->textures[0].cube_info[i].offset = idx_value; |
1212 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1209 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1213 | track->textures[0].cube_info[i].robj = reloc->robj; | 1210 | track->textures[0].cube_info[i].robj = reloc->robj; |
1214 | break; | 1211 | break; |
1215 | case RADEON_PP_CUBIC_OFFSET_T1_0: | 1212 | case RADEON_PP_CUBIC_OFFSET_T1_0: |
@@ -1225,8 +1222,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1225 | r100_cs_dump_packet(p, pkt); | 1222 | r100_cs_dump_packet(p, pkt); |
1226 | return r; | 1223 | return r; |
1227 | } | 1224 | } |
1228 | track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; | 1225 | track->textures[1].cube_info[i].offset = idx_value; |
1229 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1226 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1230 | track->textures[1].cube_info[i].robj = reloc->robj; | 1227 | track->textures[1].cube_info[i].robj = reloc->robj; |
1231 | break; | 1228 | break; |
1232 | case RADEON_PP_CUBIC_OFFSET_T2_0: | 1229 | case RADEON_PP_CUBIC_OFFSET_T2_0: |
@@ -1242,12 +1239,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1242 | r100_cs_dump_packet(p, pkt); | 1239 | r100_cs_dump_packet(p, pkt); |
1243 | return r; | 1240 | return r; |
1244 | } | 1241 | } |
1245 | track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; | 1242 | track->textures[2].cube_info[i].offset = idx_value; |
1246 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1243 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1247 | track->textures[2].cube_info[i].robj = reloc->robj; | 1244 | track->textures[2].cube_info[i].robj = reloc->robj; |
1248 | break; | 1245 | break; |
1249 | case RADEON_RE_WIDTH_HEIGHT: | 1246 | case RADEON_RE_WIDTH_HEIGHT: |
1250 | track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); | 1247 | track->maxy = ((idx_value >> 16) & 0x7FF); |
1251 | break; | 1248 | break; |
1252 | case RADEON_RB3D_COLORPITCH: | 1249 | case RADEON_RB3D_COLORPITCH: |
1253 | r = r100_cs_packet_next_reloc(p, &reloc); | 1250 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1263,17 +1260,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1263 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1260 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
1264 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 1261 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
1265 | 1262 | ||
1266 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 1263 | tmp = idx_value & ~(0x7 << 16); |
1267 | tmp |= tile_flags; | 1264 | tmp |= tile_flags; |
1268 | ib[idx] = tmp; | 1265 | ib[idx] = tmp; |
1269 | 1266 | ||
1270 | track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; | 1267 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
1271 | break; | 1268 | break; |
1272 | case RADEON_RB3D_DEPTHPITCH: | 1269 | case RADEON_RB3D_DEPTHPITCH: |
1273 | track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; | 1270 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
1274 | break; | 1271 | break; |
1275 | case RADEON_RB3D_CNTL: | 1272 | case RADEON_RB3D_CNTL: |
1276 | switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 1273 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
1277 | case 7: | 1274 | case 7: |
1278 | case 8: | 1275 | case 8: |
1279 | case 9: | 1276 | case 9: |
@@ -1291,13 +1288,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1291 | break; | 1288 | break; |
1292 | default: | 1289 | default: |
1293 | DRM_ERROR("Invalid color buffer format (%d) !\n", | 1290 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
1294 | ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); | 1291 | ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); |
1295 | return -EINVAL; | 1292 | return -EINVAL; |
1296 | } | 1293 | } |
1297 | track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); | 1294 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
1298 | break; | 1295 | break; |
1299 | case RADEON_RB3D_ZSTENCILCNTL: | 1296 | case RADEON_RB3D_ZSTENCILCNTL: |
1300 | switch (ib_chunk->kdata[idx] & 0xf) { | 1297 | switch (idx_value & 0xf) { |
1301 | case 0: | 1298 | case 0: |
1302 | track->zb.cpp = 2; | 1299 | track->zb.cpp = 2; |
1303 | break; | 1300 | break; |
@@ -1321,44 +1318,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1321 | r100_cs_dump_packet(p, pkt); | 1318 | r100_cs_dump_packet(p, pkt); |
1322 | return r; | 1319 | return r; |
1323 | } | 1320 | } |
1324 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1321 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1325 | break; | 1322 | break; |
1326 | case RADEON_PP_CNTL: | 1323 | case RADEON_PP_CNTL: |
1327 | { | 1324 | { |
1328 | uint32_t temp = ib_chunk->kdata[idx] >> 4; | 1325 | uint32_t temp = idx_value >> 4; |
1329 | for (i = 0; i < track->num_texture; i++) | 1326 | for (i = 0; i < track->num_texture; i++) |
1330 | track->textures[i].enabled = !!(temp & (1 << i)); | 1327 | track->textures[i].enabled = !!(temp & (1 << i)); |
1331 | } | 1328 | } |
1332 | break; | 1329 | break; |
1333 | case RADEON_SE_VF_CNTL: | 1330 | case RADEON_SE_VF_CNTL: |
1334 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1331 | track->vap_vf_cntl = idx_value; |
1335 | break; | 1332 | break; |
1336 | case RADEON_SE_VTX_FMT: | 1333 | case RADEON_SE_VTX_FMT: |
1337 | track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); | 1334 | track->vtx_size = r100_get_vtx_size(idx_value); |
1338 | break; | 1335 | break; |
1339 | case RADEON_PP_TEX_SIZE_0: | 1336 | case RADEON_PP_TEX_SIZE_0: |
1340 | case RADEON_PP_TEX_SIZE_1: | 1337 | case RADEON_PP_TEX_SIZE_1: |
1341 | case RADEON_PP_TEX_SIZE_2: | 1338 | case RADEON_PP_TEX_SIZE_2: |
1342 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; | 1339 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
1343 | track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; | 1340 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
1344 | track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 1341 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
1345 | break; | 1342 | break; |
1346 | case RADEON_PP_TEX_PITCH_0: | 1343 | case RADEON_PP_TEX_PITCH_0: |
1347 | case RADEON_PP_TEX_PITCH_1: | 1344 | case RADEON_PP_TEX_PITCH_1: |
1348 | case RADEON_PP_TEX_PITCH_2: | 1345 | case RADEON_PP_TEX_PITCH_2: |
1349 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; | 1346 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
1350 | track->textures[i].pitch = ib_chunk->kdata[idx] + 32; | 1347 | track->textures[i].pitch = idx_value + 32; |
1351 | break; | 1348 | break; |
1352 | case RADEON_PP_TXFILTER_0: | 1349 | case RADEON_PP_TXFILTER_0: |
1353 | case RADEON_PP_TXFILTER_1: | 1350 | case RADEON_PP_TXFILTER_1: |
1354 | case RADEON_PP_TXFILTER_2: | 1351 | case RADEON_PP_TXFILTER_2: |
1355 | i = (reg - RADEON_PP_TXFILTER_0) / 24; | 1352 | i = (reg - RADEON_PP_TXFILTER_0) / 24; |
1356 | track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) | 1353 | track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK) |
1357 | >> RADEON_MAX_MIP_LEVEL_SHIFT); | 1354 | >> RADEON_MAX_MIP_LEVEL_SHIFT); |
1358 | tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; | 1355 | tmp = (idx_value >> 23) & 0x7; |
1359 | if (tmp == 2 || tmp == 6) | 1356 | if (tmp == 2 || tmp == 6) |
1360 | track->textures[i].roundup_w = false; | 1357 | track->textures[i].roundup_w = false; |
1361 | tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; | 1358 | tmp = (idx_value >> 27) & 0x7; |
1362 | if (tmp == 2 || tmp == 6) | 1359 | if (tmp == 2 || tmp == 6) |
1363 | track->textures[i].roundup_h = false; | 1360 | track->textures[i].roundup_h = false; |
1364 | break; | 1361 | break; |
@@ -1366,16 +1363,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1366 | case RADEON_PP_TXFORMAT_1: | 1363 | case RADEON_PP_TXFORMAT_1: |
1367 | case RADEON_PP_TXFORMAT_2: | 1364 | case RADEON_PP_TXFORMAT_2: |
1368 | i = (reg - RADEON_PP_TXFORMAT_0) / 24; | 1365 | i = (reg - RADEON_PP_TXFORMAT_0) / 24; |
1369 | if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { | 1366 | if (idx_value & RADEON_TXFORMAT_NON_POWER2) { |
1370 | track->textures[i].use_pitch = 1; | 1367 | track->textures[i].use_pitch = 1; |
1371 | } else { | 1368 | } else { |
1372 | track->textures[i].use_pitch = 0; | 1369 | track->textures[i].use_pitch = 0; |
1373 | track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); | 1370 | track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
1374 | track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); | 1371 | track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
1375 | } | 1372 | } |
1376 | if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) | 1373 | if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) |
1377 | track->textures[i].tex_coord_type = 2; | 1374 | track->textures[i].tex_coord_type = 2; |
1378 | switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { | 1375 | switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { |
1379 | case RADEON_TXFORMAT_I8: | 1376 | case RADEON_TXFORMAT_I8: |
1380 | case RADEON_TXFORMAT_RGB332: | 1377 | case RADEON_TXFORMAT_RGB332: |
1381 | case RADEON_TXFORMAT_Y8: | 1378 | case RADEON_TXFORMAT_Y8: |
@@ -1402,13 +1399,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1402 | track->textures[i].cpp = 4; | 1399 | track->textures[i].cpp = 4; |
1403 | break; | 1400 | break; |
1404 | } | 1401 | } |
1405 | track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); | 1402 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
1406 | track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); | 1403 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
1407 | break; | 1404 | break; |
1408 | case RADEON_PP_CUBIC_FACES_0: | 1405 | case RADEON_PP_CUBIC_FACES_0: |
1409 | case RADEON_PP_CUBIC_FACES_1: | 1406 | case RADEON_PP_CUBIC_FACES_1: |
1410 | case RADEON_PP_CUBIC_FACES_2: | 1407 | case RADEON_PP_CUBIC_FACES_2: |
1411 | tmp = ib_chunk->kdata[idx]; | 1408 | tmp = idx_value; |
1412 | i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; | 1409 | i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; |
1413 | for (face = 0; face < 4; face++) { | 1410 | for (face = 0; face < 4; face++) { |
1414 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 1411 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
@@ -1427,15 +1424,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | |||
1427 | struct radeon_cs_packet *pkt, | 1424 | struct radeon_cs_packet *pkt, |
1428 | struct radeon_object *robj) | 1425 | struct radeon_object *robj) |
1429 | { | 1426 | { |
1430 | struct radeon_cs_chunk *ib_chunk; | ||
1431 | unsigned idx; | 1427 | unsigned idx; |
1432 | 1428 | u32 value; | |
1433 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1434 | idx = pkt->idx + 1; | 1429 | idx = pkt->idx + 1; |
1435 | if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) { | 1430 | value = radeon_get_ib_value(p, idx + 2); |
1431 | if ((value + 1) > radeon_object_size(robj)) { | ||
1436 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " | 1432 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " |
1437 | "(need %u have %lu) !\n", | 1433 | "(need %u have %lu) !\n", |
1438 | ib_chunk->kdata[idx+2] + 1, | 1434 | value + 1, |
1439 | radeon_object_size(robj)); | 1435 | radeon_object_size(robj)); |
1440 | return -EINVAL; | 1436 | return -EINVAL; |
1441 | } | 1437 | } |
@@ -1445,59 +1441,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | |||
1445 | static int r100_packet3_check(struct radeon_cs_parser *p, | 1441 | static int r100_packet3_check(struct radeon_cs_parser *p, |
1446 | struct radeon_cs_packet *pkt) | 1442 | struct radeon_cs_packet *pkt) |
1447 | { | 1443 | { |
1448 | struct radeon_cs_chunk *ib_chunk; | ||
1449 | struct radeon_cs_reloc *reloc; | 1444 | struct radeon_cs_reloc *reloc; |
1450 | struct r100_cs_track *track; | 1445 | struct r100_cs_track *track; |
1451 | unsigned idx; | 1446 | unsigned idx; |
1452 | unsigned i, c; | ||
1453 | volatile uint32_t *ib; | 1447 | volatile uint32_t *ib; |
1454 | int r; | 1448 | int r; |
1455 | 1449 | ||
1456 | ib = p->ib->ptr; | 1450 | ib = p->ib->ptr; |
1457 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1458 | idx = pkt->idx + 1; | 1451 | idx = pkt->idx + 1; |
1459 | track = (struct r100_cs_track *)p->track; | 1452 | track = (struct r100_cs_track *)p->track; |
1460 | switch (pkt->opcode) { | 1453 | switch (pkt->opcode) { |
1461 | case PACKET3_3D_LOAD_VBPNTR: | 1454 | case PACKET3_3D_LOAD_VBPNTR: |
1462 | c = ib_chunk->kdata[idx++]; | 1455 | r = r100_packet3_load_vbpntr(p, pkt, idx); |
1463 | track->num_arrays = c; | 1456 | if (r) |
1464 | for (i = 0; i < (c - 1); i += 2, idx += 3) { | 1457 | return r; |
1465 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1466 | if (r) { | ||
1467 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1468 | pkt->opcode); | ||
1469 | r100_cs_dump_packet(p, pkt); | ||
1470 | return r; | ||
1471 | } | ||
1472 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1473 | track->arrays[i + 0].robj = reloc->robj; | ||
1474 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1475 | track->arrays[i + 0].esize &= 0x7F; | ||
1476 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1477 | if (r) { | ||
1478 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1479 | pkt->opcode); | ||
1480 | r100_cs_dump_packet(p, pkt); | ||
1481 | return r; | ||
1482 | } | ||
1483 | ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); | ||
1484 | track->arrays[i + 1].robj = reloc->robj; | ||
1485 | track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24; | ||
1486 | track->arrays[i + 1].esize &= 0x7F; | ||
1487 | } | ||
1488 | if (c & 1) { | ||
1489 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1490 | if (r) { | ||
1491 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1492 | pkt->opcode); | ||
1493 | r100_cs_dump_packet(p, pkt); | ||
1494 | return r; | ||
1495 | } | ||
1496 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1497 | track->arrays[i + 0].robj = reloc->robj; | ||
1498 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1499 | track->arrays[i + 0].esize &= 0x7F; | ||
1500 | } | ||
1501 | break; | 1458 | break; |
1502 | case PACKET3_INDX_BUFFER: | 1459 | case PACKET3_INDX_BUFFER: |
1503 | r = r100_cs_packet_next_reloc(p, &reloc); | 1460 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1506,7 +1463,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1506 | r100_cs_dump_packet(p, pkt); | 1463 | r100_cs_dump_packet(p, pkt); |
1507 | return r; | 1464 | return r; |
1508 | } | 1465 | } |
1509 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | 1466 | ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); |
1510 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); | 1467 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1511 | if (r) { | 1468 | if (r) { |
1512 | return r; | 1469 | return r; |
@@ -1520,27 +1477,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1520 | r100_cs_dump_packet(p, pkt); | 1477 | r100_cs_dump_packet(p, pkt); |
1521 | return r; | 1478 | return r; |
1522 | } | 1479 | } |
1523 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1480 | ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); |
1524 | track->num_arrays = 1; | 1481 | track->num_arrays = 1; |
1525 | track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); | 1482 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); |
1526 | 1483 | ||
1527 | track->arrays[0].robj = reloc->robj; | 1484 | track->arrays[0].robj = reloc->robj; |
1528 | track->arrays[0].esize = track->vtx_size; | 1485 | track->arrays[0].esize = track->vtx_size; |
1529 | 1486 | ||
1530 | track->max_indx = ib_chunk->kdata[idx+1]; | 1487 | track->max_indx = radeon_get_ib_value(p, idx+1); |
1531 | 1488 | ||
1532 | track->vap_vf_cntl = ib_chunk->kdata[idx+3]; | 1489 | track->vap_vf_cntl = radeon_get_ib_value(p, idx+3); |
1533 | track->immd_dwords = pkt->count - 1; | 1490 | track->immd_dwords = pkt->count - 1; |
1534 | r = r100_cs_track_check(p->rdev, track); | 1491 | r = r100_cs_track_check(p->rdev, track); |
1535 | if (r) | 1492 | if (r) |
1536 | return r; | 1493 | return r; |
1537 | break; | 1494 | break; |
1538 | case PACKET3_3D_DRAW_IMMD: | 1495 | case PACKET3_3D_DRAW_IMMD: |
1539 | if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { | 1496 | if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { |
1540 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1497 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1541 | return -EINVAL; | 1498 | return -EINVAL; |
1542 | } | 1499 | } |
1543 | track->vap_vf_cntl = ib_chunk->kdata[idx+1]; | 1500 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1544 | track->immd_dwords = pkt->count - 1; | 1501 | track->immd_dwords = pkt->count - 1; |
1545 | r = r100_cs_track_check(p->rdev, track); | 1502 | r = r100_cs_track_check(p->rdev, track); |
1546 | if (r) | 1503 | if (r) |
@@ -1548,11 +1505,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1548 | break; | 1505 | break; |
1549 | /* triggers drawing using in-packet vertex data */ | 1506 | /* triggers drawing using in-packet vertex data */ |
1550 | case PACKET3_3D_DRAW_IMMD_2: | 1507 | case PACKET3_3D_DRAW_IMMD_2: |
1551 | if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { | 1508 | if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { |
1552 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1509 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1553 | return -EINVAL; | 1510 | return -EINVAL; |
1554 | } | 1511 | } |
1555 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1512 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1556 | track->immd_dwords = pkt->count; | 1513 | track->immd_dwords = pkt->count; |
1557 | r = r100_cs_track_check(p->rdev, track); | 1514 | r = r100_cs_track_check(p->rdev, track); |
1558 | if (r) | 1515 | if (r) |
@@ -1560,28 +1517,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1560 | break; | 1517 | break; |
1561 | /* triggers drawing using in-packet vertex data */ | 1518 | /* triggers drawing using in-packet vertex data */ |
1562 | case PACKET3_3D_DRAW_VBUF_2: | 1519 | case PACKET3_3D_DRAW_VBUF_2: |
1563 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1520 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1564 | r = r100_cs_track_check(p->rdev, track); | 1521 | r = r100_cs_track_check(p->rdev, track); |
1565 | if (r) | 1522 | if (r) |
1566 | return r; | 1523 | return r; |
1567 | break; | 1524 | break; |
1568 | /* triggers drawing of vertex buffers setup elsewhere */ | 1525 | /* triggers drawing of vertex buffers setup elsewhere */ |
1569 | case PACKET3_3D_DRAW_INDX_2: | 1526 | case PACKET3_3D_DRAW_INDX_2: |
1570 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1527 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1571 | r = r100_cs_track_check(p->rdev, track); | 1528 | r = r100_cs_track_check(p->rdev, track); |
1572 | if (r) | 1529 | if (r) |
1573 | return r; | 1530 | return r; |
1574 | break; | 1531 | break; |
1575 | /* triggers drawing using indices to vertex buffer */ | 1532 | /* triggers drawing using indices to vertex buffer */ |
1576 | case PACKET3_3D_DRAW_VBUF: | 1533 | case PACKET3_3D_DRAW_VBUF: |
1577 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1534 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1578 | r = r100_cs_track_check(p->rdev, track); | 1535 | r = r100_cs_track_check(p->rdev, track); |
1579 | if (r) | 1536 | if (r) |
1580 | return r; | 1537 | return r; |
1581 | break; | 1538 | break; |
1582 | /* triggers drawing of vertex buffers setup elsewhere */ | 1539 | /* triggers drawing of vertex buffers setup elsewhere */ |
1583 | case PACKET3_3D_DRAW_INDX: | 1540 | case PACKET3_3D_DRAW_INDX: |
1584 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1541 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1585 | r = r100_cs_track_check(p->rdev, track); | 1542 | r = r100_cs_track_check(p->rdev, track); |
1586 | if (r) | 1543 | if (r) |
1587 | return r; | 1544 | return r; |
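The r100.c hunks above replace every direct ib_chunk->kdata[idx] read with the radeon_get_ib_value(p, idx) accessor, which is why r100_packet3_check() can drop its local ib_chunk pointer. The accessor's definition is not part of these hunks; below is a minimal sketch of the simplest form it could take, built only from identifiers the old code used and assuming the IB chunk is still fully mapped into kernel memory. The real helper this patch introduces may instead map and read the IB page by page, which is the point of the refactor.

    /* Sketch only -- assumes the chunk data is fully mapped; the accessor
     * the patch actually adds may page the IB in on demand. */
    static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, unsigned idx)
    {
            struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];

            return ib_chunk->kdata[idx];
    }

Either way, the parser code above depends only on the accessor's contract: return dword idx of the indirect buffer.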
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index 70a82eda394a..0daf0d76a891 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
84 | struct radeon_cs_packet *pkt, | 84 | struct radeon_cs_packet *pkt, |
85 | unsigned idx, unsigned reg); | 85 | unsigned idx, unsigned reg); |
86 | 86 | ||
87 | |||
88 | |||
87 | static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | 89 | static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, |
88 | struct radeon_cs_packet *pkt, | 90 | struct radeon_cs_packet *pkt, |
89 | unsigned idx, | 91 | unsigned idx, |
@@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
93 | u32 tile_flags = 0; | 95 | u32 tile_flags = 0; |
94 | u32 tmp; | 96 | u32 tmp; |
95 | struct radeon_cs_reloc *reloc; | 97 | struct radeon_cs_reloc *reloc; |
96 | struct radeon_cs_chunk *ib_chunk; | 98 | u32 value; |
97 | |||
98 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
99 | 99 | ||
100 | r = r100_cs_packet_next_reloc(p, &reloc); | 100 | r = r100_cs_packet_next_reloc(p, &reloc); |
101 | if (r) { | 101 | if (r) { |
@@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
104 | r100_cs_dump_packet(p, pkt); | 104 | r100_cs_dump_packet(p, pkt); |
105 | return r; | 105 | return r; |
106 | } | 106 | } |
107 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 107 | value = radeon_get_ib_value(p, idx); |
108 | tmp = value & 0x003fffff; | ||
108 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 109 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
109 | 110 | ||
110 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 111 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
@@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
119 | } | 120 | } |
120 | 121 | ||
121 | tmp |= tile_flags; | 122 | tmp |= tile_flags; |
122 | p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | 123 | p->ib->ptr[idx] = (value & 0x3fc00000) | tmp; |
123 | return 0; | 124 | return 0; |
124 | } | 125 | } |
126 | |||
127 | static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | ||
128 | struct radeon_cs_packet *pkt, | ||
129 | int idx) | ||
130 | { | ||
131 | unsigned c, i; | ||
132 | struct radeon_cs_reloc *reloc; | ||
133 | struct r100_cs_track *track; | ||
134 | int r = 0; | ||
135 | volatile uint32_t *ib; | ||
136 | u32 idx_value; | ||
137 | |||
138 | ib = p->ib->ptr; | ||
139 | track = (struct r100_cs_track *)p->track; | ||
140 | c = radeon_get_ib_value(p, idx++) & 0x1F; | ||
141 | track->num_arrays = c; | ||
142 | for (i = 0; i < (c - 1); i+=2, idx+=3) { | ||
143 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
144 | if (r) { | ||
145 | DRM_ERROR("No reloc for packet3 %d\n", | ||
146 | pkt->opcode); | ||
147 | r100_cs_dump_packet(p, pkt); | ||
148 | return r; | ||
149 | } | ||
150 | idx_value = radeon_get_ib_value(p, idx); | ||
151 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
152 | |||
153 | track->arrays[i + 0].esize = idx_value >> 8; | ||
154 | track->arrays[i + 0].robj = reloc->robj; | ||
155 | track->arrays[i + 0].esize &= 0x7F; | ||
156 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
157 | if (r) { | ||
158 | DRM_ERROR("No reloc for packet3 %d\n", | ||
159 | pkt->opcode); | ||
160 | r100_cs_dump_packet(p, pkt); | ||
161 | return r; | ||
162 | } | ||
163 | ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); | ||
164 | track->arrays[i + 1].robj = reloc->robj; | ||
165 | track->arrays[i + 1].esize = idx_value >> 24; | ||
166 | track->arrays[i + 1].esize &= 0x7F; | ||
167 | } | ||
168 | if (c & 1) { | ||
169 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
170 | if (r) { | ||
171 | DRM_ERROR("No reloc for packet3 %d\n", | ||
172 | pkt->opcode); | ||
173 | r100_cs_dump_packet(p, pkt); | ||
174 | return r; | ||
175 | } | ||
176 | idx_value = radeon_get_ib_value(p, idx); | ||
177 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | ||
178 | track->arrays[i + 0].robj = reloc->robj; | ||
179 | track->arrays[i + 0].esize = idx_value >> 8; | ||
180 | track->arrays[i + 0].esize &= 0x7F; | ||
181 | } | ||
182 | return r; | ||
183 | } | ||
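r100_packet3_load_vbpntr() is the common helper that both the r100 and r300 packet3 parsers now call for PACKET3_3D_LOAD_VBPNTR. The payload layout it walks: one count dword (low 5 bits give the number of vertex arrays), then per pair of arrays a packed element-size dword followed by two relocated offset dwords, with a two-dword tail when the count is odd. A hypothetical stand-alone model of that dword accounting, not driver code, just the same loop shape:

    #include <stdio.h>

    /* mirrors the idx advancement in r100_packet3_load_vbpntr(): 3 dwords
     * per array pair, 2 dwords for the odd tail */
    static unsigned load_vbpntr_payload_dwords(unsigned c)
    {
            unsigned i, dw = 1;             /* the count dword itself */

            for (i = 0; i < (c - 1); i += 2)
                    dw += 3;                /* sizes dword + two offsets */
            if (c & 1)
                    dw += 2;                /* sizes dword + one offset */
            return dw;
    }

    int main(void)
    {
            unsigned c;

            for (c = 1; c <= 5; c++)
                    printf("%u arrays -> %u payload dwords\n", c,
                           load_vbpntr_payload_dwords(c));
            return 0;
    }

One behavioral detail of the merge is visible in the hunks: the shared helper masks the count with 0x1F, as the r300 copy always did, where the old r100 copy used the raw dword.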
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 568c74bfba3d..cf7fea5ff2e5 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
96 | struct radeon_cs_packet *pkt, | 96 | struct radeon_cs_packet *pkt, |
97 | unsigned idx, unsigned reg) | 97 | unsigned idx, unsigned reg) |
98 | { | 98 | { |
99 | struct radeon_cs_chunk *ib_chunk; | ||
100 | struct radeon_cs_reloc *reloc; | 99 | struct radeon_cs_reloc *reloc; |
101 | struct r100_cs_track *track; | 100 | struct r100_cs_track *track; |
102 | volatile uint32_t *ib; | 101 | volatile uint32_t *ib; |
@@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
105 | int i; | 104 | int i; |
106 | int face; | 105 | int face; |
107 | u32 tile_flags = 0; | 106 | u32 tile_flags = 0; |
107 | u32 idx_value; | ||
108 | 108 | ||
109 | ib = p->ib->ptr; | 109 | ib = p->ib->ptr; |
110 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
111 | track = (struct r100_cs_track *)p->track; | 110 | track = (struct r100_cs_track *)p->track; |
112 | 111 | idx_value = radeon_get_ib_value(p, idx); | |
113 | switch (reg) { | 112 | switch (reg) { |
114 | case RADEON_CRTC_GUI_TRIG_VLINE: | 113 | case RADEON_CRTC_GUI_TRIG_VLINE: |
115 | r = r100_cs_packet_parse_vline(p); | 114 | r = r100_cs_packet_parse_vline(p); |
@@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
137 | return r; | 136 | return r; |
138 | } | 137 | } |
139 | track->zb.robj = reloc->robj; | 138 | track->zb.robj = reloc->robj; |
140 | track->zb.offset = ib_chunk->kdata[idx]; | 139 | track->zb.offset = idx_value; |
141 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 140 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
142 | break; | 141 | break; |
143 | case RADEON_RB3D_COLOROFFSET: | 142 | case RADEON_RB3D_COLOROFFSET: |
144 | r = r100_cs_packet_next_reloc(p, &reloc); | 143 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
149 | return r; | 148 | return r; |
150 | } | 149 | } |
151 | track->cb[0].robj = reloc->robj; | 150 | track->cb[0].robj = reloc->robj; |
152 | track->cb[0].offset = ib_chunk->kdata[idx]; | 151 | track->cb[0].offset = idx_value; |
153 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 152 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
154 | break; | 153 | break; |
155 | case R200_PP_TXOFFSET_0: | 154 | case R200_PP_TXOFFSET_0: |
156 | case R200_PP_TXOFFSET_1: | 155 | case R200_PP_TXOFFSET_1: |
@@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
166 | r100_cs_dump_packet(p, pkt); | 165 | r100_cs_dump_packet(p, pkt); |
167 | return r; | 166 | return r; |
168 | } | 167 | } |
169 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 168 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
170 | track->textures[i].robj = reloc->robj; | 169 | track->textures[i].robj = reloc->robj; |
171 | break; | 170 | break; |
172 | case R200_PP_CUBIC_OFFSET_F1_0: | 171 | case R200_PP_CUBIC_OFFSET_F1_0: |
@@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
208 | r100_cs_dump_packet(p, pkt); | 207 | r100_cs_dump_packet(p, pkt); |
209 | return r; | 208 | return r; |
210 | } | 209 | } |
211 | track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx]; | 210 | track->textures[i].cube_info[face - 1].offset = idx_value; |
212 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 211 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
213 | track->textures[i].cube_info[face - 1].robj = reloc->robj; | 212 | track->textures[i].cube_info[face - 1].robj = reloc->robj; |
214 | break; | 213 | break; |
215 | case RADEON_RE_WIDTH_HEIGHT: | 214 | case RADEON_RE_WIDTH_HEIGHT: |
216 | track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); | 215 | track->maxy = ((idx_value >> 16) & 0x7FF); |
217 | break; | 216 | break; |
218 | case RADEON_RB3D_COLORPITCH: | 217 | case RADEON_RB3D_COLORPITCH: |
219 | r = r100_cs_packet_next_reloc(p, &reloc); | 218 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
229 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 228 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
230 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 229 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
231 | 230 | ||
232 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 231 | tmp = idx_value & ~(0x7 << 16); |
233 | tmp |= tile_flags; | 232 | tmp |= tile_flags; |
234 | ib[idx] = tmp; | 233 | ib[idx] = tmp; |
235 | 234 | ||
236 | track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; | 235 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
237 | break; | 236 | break; |
238 | case RADEON_RB3D_DEPTHPITCH: | 237 | case RADEON_RB3D_DEPTHPITCH: |
239 | track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; | 238 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
240 | break; | 239 | break; |
241 | case RADEON_RB3D_CNTL: | 240 | case RADEON_RB3D_CNTL: |
242 | switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 241 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
243 | case 7: | 242 | case 7: |
244 | case 8: | 243 | case 8: |
245 | case 9: | 244 | case 9: |
@@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
257 | break; | 256 | break; |
258 | default: | 257 | default: |
259 | DRM_ERROR("Invalid color buffer format (%d) !\n", | 258 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
260 | ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); | 259 | ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); |
261 | return -EINVAL; | 260 | return -EINVAL; |
262 | } | 261 | } |
263 | if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) { | 262 | if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) { |
264 | DRM_ERROR("No support for depth xy offset in kms\n"); | 263 | DRM_ERROR("No support for depth xy offset in kms\n"); |
265 | return -EINVAL; | 264 | return -EINVAL; |
266 | } | 265 | } |
267 | 266 | ||
268 | track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); | 267 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
269 | break; | 268 | break; |
270 | case RADEON_RB3D_ZSTENCILCNTL: | 269 | case RADEON_RB3D_ZSTENCILCNTL: |
271 | switch (ib_chunk->kdata[idx] & 0xf) { | 270 | switch (idx_value & 0xf) { |
272 | case 0: | 271 | case 0: |
273 | track->zb.cpp = 2; | 272 | track->zb.cpp = 2; |
274 | break; | 273 | break; |
@@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
292 | r100_cs_dump_packet(p, pkt); | 291 | r100_cs_dump_packet(p, pkt); |
293 | return r; | 292 | return r; |
294 | } | 293 | } |
295 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 294 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
296 | break; | 295 | break; |
297 | case RADEON_PP_CNTL: | 296 | case RADEON_PP_CNTL: |
298 | { | 297 | { |
299 | uint32_t temp = ib_chunk->kdata[idx] >> 4; | 298 | uint32_t temp = idx_value >> 4; |
300 | for (i = 0; i < track->num_texture; i++) | 299 | for (i = 0; i < track->num_texture; i++) |
301 | track->textures[i].enabled = !!(temp & (1 << i)); | 300 | track->textures[i].enabled = !!(temp & (1 << i)); |
302 | } | 301 | } |
303 | break; | 302 | break; |
304 | case RADEON_SE_VF_CNTL: | 303 | case RADEON_SE_VF_CNTL: |
305 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 304 | track->vap_vf_cntl = idx_value; |
306 | break; | 305 | break; |
307 | case 0x210c: | 306 | case 0x210c: |
308 | /* VAP_VF_MAX_VTX_INDX */ | 307 | /* VAP_VF_MAX_VTX_INDX */ |
309 | track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; | 308 | track->max_indx = idx_value & 0x00FFFFFFUL; |
310 | break; | 309 | break; |
311 | case R200_SE_VTX_FMT_0: | 310 | case R200_SE_VTX_FMT_0: |
312 | track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]); | 311 | track->vtx_size = r200_get_vtx_size_0(idx_value); |
313 | break; | 312 | break; |
314 | case R200_SE_VTX_FMT_1: | 313 | case R200_SE_VTX_FMT_1: |
315 | track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]); | 314 | track->vtx_size += r200_get_vtx_size_1(idx_value); |
316 | break; | 315 | break; |
317 | case R200_PP_TXSIZE_0: | 316 | case R200_PP_TXSIZE_0: |
318 | case R200_PP_TXSIZE_1: | 317 | case R200_PP_TXSIZE_1: |
@@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
321 | case R200_PP_TXSIZE_4: | 320 | case R200_PP_TXSIZE_4: |
322 | case R200_PP_TXSIZE_5: | 321 | case R200_PP_TXSIZE_5: |
323 | i = (reg - R200_PP_TXSIZE_0) / 32; | 322 | i = (reg - R200_PP_TXSIZE_0) / 32; |
324 | track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; | 323 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
325 | track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 324 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
326 | break; | 325 | break; |
327 | case R200_PP_TXPITCH_0: | 326 | case R200_PP_TXPITCH_0: |
328 | case R200_PP_TXPITCH_1: | 327 | case R200_PP_TXPITCH_1: |
@@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
331 | case R200_PP_TXPITCH_4: | 330 | case R200_PP_TXPITCH_4: |
332 | case R200_PP_TXPITCH_5: | 331 | case R200_PP_TXPITCH_5: |
333 | i = (reg - R200_PP_TXPITCH_0) / 32; | 332 | i = (reg - R200_PP_TXPITCH_0) / 32; |
334 | track->textures[i].pitch = ib_chunk->kdata[idx] + 32; | 333 | track->textures[i].pitch = idx_value + 32; |
335 | break; | 334 | break; |
336 | case R200_PP_TXFILTER_0: | 335 | case R200_PP_TXFILTER_0: |
337 | case R200_PP_TXFILTER_1: | 336 | case R200_PP_TXFILTER_1: |
@@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
340 | case R200_PP_TXFILTER_4: | 339 | case R200_PP_TXFILTER_4: |
341 | case R200_PP_TXFILTER_5: | 340 | case R200_PP_TXFILTER_5: |
342 | i = (reg - R200_PP_TXFILTER_0) / 32; | 341 | i = (reg - R200_PP_TXFILTER_0) / 32; |
343 | track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK) | 342 | track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK) |
344 | >> R200_MAX_MIP_LEVEL_SHIFT); | 343 | >> R200_MAX_MIP_LEVEL_SHIFT); |
345 | tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; | 344 | tmp = (idx_value >> 23) & 0x7; |
346 | if (tmp == 2 || tmp == 6) | 345 | if (tmp == 2 || tmp == 6) |
347 | track->textures[i].roundup_w = false; | 346 | track->textures[i].roundup_w = false; |
348 | tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; | 347 | tmp = (idx_value >> 27) & 0x7; |
349 | if (tmp == 2 || tmp == 6) | 348 | if (tmp == 2 || tmp == 6) |
350 | track->textures[i].roundup_h = false; | 349 | track->textures[i].roundup_h = false; |
351 | break; | 350 | break; |
@@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
364 | case R200_PP_TXFORMAT_X_4: | 363 | case R200_PP_TXFORMAT_X_4: |
365 | case R200_PP_TXFORMAT_X_5: | 364 | case R200_PP_TXFORMAT_X_5: |
366 | i = (reg - R200_PP_TXFORMAT_X_0) / 32; | 365 | i = (reg - R200_PP_TXFORMAT_X_0) / 32; |
367 | track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7; | 366 | track->textures[i].txdepth = idx_value & 0x7; |
368 | tmp = (ib_chunk->kdata[idx] >> 16) & 0x3; | 367 | tmp = (idx_value >> 16) & 0x3; |
369 | /* 2D, 3D, CUBE */ | 368 | /* 2D, 3D, CUBE */ |
370 | switch (tmp) { | 369 | switch (tmp) { |
371 | case 0: | 370 | case 0: |
@@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
389 | case R200_PP_TXFORMAT_4: | 388 | case R200_PP_TXFORMAT_4: |
390 | case R200_PP_TXFORMAT_5: | 389 | case R200_PP_TXFORMAT_5: |
391 | i = (reg - R200_PP_TXFORMAT_0) / 32; | 390 | i = (reg - R200_PP_TXFORMAT_0) / 32; |
392 | if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) { | 391 | if (idx_value & R200_TXFORMAT_NON_POWER2) { |
393 | track->textures[i].use_pitch = 1; | 392 | track->textures[i].use_pitch = 1; |
394 | } else { | 393 | } else { |
395 | track->textures[i].use_pitch = 0; | 394 | track->textures[i].use_pitch = 0; |
396 | track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); | 395 | track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
397 | track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); | 396 | track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
398 | } | 397 | } |
399 | switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { | 398 | switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { |
400 | case R200_TXFORMAT_I8: | 399 | case R200_TXFORMAT_I8: |
401 | case R200_TXFORMAT_RGB332: | 400 | case R200_TXFORMAT_RGB332: |
402 | case R200_TXFORMAT_Y8: | 401 | case R200_TXFORMAT_Y8: |
@@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
424 | track->textures[i].cpp = 4; | 423 | track->textures[i].cpp = 4; |
425 | break; | 424 | break; |
426 | } | 425 | } |
427 | track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); | 426 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
428 | track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); | 427 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
429 | break; | 428 | break; |
430 | case R200_PP_CUBIC_FACES_0: | 429 | case R200_PP_CUBIC_FACES_0: |
431 | case R200_PP_CUBIC_FACES_1: | 430 | case R200_PP_CUBIC_FACES_1: |
@@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
433 | case R200_PP_CUBIC_FACES_3: | 432 | case R200_PP_CUBIC_FACES_3: |
434 | case R200_PP_CUBIC_FACES_4: | 433 | case R200_PP_CUBIC_FACES_4: |
435 | case R200_PP_CUBIC_FACES_5: | 434 | case R200_PP_CUBIC_FACES_5: |
436 | tmp = ib_chunk->kdata[idx]; | 435 | tmp = idx_value; |
437 | i = (reg - R200_PP_CUBIC_FACES_0) / 32; | 436 | i = (reg - R200_PP_CUBIC_FACES_0) / 32; |
438 | for (face = 0; face < 4; face++) { | 437 | for (face = 0; face < 4; face++) { |
439 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 438 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
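r200_packet0_check() now loads the payload dword once into idx_value before the switch instead of re-reading ib_chunk->kdata[idx] in every case. The per-texture cases also lean on the register layout: each texture unit's TXSIZE/TXPITCH/TXFILTER/TXFORMAT registers sit 32 bytes after the previous unit's, which is what lets i = (reg - R200_PP_TXSIZE_0) / 32 recover the unit index. A stand-alone illustration of that stride arithmetic; the base address is a placeholder, not the real R200_PP_TXSIZE_0 value:

    #include <assert.h>

    /* per-texture r200 registers are strided 32 bytes apart */
    #define EXAMPLE_TXSIZE_BASE  0x2c0c                    /* placeholder */
    #define EXAMPLE_TXSIZE(unit) (EXAMPLE_TXSIZE_BASE + 32 * (unit))

    int main(void)
    {
            unsigned unit;

            for (unit = 0; unit < 6; unit++)
                    assert((EXAMPLE_TXSIZE(unit) - EXAMPLE_TXSIZE_BASE) / 32
                           == unit);
            return 0;
    }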
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index bb151ecdf8fc..1ebea8cc8c93 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -697,17 +697,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
697 | struct radeon_cs_packet *pkt, | 697 | struct radeon_cs_packet *pkt, |
698 | unsigned idx, unsigned reg) | 698 | unsigned idx, unsigned reg) |
699 | { | 699 | { |
700 | struct radeon_cs_chunk *ib_chunk; | ||
701 | struct radeon_cs_reloc *reloc; | 700 | struct radeon_cs_reloc *reloc; |
702 | struct r100_cs_track *track; | 701 | struct r100_cs_track *track; |
703 | volatile uint32_t *ib; | 702 | volatile uint32_t *ib; |
704 | uint32_t tmp, tile_flags = 0; | 703 | uint32_t tmp, tile_flags = 0; |
705 | unsigned i; | 704 | unsigned i; |
706 | int r; | 705 | int r; |
706 | u32 idx_value; | ||
707 | 707 | ||
708 | ib = p->ib->ptr; | 708 | ib = p->ib->ptr; |
709 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
710 | track = (struct r100_cs_track *)p->track; | 709 | track = (struct r100_cs_track *)p->track; |
710 | idx_value = radeon_get_ib_value(p, idx); | ||
711 | |||
711 | switch(reg) { | 712 | switch(reg) { |
712 | case AVIVO_D1MODE_VLINE_START_END: | 713 | case AVIVO_D1MODE_VLINE_START_END: |
713 | case RADEON_CRTC_GUI_TRIG_VLINE: | 714 | case RADEON_CRTC_GUI_TRIG_VLINE: |
@@ -738,8 +739,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
738 | return r; | 739 | return r; |
739 | } | 740 | } |
740 | track->cb[i].robj = reloc->robj; | 741 | track->cb[i].robj = reloc->robj; |
741 | track->cb[i].offset = ib_chunk->kdata[idx]; | 742 | track->cb[i].offset = idx_value; |
742 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 743 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
743 | break; | 744 | break; |
744 | case R300_ZB_DEPTHOFFSET: | 745 | case R300_ZB_DEPTHOFFSET: |
745 | r = r100_cs_packet_next_reloc(p, &reloc); | 746 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -750,8 +751,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
750 | return r; | 751 | return r; |
751 | } | 752 | } |
752 | track->zb.robj = reloc->robj; | 753 | track->zb.robj = reloc->robj; |
753 | track->zb.offset = ib_chunk->kdata[idx]; | 754 | track->zb.offset = idx_value; |
754 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 755 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
755 | break; | 756 | break; |
756 | case R300_TX_OFFSET_0: | 757 | case R300_TX_OFFSET_0: |
757 | case R300_TX_OFFSET_0+4: | 758 | case R300_TX_OFFSET_0+4: |
@@ -777,32 +778,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
777 | r100_cs_dump_packet(p, pkt); | 778 | r100_cs_dump_packet(p, pkt); |
778 | return r; | 779 | return r; |
779 | } | 780 | } |
780 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 781 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
781 | track->textures[i].robj = reloc->robj; | 782 | track->textures[i].robj = reloc->robj; |
782 | break; | 783 | break; |
783 | /* Tracked registers */ | 784 | /* Tracked registers */ |
784 | case 0x2084: | 785 | case 0x2084: |
785 | /* VAP_VF_CNTL */ | 786 | /* VAP_VF_CNTL */ |
786 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 787 | track->vap_vf_cntl = idx_value; |
787 | break; | 788 | break; |
788 | case 0x20B4: | 789 | case 0x20B4: |
789 | /* VAP_VTX_SIZE */ | 790 | /* VAP_VTX_SIZE */ |
790 | track->vtx_size = ib_chunk->kdata[idx] & 0x7F; | 791 | track->vtx_size = idx_value & 0x7F; |
791 | break; | 792 | break; |
792 | case 0x2134: | 793 | case 0x2134: |
793 | /* VAP_VF_MAX_VTX_INDX */ | 794 | /* VAP_VF_MAX_VTX_INDX */ |
794 | track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; | 795 | track->max_indx = idx_value & 0x00FFFFFFUL; |
795 | break; | 796 | break; |
796 | case 0x43E4: | 797 | case 0x43E4: |
797 | /* SC_SCISSOR1 */ | 798 | /* SC_SCISSOR1 */ |
798 | track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; | 799 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
799 | if (p->rdev->family < CHIP_RV515) { | 800 | if (p->rdev->family < CHIP_RV515) { |
800 | track->maxy -= 1440; | 801 | track->maxy -= 1440; |
801 | } | 802 | } |
802 | break; | 803 | break; |
803 | case 0x4E00: | 804 | case 0x4E00: |
804 | /* RB3D_CCTL */ | 805 | /* RB3D_CCTL */ |
805 | track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1; | 806 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
806 | break; | 807 | break; |
807 | case 0x4E38: | 808 | case 0x4E38: |
808 | case 0x4E3C: | 809 | case 0x4E3C: |
@@ -825,13 +826,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
825 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 826 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
826 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; | 827 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
827 | 828 | ||
828 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 829 | tmp = idx_value & ~(0x7 << 16); |
829 | tmp |= tile_flags; | 830 | tmp |= tile_flags; |
830 | ib[idx] = tmp; | 831 | ib[idx] = tmp; |
831 | 832 | ||
832 | i = (reg - 0x4E38) >> 2; | 833 | i = (reg - 0x4E38) >> 2; |
833 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; | 834 | track->cb[i].pitch = idx_value & 0x3FFE; |
834 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { | 835 | switch (((idx_value >> 21) & 0xF)) { |
835 | case 9: | 836 | case 9: |
836 | case 11: | 837 | case 11: |
837 | case 12: | 838 | case 12: |
@@ -854,13 +855,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
854 | break; | 855 | break; |
855 | default: | 856 | default: |
856 | DRM_ERROR("Invalid color buffer format (%d) !\n", | 857 | DRM_ERROR("Invalid color buffer format (%d) !\n", |
857 | ((ib_chunk->kdata[idx] >> 21) & 0xF)); | 858 | ((idx_value >> 21) & 0xF)); |
858 | return -EINVAL; | 859 | return -EINVAL; |
859 | } | 860 | } |
860 | break; | 861 | break; |
861 | case 0x4F00: | 862 | case 0x4F00: |
862 | /* ZB_CNTL */ | 863 | /* ZB_CNTL */ |
863 | if (ib_chunk->kdata[idx] & 2) { | 864 | if (idx_value & 2) { |
864 | track->z_enabled = true; | 865 | track->z_enabled = true; |
865 | } else { | 866 | } else { |
866 | track->z_enabled = false; | 867 | track->z_enabled = false; |
@@ -868,7 +869,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
868 | break; | 869 | break; |
869 | case 0x4F10: | 870 | case 0x4F10: |
870 | /* ZB_FORMAT */ | 871 | /* ZB_FORMAT */ |
871 | switch ((ib_chunk->kdata[idx] & 0xF)) { | 872 | switch ((idx_value & 0xF)) { |
872 | case 0: | 873 | case 0: |
873 | case 1: | 874 | case 1: |
874 | track->zb.cpp = 2; | 875 | track->zb.cpp = 2; |
@@ -878,7 +879,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
878 | break; | 879 | break; |
879 | default: | 880 | default: |
880 | DRM_ERROR("Invalid z buffer format (%d) !\n", | 881 | DRM_ERROR("Invalid z buffer format (%d) !\n", |
881 | (ib_chunk->kdata[idx] & 0xF)); | 882 | (idx_value & 0xF)); |
882 | return -EINVAL; | 883 | return -EINVAL; |
883 | } | 884 | } |
884 | break; | 885 | break; |
@@ -897,17 +898,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
897 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 898 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
898 | tile_flags |= R300_DEPTHMICROTILE_TILED;; | 899 | tile_flags |= R300_DEPTHMICROTILE_TILED;; |
899 | 900 | ||
900 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | 901 | tmp = idx_value & ~(0x7 << 16); |
901 | tmp |= tile_flags; | 902 | tmp |= tile_flags; |
902 | ib[idx] = tmp; | 903 | ib[idx] = tmp; |
903 | 904 | ||
904 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; | 905 | track->zb.pitch = idx_value & 0x3FFC; |
905 | break; | 906 | break; |
906 | case 0x4104: | 907 | case 0x4104: |
907 | for (i = 0; i < 16; i++) { | 908 | for (i = 0; i < 16; i++) { |
908 | bool enabled; | 909 | bool enabled; |
909 | 910 | ||
910 | enabled = !!(ib_chunk->kdata[idx] & (1 << i)); | 911 | enabled = !!(idx_value & (1 << i)); |
911 | track->textures[i].enabled = enabled; | 912 | track->textures[i].enabled = enabled; |
912 | } | 913 | } |
913 | break; | 914 | break; |
@@ -929,9 +930,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
929 | case 0x44FC: | 930 | case 0x44FC: |
930 | /* TX_FORMAT1_[0-15] */ | 931 | /* TX_FORMAT1_[0-15] */ |
931 | i = (reg - 0x44C0) >> 2; | 932 | i = (reg - 0x44C0) >> 2; |
932 | tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; | 933 | tmp = (idx_value >> 25) & 0x3; |
933 | track->textures[i].tex_coord_type = tmp; | 934 | track->textures[i].tex_coord_type = tmp; |
934 | switch ((ib_chunk->kdata[idx] & 0x1F)) { | 935 | switch ((idx_value & 0x1F)) { |
935 | case R300_TX_FORMAT_X8: | 936 | case R300_TX_FORMAT_X8: |
936 | case R300_TX_FORMAT_Y4X4: | 937 | case R300_TX_FORMAT_Y4X4: |
937 | case R300_TX_FORMAT_Z3Y3X2: | 938 | case R300_TX_FORMAT_Z3Y3X2: |
@@ -971,7 +972,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
971 | break; | 972 | break; |
972 | default: | 973 | default: |
973 | DRM_ERROR("Invalid texture format %u\n", | 974 | DRM_ERROR("Invalid texture format %u\n", |
974 | (ib_chunk->kdata[idx] & 0x1F)); | 975 | (idx_value & 0x1F)); |
975 | return -EINVAL; | 976 | return -EINVAL; |
976 | break; | 977 | break; |
977 | } | 978 | } |
@@ -994,11 +995,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
994 | case 0x443C: | 995 | case 0x443C: |
995 | /* TX_FILTER0_[0-15] */ | 996 | /* TX_FILTER0_[0-15] */ |
996 | i = (reg - 0x4400) >> 2; | 997 | i = (reg - 0x4400) >> 2; |
997 | tmp = ib_chunk->kdata[idx] & 0x7; | 998 | tmp = idx_value & 0x7; |
998 | if (tmp == 2 || tmp == 4 || tmp == 6) { | 999 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
999 | track->textures[i].roundup_w = false; | 1000 | track->textures[i].roundup_w = false; |
1000 | } | 1001 | } |
1001 | tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; | 1002 | tmp = (idx_value >> 3) & 0x7; |
1002 | if (tmp == 2 || tmp == 4 || tmp == 6) { | 1003 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
1003 | track->textures[i].roundup_h = false; | 1004 | track->textures[i].roundup_h = false; |
1004 | } | 1005 | } |
@@ -1021,12 +1022,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1021 | case 0x453C: | 1022 | case 0x453C: |
1022 | /* TX_FORMAT2_[0-15] */ | 1023 | /* TX_FORMAT2_[0-15] */ |
1023 | i = (reg - 0x4500) >> 2; | 1024 | i = (reg - 0x4500) >> 2; |
1024 | tmp = ib_chunk->kdata[idx] & 0x3FFF; | 1025 | tmp = idx_value & 0x3FFF; |
1025 | track->textures[i].pitch = tmp + 1; | 1026 | track->textures[i].pitch = tmp + 1; |
1026 | if (p->rdev->family >= CHIP_RV515) { | 1027 | if (p->rdev->family >= CHIP_RV515) { |
1027 | tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11; | 1028 | tmp = ((idx_value >> 15) & 1) << 11; |
1028 | track->textures[i].width_11 = tmp; | 1029 | track->textures[i].width_11 = tmp; |
1029 | tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11; | 1030 | tmp = ((idx_value >> 16) & 1) << 11; |
1030 | track->textures[i].height_11 = tmp; | 1031 | track->textures[i].height_11 = tmp; |
1031 | } | 1032 | } |
1032 | break; | 1033 | break; |
@@ -1048,15 +1049,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1048 | case 0x44BC: | 1049 | case 0x44BC: |
1049 | /* TX_FORMAT0_[0-15] */ | 1050 | /* TX_FORMAT0_[0-15] */ |
1050 | i = (reg - 0x4480) >> 2; | 1051 | i = (reg - 0x4480) >> 2; |
1051 | tmp = ib_chunk->kdata[idx] & 0x7FF; | 1052 | tmp = idx_value & 0x7FF; |
1052 | track->textures[i].width = tmp + 1; | 1053 | track->textures[i].width = tmp + 1; |
1053 | tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF; | 1054 | tmp = (idx_value >> 11) & 0x7FF; |
1054 | track->textures[i].height = tmp + 1; | 1055 | track->textures[i].height = tmp + 1; |
1055 | tmp = (ib_chunk->kdata[idx] >> 26) & 0xF; | 1056 | tmp = (idx_value >> 26) & 0xF; |
1056 | track->textures[i].num_levels = tmp; | 1057 | track->textures[i].num_levels = tmp; |
1057 | tmp = ib_chunk->kdata[idx] & (1 << 31); | 1058 | tmp = idx_value & (1 << 31); |
1058 | track->textures[i].use_pitch = !!tmp; | 1059 | track->textures[i].use_pitch = !!tmp; |
1059 | tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; | 1060 | tmp = (idx_value >> 22) & 0xF; |
1060 | track->textures[i].txdepth = tmp; | 1061 | track->textures[i].txdepth = tmp; |
1061 | break; | 1062 | break; |
1062 | case R300_ZB_ZPASS_ADDR: | 1063 | case R300_ZB_ZPASS_ADDR: |
@@ -1067,7 +1068,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1067 | r100_cs_dump_packet(p, pkt); | 1068 | r100_cs_dump_packet(p, pkt); |
1068 | return r; | 1069 | return r; |
1069 | } | 1070 | } |
1070 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1071 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1071 | break; | 1072 | break; |
1072 | case 0x4be8: | 1073 | case 0x4be8: |
1073 | /* valid register only on RV530 */ | 1074 | /* valid register only on RV530 */ |
@@ -1085,60 +1086,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1085 | static int r300_packet3_check(struct radeon_cs_parser *p, | 1086 | static int r300_packet3_check(struct radeon_cs_parser *p, |
1086 | struct radeon_cs_packet *pkt) | 1087 | struct radeon_cs_packet *pkt) |
1087 | { | 1088 | { |
1088 | struct radeon_cs_chunk *ib_chunk; | ||
1089 | |||
1090 | struct radeon_cs_reloc *reloc; | 1089 | struct radeon_cs_reloc *reloc; |
1091 | struct r100_cs_track *track; | 1090 | struct r100_cs_track *track; |
1092 | volatile uint32_t *ib; | 1091 | volatile uint32_t *ib; |
1093 | unsigned idx; | 1092 | unsigned idx; |
1094 | unsigned i, c; | ||
1095 | int r; | 1093 | int r; |
1096 | 1094 | ||
1097 | ib = p->ib->ptr; | 1095 | ib = p->ib->ptr; |
1098 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
1099 | idx = pkt->idx + 1; | 1096 | idx = pkt->idx + 1; |
1100 | track = (struct r100_cs_track *)p->track; | 1097 | track = (struct r100_cs_track *)p->track; |
1101 | switch(pkt->opcode) { | 1098 | switch(pkt->opcode) { |
1102 | case PACKET3_3D_LOAD_VBPNTR: | 1099 | case PACKET3_3D_LOAD_VBPNTR: |
1103 | c = ib_chunk->kdata[idx++] & 0x1F; | 1100 | r = r100_packet3_load_vbpntr(p, pkt, idx); |
1104 | track->num_arrays = c; | 1101 | if (r) |
1105 | for (i = 0; i < (c - 1); i+=2, idx+=3) { | 1102 | return r; |
1106 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1107 | if (r) { | ||
1108 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1109 | pkt->opcode); | ||
1110 | r100_cs_dump_packet(p, pkt); | ||
1111 | return r; | ||
1112 | } | ||
1113 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1114 | track->arrays[i + 0].robj = reloc->robj; | ||
1115 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1116 | track->arrays[i + 0].esize &= 0x7F; | ||
1117 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1118 | if (r) { | ||
1119 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1120 | pkt->opcode); | ||
1121 | r100_cs_dump_packet(p, pkt); | ||
1122 | return r; | ||
1123 | } | ||
1124 | ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); | ||
1125 | track->arrays[i + 1].robj = reloc->robj; | ||
1126 | track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24; | ||
1127 | track->arrays[i + 1].esize &= 0x7F; | ||
1128 | } | ||
1129 | if (c & 1) { | ||
1130 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1131 | if (r) { | ||
1132 | DRM_ERROR("No reloc for packet3 %d\n", | ||
1133 | pkt->opcode); | ||
1134 | r100_cs_dump_packet(p, pkt); | ||
1135 | return r; | ||
1136 | } | ||
1137 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | ||
1138 | track->arrays[i + 0].robj = reloc->robj; | ||
1139 | track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; | ||
1140 | track->arrays[i + 0].esize &= 0x7F; | ||
1141 | } | ||
1142 | break; | 1103 | break; |
1143 | case PACKET3_INDX_BUFFER: | 1104 | case PACKET3_INDX_BUFFER: |
1144 | r = r100_cs_packet_next_reloc(p, &reloc); | 1105 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1147,7 +1108,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1147 | r100_cs_dump_packet(p, pkt); | 1108 | r100_cs_dump_packet(p, pkt); |
1148 | return r; | 1109 | return r; |
1149 | } | 1110 | } |
1150 | ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); | 1111 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); |
1151 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); | 1112 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1152 | if (r) { | 1113 | if (r) { |
1153 | return r; | 1114 | return r; |
@@ -1158,11 +1119,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1158 | /* Number of dwords is vtx_size * (num_vertices - 1) | 1119 | /* Number of dwords is vtx_size * (num_vertices - 1) |
1159 | * PRIM_WALK must be equal to 3 vertex data in embedded | 1120 | * PRIM_WALK must be equal to 3 vertex data in embedded |
1160 | * in cmd stream */ | 1121 | * in cmd stream */ |
1161 | if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { | 1122 | if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { |
1162 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1123 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1163 | return -EINVAL; | 1124 | return -EINVAL; |
1164 | } | 1125 | } |
1165 | track->vap_vf_cntl = ib_chunk->kdata[idx+1]; | 1126 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1166 | track->immd_dwords = pkt->count - 1; | 1127 | track->immd_dwords = pkt->count - 1; |
1167 | r = r100_cs_track_check(p->rdev, track); | 1128 | r = r100_cs_track_check(p->rdev, track); |
1168 | if (r) { | 1129 | if (r) { |
@@ -1173,11 +1134,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1173 | /* Number of dwords is vtx_size * (num_vertices - 1) | 1134 | /* Number of dwords is vtx_size * (num_vertices - 1) |
1174 | * PRIM_WALK must be equal to 3 vertex data in embedded | 1135 | * PRIM_WALK must be equal to 3 vertex data in embedded |
1175 | * in cmd stream */ | 1136 | * in cmd stream */ |
1176 | if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { | 1137 | if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { |
1177 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1138 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1178 | return -EINVAL; | 1139 | return -EINVAL; |
1179 | } | 1140 | } |
1180 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1141 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1181 | track->immd_dwords = pkt->count; | 1142 | track->immd_dwords = pkt->count; |
1182 | r = r100_cs_track_check(p->rdev, track); | 1143 | r = r100_cs_track_check(p->rdev, track); |
1183 | if (r) { | 1144 | if (r) { |
@@ -1185,28 +1146,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1185 | } | 1146 | } |
1186 | break; | 1147 | break; |
1187 | case PACKET3_3D_DRAW_VBUF: | 1148 | case PACKET3_3D_DRAW_VBUF: |
1188 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1149 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1189 | r = r100_cs_track_check(p->rdev, track); | 1150 | r = r100_cs_track_check(p->rdev, track); |
1190 | if (r) { | 1151 | if (r) { |
1191 | return r; | 1152 | return r; |
1192 | } | 1153 | } |
1193 | break; | 1154 | break; |
1194 | case PACKET3_3D_DRAW_VBUF_2: | 1155 | case PACKET3_3D_DRAW_VBUF_2: |
1195 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1156 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1196 | r = r100_cs_track_check(p->rdev, track); | 1157 | r = r100_cs_track_check(p->rdev, track); |
1197 | if (r) { | 1158 | if (r) { |
1198 | return r; | 1159 | return r; |
1199 | } | 1160 | } |
1200 | break; | 1161 | break; |
1201 | case PACKET3_3D_DRAW_INDX: | 1162 | case PACKET3_3D_DRAW_INDX: |
1202 | track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; | 1163 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1203 | r = r100_cs_track_check(p->rdev, track); | 1164 | r = r100_cs_track_check(p->rdev, track); |
1204 | if (r) { | 1165 | if (r) { |
1205 | return r; | 1166 | return r; |
1206 | } | 1167 | } |
1207 | break; | 1168 | break; |
1208 | case PACKET3_3D_DRAW_INDX_2: | 1169 | case PACKET3_3D_DRAW_INDX_2: |
1209 | track->vap_vf_cntl = ib_chunk->kdata[idx]; | 1170 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
1210 | r = r100_cs_track_check(p->rdev, track); | 1171 | r = r100_cs_track_check(p->rdev, track); |
1211 | if (r) { | 1172 | if (r) { |
1212 | return r; | 1173 | return r; |
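Both the r100 and r300 packet3 parsers enforce the same invariant for the *_DRAW_IMMD packets: the PRIM_WALK field of VAP_VF_CNTL, bits 5:4, must be 3, meaning the vertex data is embedded in the command stream rather than fetched from a vertex buffer. Restated as a stand-alone predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* PRIM_WALK == 3 <=> vertex data embedded in the command stream */
    static bool prim_walk_is_embedded(unsigned vap_vf_cntl)
    {
            return ((vap_vf_cntl >> 4) & 0x3) == 3;
    }

    int main(void)
    {
            printf("0x30 -> %d\n", prim_walk_is_embedded(0x30));    /* 1 */
            printf("0x10 -> %d\n", prim_walk_is_embedded(0x10));    /* 0 */
            return 0;
    }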
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index e1d5e0331e19..868add6e166d 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -445,6 +445,8 @@ | |||
445 | #define AVIVO_D1MODE_VBLANK_STATUS 0x6534 | 445 | #define AVIVO_D1MODE_VBLANK_STATUS 0x6534 |
446 | # define AVIVO_VBLANK_ACK (1 << 4) | 446 | # define AVIVO_VBLANK_ACK (1 << 4) |
447 | #define AVIVO_D1MODE_VLINE_START_END 0x6538 | 447 | #define AVIVO_D1MODE_VLINE_START_END 0x6538 |
448 | #define AVIVO_D1MODE_VLINE_STATUS 0x653c | ||
449 | # define AVIVO_D1MODE_VLINE_STAT (1 << 12) | ||
448 | #define AVIVO_DxMODE_INT_MASK 0x6540 | 450 | #define AVIVO_DxMODE_INT_MASK 0x6540 |
449 | # define AVIVO_D1MODE_INT_MASK (1 << 0) | 451 | # define AVIVO_D1MODE_INT_MASK (1 << 0) |
450 | # define AVIVO_D2MODE_INT_MASK (1 << 8) | 452 | # define AVIVO_D2MODE_INT_MASK (1 << 8) |
@@ -502,6 +504,7 @@ | |||
502 | 504 | ||
503 | #define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 | 505 | #define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 |
504 | #define AVIVO_D2MODE_VLINE_START_END 0x6d38 | 506 | #define AVIVO_D2MODE_VLINE_START_END 0x6d38 |
507 | #define AVIVO_D2MODE_VLINE_STATUS 0x6d3c | ||
505 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 | 508 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 |
506 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 | 509 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 |
507 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 | 510 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 |
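r500_reg.h gains the VLINE status register offsets for both CRTCs, with a STAT bit at position 12 defined for D1. These hunks only add the defines; a hypothetical consumer, using the RREG32 read macro seen elsewhere in this driver, might sample the bit like this (a sketch under that assumption, not code from the patch):

    /* hypothetical: true while CRTC1 is inside the programmed vline window,
     * assuming that is what AVIVO_D1MODE_VLINE_STAT reports */
    static bool example_d1_in_vline(struct radeon_device *rdev)
    {
            return RREG32(AVIVO_D1MODE_VLINE_STATUS) & AVIVO_D1MODE_VLINE_STAT;
    }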
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index d4b0b9d2e39b..0bf13fccdaf2 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -26,108 +26,13 @@ | |||
26 | * Jerome Glisse | 26 | * Jerome Glisse |
27 | */ | 27 | */ |
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | ||
30 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "atom.h" | ||
31 | #include "r520d.h" | ||
31 | 32 | ||
32 | /* r520,rv530,rv560,rv570,r580 depends on : */ | 33 | /* This file gathers functions specific to: r520,rv530,rv560,rv570,r580 */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); | ||
34 | void r420_pipes_init(struct radeon_device *rdev); | ||
35 | void rs600_mc_disable_clients(struct radeon_device *rdev); | ||
36 | void rs600_disable_vga(struct radeon_device *rdev); | ||
37 | int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); | ||
38 | int rv515_debugfs_ga_info_init(struct radeon_device *rdev); | ||
39 | 34 | ||
40 | /* This files gather functions specifics to: | 35 | static int r520_mc_wait_for_idle(struct radeon_device *rdev) |
41 | * r520,rv530,rv560,rv570,r580 | ||
42 | * | ||
43 | * Some of these functions might be used by newer ASICs. | ||
44 | */ | ||
45 | void r520_gpu_init(struct radeon_device *rdev); | ||
46 | int r520_mc_wait_for_idle(struct radeon_device *rdev); | ||
47 | |||
48 | |||
49 | /* | ||
50 | * MC | ||
51 | */ | ||
52 | int r520_mc_init(struct radeon_device *rdev) | ||
53 | { | ||
54 | uint32_t tmp; | ||
55 | int r; | ||
56 | |||
57 | if (r100_debugfs_rbbm_init(rdev)) { | ||
58 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | ||
59 | } | ||
60 | if (rv515_debugfs_pipes_info_init(rdev)) { | ||
61 | DRM_ERROR("Failed to register debugfs file for pipes !\n"); | ||
62 | } | ||
63 | if (rv515_debugfs_ga_info_init(rdev)) { | ||
64 | DRM_ERROR("Failed to register debugfs file for pipes !\n"); | ||
65 | } | ||
66 | |||
67 | r520_gpu_init(rdev); | ||
68 | rv370_pcie_gart_disable(rdev); | ||
69 | |||
70 | /* Setup GPU memory space */ | ||
71 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
72 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
73 | if (rdev->flags & RADEON_IS_AGP) { | ||
74 | r = radeon_agp_init(rdev); | ||
75 | if (r) { | ||
76 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
77 | rdev->flags &= ~RADEON_IS_AGP; | ||
78 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
79 | } else { | ||
80 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
81 | } | ||
82 | } | ||
83 | r = radeon_mc_setup(rdev); | ||
84 | if (r) { | ||
85 | return r; | ||
86 | } | ||
87 | |||
88 | /* Program GPU memory space */ | ||
89 | rs600_mc_disable_clients(rdev); | ||
90 | if (r520_mc_wait_for_idle(rdev)) { | ||
91 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
92 | "programming pipes. Bad things might happen.\n"); | ||
93 | } | ||
94 | /* Write VRAM size in case we are limiting it */ | ||
95 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
96 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
97 | tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); | ||
98 | tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); | ||
99 | WREG32_MC(R520_MC_FB_LOCATION, tmp); | ||
100 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
101 | WREG32(0x310, rdev->mc.vram_location); | ||
102 | if (rdev->flags & RADEON_IS_AGP) { | ||
103 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | ||
104 | tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16); | ||
105 | tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16); | ||
106 | WREG32_MC(R520_MC_AGP_LOCATION, tmp); | ||
107 | WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base); | ||
108 | WREG32_MC(R520_MC_AGP_BASE_2, 0); | ||
109 | } else { | ||
110 | WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF); | ||
111 | WREG32_MC(R520_MC_AGP_BASE, 0); | ||
112 | WREG32_MC(R520_MC_AGP_BASE_2, 0); | ||
113 | } | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | void r520_mc_fini(struct radeon_device *rdev) | ||
118 | { | ||
119 | } | ||
120 | |||
121 | |||
122 | /* | ||
123 | * Global GPU functions | ||
124 | */ | ||
125 | void r520_errata(struct radeon_device *rdev) | ||
126 | { | ||
127 | rdev->pll_errata = 0; | ||
128 | } | ||
129 | |||
130 | int r520_mc_wait_for_idle(struct radeon_device *rdev) | ||
131 | { | 36 | { |
132 | unsigned i; | 37 | unsigned i; |
133 | uint32_t tmp; | 38 | uint32_t tmp; |
@@ -143,12 +48,12 @@ int r520_mc_wait_for_idle(struct radeon_device *rdev) | |||
143 | return -1; | 48 | return -1; |
144 | } | 49 | } |
145 | 50 | ||
146 | void r520_gpu_init(struct radeon_device *rdev) | 51 | static void r520_gpu_init(struct radeon_device *rdev) |
147 | { | 52 | { |
148 | unsigned pipe_select_current, gb_pipe_select, tmp; | 53 | unsigned pipe_select_current, gb_pipe_select, tmp; |
149 | 54 | ||
150 | r100_hdp_reset(rdev); | 55 | r100_hdp_reset(rdev); |
151 | rs600_disable_vga(rdev); | 56 | rv515_vga_render_disable(rdev); |
152 | /* | 57 | /* |
153 | * DST_PIPE_CONFIG 0x170C | 58 | * DST_PIPE_CONFIG 0x170C |
154 | * GB_TILE_CONFIG 0x4018 | 59 | * GB_TILE_CONFIG 0x4018 |
@@ -186,10 +91,6 @@ void r520_gpu_init(struct radeon_device *rdev) | |||
186 | } | 91 | } |
187 | } | 92 | } |
188 | 93 | ||
189 | |||
190 | /* | ||
191 | * VRAM info | ||
192 | */ | ||
193 | static void r520_vram_get_type(struct radeon_device *rdev) | 94 | static void r520_vram_get_type(struct radeon_device *rdev) |
194 | { | 95 | { |
195 | uint32_t tmp; | 96 | uint32_t tmp; |
@@ -233,7 +134,168 @@ void r520_vram_info(struct radeon_device *rdev) | |||
233 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | 134 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); |
234 | } | 135 | } |
235 | 136 | ||
236 | void r520_bandwidth_update(struct radeon_device *rdev) | 137 | void r520_mc_program(struct radeon_device *rdev) |
138 | { | ||
139 | struct rv515_mc_save save; | ||
140 | |||
141 | /* Stops all mc clients */ | ||
142 | rv515_mc_stop(rdev, &save); | ||
143 | |||
144 | /* Wait for mc idle */ | ||
145 | if (r520_mc_wait_for_idle(rdev)) | ||
146 | dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n"); | ||
147 | /* Write VRAM size in case we are limiting it */ | ||
148 | WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
149 | /* Program MC, should be a 32-bit limited address space */ | ||
150 | WREG32_MC(R_000004_MC_FB_LOCATION, | ||
151 | S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
152 | S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
153 | WREG32(R_000134_HDP_FB_LOCATION, | ||
154 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
155 | if (rdev->flags & RADEON_IS_AGP) { | ||
156 | WREG32_MC(R_000005_MC_AGP_LOCATION, | ||
157 | S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) | | ||
158 | S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); | ||
159 | WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); | ||
160 | WREG32_MC(R_000007_AGP_BASE_2, | ||
161 | S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base))); | ||
162 | } else { | ||
163 | WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF); | ||
164 | WREG32_MC(R_000006_AGP_BASE, 0); | ||
165 | WREG32_MC(R_000007_AGP_BASE_2, 0); | ||
166 | } | ||
167 | |||
168 | rv515_mc_resume(rdev, &save); | ||
169 | } | ||
170 | |||
171 | static int r520_startup(struct radeon_device *rdev) | ||
172 | { | ||
173 | int r; | ||
174 | |||
175 | r520_mc_program(rdev); | ||
176 | /* Resume clock */ | ||
177 | rv515_clock_startup(rdev); | ||
178 | /* Initialize GPU configuration (# pipes, ...) */ | ||
179 | r520_gpu_init(rdev); | ||
180 | /* Initialize GART (initialize after TTM so we can allocate | ||
181 | * memory through TTM but finalize after TTM) */ | ||
182 | if (rdev->flags & RADEON_IS_PCIE) { | ||
183 | r = rv370_pcie_gart_enable(rdev); | ||
184 | if (r) | ||
185 | return r; | ||
186 | } | ||
187 | /* Enable IRQ */ | ||
188 | rdev->irq.sw_int = true; | ||
189 | r100_irq_set(rdev); | ||
190 | /* 1M ring buffer */ | ||
191 | r = r100_cp_init(rdev, 1024 * 1024); | ||
192 | if (r) { | ||
193 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
194 | return r; | ||
195 | } | ||
196 | r = r100_wb_init(rdev); | ||
197 | if (r) | ||
198 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
199 | r = r100_ib_init(rdev); | ||
200 | if (r) { | ||
201 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
202 | return r; | ||
203 | } | ||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | int r520_resume(struct radeon_device *rdev) | ||
237 | { | 208 | { |
238 | rv515_bandwidth_avivo_update(rdev); | 209 | /* Make sure GART is not working */ |
210 | if (rdev->flags & RADEON_IS_PCIE) | ||
211 | rv370_pcie_gart_disable(rdev); | ||
212 | /* Resume clock before doing reset */ | ||
213 | rv515_clock_startup(rdev); | ||
214 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
215 | if (radeon_gpu_reset(rdev)) { | ||
216 | dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
217 | RREG32(R_000E40_RBBM_STATUS), | ||
218 | RREG32(R_0007C0_CP_STAT)); | ||
219 | } | ||
220 | /* post */ | ||
221 | atom_asic_init(rdev->mode_info.atom_context); | ||
222 | /* Resume clock after posting */ | ||
223 | rv515_clock_startup(rdev); | ||
224 | return r520_startup(rdev); | ||
225 | } | ||
226 | |||
227 | int r520_init(struct radeon_device *rdev) | ||
228 | { | ||
229 | int r; | ||
230 | |||
231 | rdev->new_init_path = true; | ||
232 | /* Initialize scratch registers */ | ||
233 | radeon_scratch_init(rdev); | ||
234 | /* Initialize surface registers */ | ||
235 | radeon_surface_init(rdev); | ||
236 | /* TODO: disable VGA, need to use VGA request */ | ||
237 | /* BIOS*/ | ||
238 | if (!radeon_get_bios(rdev)) { | ||
239 | if (ASIC_IS_AVIVO(rdev)) | ||
240 | return -EINVAL; | ||
241 | } | ||
242 | if (rdev->is_atom_bios) { | ||
243 | r = radeon_atombios_init(rdev); | ||
244 | if (r) | ||
245 | return r; | ||
246 | } else { | ||
247 | dev_err(rdev->dev, "Expecting atombios for R520 GPU\n"); | ||
248 | return -EINVAL; | ||
249 | } | ||
250 | /* Reset gpu before posting otherwise ATOM will enter infinite loop */ | ||
251 | if (radeon_gpu_reset(rdev)) { | ||
252 | dev_warn(rdev->dev, | ||
253 | "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
254 | RREG32(R_000E40_RBBM_STATUS), | ||
255 | RREG32(R_0007C0_CP_STAT)); | ||
256 | } | ||
257 | /* check if the card is posted or not */ | ||
258 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
259 | DRM_INFO("GPU not posted. posting now...\n"); | ||
260 | atom_asic_init(rdev->mode_info.atom_context); | ||
261 | } | ||
262 | /* Initialize clocks */ | ||
263 | radeon_get_clock_info(rdev->ddev); | ||
264 | /* Get VRAM information */ | ||
265 | r520_vram_info(rdev); | ||
266 | /* Initialize memory controller (also test AGP) */ | ||
267 | r = r420_mc_init(rdev); | ||
268 | if (r) | ||
269 | return r; | ||
270 | rv515_debugfs(rdev); | ||
271 | /* Fence driver */ | ||
272 | r = radeon_fence_driver_init(rdev); | ||
273 | if (r) | ||
274 | return r; | ||
275 | r = radeon_irq_kms_init(rdev); | ||
276 | if (r) | ||
277 | return r; | ||
278 | /* Memory manager */ | ||
279 | r = radeon_object_init(rdev); | ||
280 | if (r) | ||
281 | return r; | ||
282 | r = rv370_pcie_gart_init(rdev); | ||
283 | if (r) | ||
284 | return r; | ||
285 | rv515_set_safe_registers(rdev); | ||
286 | rdev->accel_working = true; | ||
287 | r = r520_startup(rdev); | ||
288 | if (r) { | ||
289 | /* Something went wrong with the accel init, stop accel */ | ||
290 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
291 | rv515_suspend(rdev); | ||
292 | r100_cp_fini(rdev); | ||
293 | r100_wb_fini(rdev); | ||
294 | r100_ib_fini(rdev); | ||
295 | rv370_pcie_gart_fini(rdev); | ||
296 | radeon_agp_fini(rdev); | ||
297 | radeon_irq_kms_fini(rdev); | ||
298 | rdev->accel_working = false; | ||
299 | } | ||
300 | return 0; | ||
239 | } | 301 | } |
diff --git a/drivers/gpu/drm/radeon/r520d.h b/drivers/gpu/drm/radeon/r520d.h new file mode 100644 index 000000000000..61af61f644bc --- /dev/null +++ b/drivers/gpu/drm/radeon/r520d.h | |||
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __R520D_H__ | ||
29 | #define __R520D_H__ | ||
30 | |||
31 | /* Registers */ | ||
32 | #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 | ||
33 | #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) | ||
34 | #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) | ||
35 | #define C_0000F8_CONFIG_MEMSIZE 0x00000000 | ||
36 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
37 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
38 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
39 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
40 | #define R_0007C0_CP_STAT 0x0007C0 | ||
41 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
42 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
43 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
44 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
45 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
46 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
47 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
48 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
49 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
50 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
51 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
52 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
53 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
54 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
55 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
56 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
57 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
58 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
59 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
60 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
61 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
62 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
63 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
64 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
65 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
66 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
67 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
68 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
69 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
70 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
71 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
72 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
73 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
74 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
75 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
76 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
77 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
78 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
79 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
80 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
81 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
82 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
83 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
84 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
85 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
86 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
87 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
88 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
89 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
90 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
91 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
92 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
93 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
94 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
95 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
96 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
97 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
98 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
99 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
100 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
101 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
102 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
103 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
104 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
105 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
106 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
107 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
108 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
109 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
110 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
111 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
112 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
113 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
114 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
115 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
116 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
117 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
118 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
119 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
120 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
121 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
122 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
123 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
124 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
125 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
126 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
127 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
128 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
129 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
130 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
131 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
132 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
133 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
134 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
135 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
136 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
137 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
138 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
139 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
140 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
141 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
142 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
143 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
144 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
145 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
146 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
147 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
148 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
149 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
150 | #define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28) | ||
151 | #define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1) | ||
152 | #define C_000E40_RBBM_HIBUSY 0xEFFFFFFF | ||
153 | #define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29) | ||
154 | #define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1) | ||
155 | #define C_000E40_SKID_CFBUSY 0xDFFFFFFF | ||
156 | #define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30) | ||
157 | #define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1) | ||
158 | #define C_000E40_VAP_VF_BUSY 0xBFFFFFFF | ||
159 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
160 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
161 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
162 | |||
163 | |||
164 | #define R_000004_MC_FB_LOCATION 0x000004 | ||
165 | #define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
166 | #define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
167 | #define C_000004_MC_FB_START 0xFFFF0000 | ||
168 | #define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
169 | #define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
170 | #define C_000004_MC_FB_TOP 0x0000FFFF | ||
171 | #define R_000005_MC_AGP_LOCATION 0x000005 | ||
172 | #define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
173 | #define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
174 | #define C_000005_MC_AGP_START 0xFFFF0000 | ||
175 | #define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
176 | #define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
177 | #define C_000005_MC_AGP_TOP 0x0000FFFF | ||
178 | #define R_000006_AGP_BASE 0x000006 | ||
179 | #define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
180 | #define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
181 | #define C_000006_AGP_BASE_ADDR 0x00000000 | ||
182 | #define R_000007_AGP_BASE_2 0x000007 | ||
183 | #define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
184 | #define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
185 | #define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
186 | |||
187 | #endif | ||
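
The S_/G_/C_ triplets above follow the convention used throughout these generated radeon register headers: for each field, S_ shifts a value into its bit position, G_ extracts it from a register word, and C_ is the AND mask that clears the field for read-modify-write sequences. A minimal sketch of how they combine (hypothetical values, not part of this patch):

	/* Read-modify-write of the MC_FB_TOP field using the helpers above */
	u32 tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
	u32 top = G_000004_MC_FB_TOP(tmp);	/* extract the current field */
	tmp &= C_000004_MC_FB_TOP;		/* clear the field bits */
	tmp |= S_000004_MC_FB_TOP(top + 1);	/* shift the new value in */
	WREG32_MC(R_000004_MC_FB_LOCATION, tmp);
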
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index eab31c1d6df1..2e4e60edbff4 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -33,8 +33,8 @@ | |||
33 | #include "radeon.h" | 33 | #include "radeon.h" |
34 | #include "radeon_mode.h" | 34 | #include "radeon_mode.h" |
35 | #include "r600d.h" | 35 | #include "r600d.h" |
36 | #include "avivod.h" | ||
37 | #include "atom.h" | 36 | #include "atom.h" |
37 | #include "avivod.h" | ||
38 | 38 | ||
39 | #define PFP_UCODE_SIZE 576 | 39 | #define PFP_UCODE_SIZE 576 |
40 | #define PM4_UCODE_SIZE 1792 | 40 | #define PM4_UCODE_SIZE 1792 |
@@ -342,7 +342,7 @@ static void r600_mc_resume(struct radeon_device *rdev) | |||
342 | 342 | ||
343 | /* we need to own VRAM, so turn off the VGA renderer here | 343 | /* we need to own VRAM, so turn off the VGA renderer here |
344 | * to stop it overwriting our objects */ | 344 | * to stop it overwriting our objects */ |
345 | radeon_avivo_vga_render_disable(rdev); | 345 | rv515_vga_render_disable(rdev); |
346 | } | 346 | } |
347 | 347 | ||
348 | int r600_mc_init(struct radeon_device *rdev) | 348 | int r600_mc_init(struct radeon_device *rdev) |
@@ -380,6 +380,13 @@ int r600_mc_init(struct radeon_device *rdev) | |||
380 | /* Setup GPU memory space */ | 380 | /* Setup GPU memory space */ |
381 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 381 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
382 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 382 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
383 | |||
384 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
385 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
386 | |||
387 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
388 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
389 | |||
383 | if (rdev->flags & RADEON_IS_AGP) { | 390 | if (rdev->flags & RADEON_IS_AGP) { |
384 | r = radeon_agp_init(rdev); | 391 | r = radeon_agp_init(rdev); |
385 | if (r) | 392 | if (r) |
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index dde2ccbf1d15..d988eece0187 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
@@ -737,7 +737,7 @@ r600_blit_copy(struct drm_device *dev, | |||
737 | 737 | ||
738 | /* dst */ | 738 | /* dst */ |
739 | set_render_target(dev_priv, COLOR_8_8_8_8, | 739 | set_render_target(dev_priv, COLOR_8_8_8_8, |
740 | dst_x + cur_size, h, | 740 | (dst_x + cur_size) / 4, h, |
741 | dst_gpu_addr); | 741 | dst_gpu_addr); |
742 | 742 | ||
743 | /* scissors */ | 743 | /* scissors */ |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 0a6f4681f468..acae33e2ad51 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -774,7 +774,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
774 | 774 | ||
775 | /* dst 23 */ | 775 | /* dst 23 */ |
776 | set_render_target(rdev, COLOR_8_8_8_8, | 776 | set_render_target(rdev, COLOR_8_8_8_8, |
777 | dst_x + cur_size, h, | 777 | (dst_x + cur_size) / 4, h, |
778 | dst_gpu_addr); | 778 | dst_gpu_addr); |
779 | 779 | ||
780 | /* scissors 12 */ | 780 | /* scissors 12 */ |
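
Both blit paths program the destination as a COLOR_8_8_8_8 render target, i.e. 4 bytes per pixel, while dst_x and cur_size are byte quantities; the added /4 presumably converts the byte extent into the pixel width that set_render_target() expects:

	/* e.g. dst_x + cur_size = 8192 bytes
	 *   -> 8192 / 4 = 2048 pixels for a 32-bit (8_8_8_8) target */
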
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 33b89cd8743e..d28970db6a2d 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "r600d.h" | 30 | #include "r600d.h" |
31 | #include "avivod.h" | ||
32 | 31 | ||
33 | static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | 32 | static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, |
34 | struct radeon_cs_reloc **cs_reloc); | 33 | struct radeon_cs_reloc **cs_reloc); |
@@ -57,7 +56,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p, | |||
57 | idx, ib_chunk->length_dw); | 56 | idx, ib_chunk->length_dw); |
58 | return -EINVAL; | 57 | return -EINVAL; |
59 | } | 58 | } |
60 | header = ib_chunk->kdata[idx]; | 59 | header = radeon_get_ib_value(p, idx); |
61 | pkt->idx = idx; | 60 | pkt->idx = idx; |
62 | pkt->type = CP_PACKET_GET_TYPE(header); | 61 | pkt->type = CP_PACKET_GET_TYPE(header); |
63 | pkt->count = CP_PACKET_GET_COUNT(header); | 62 | pkt->count = CP_PACKET_GET_COUNT(header); |
@@ -98,7 +97,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p, | |||
98 | static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | 97 | static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, |
99 | struct radeon_cs_reloc **cs_reloc) | 98 | struct radeon_cs_reloc **cs_reloc) |
100 | { | 99 | { |
101 | struct radeon_cs_chunk *ib_chunk; | ||
102 | struct radeon_cs_chunk *relocs_chunk; | 100 | struct radeon_cs_chunk *relocs_chunk; |
103 | struct radeon_cs_packet p3reloc; | 101 | struct radeon_cs_packet p3reloc; |
104 | unsigned idx; | 102 | unsigned idx; |
@@ -109,7 +107,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | |||
109 | return -EINVAL; | 107 | return -EINVAL; |
110 | } | 108 | } |
111 | *cs_reloc = NULL; | 109 | *cs_reloc = NULL; |
112 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
113 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 110 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
114 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); | 111 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); |
115 | if (r) { | 112 | if (r) { |
@@ -121,7 +118,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | |||
121 | p3reloc.idx); | 118 | p3reloc.idx); |
122 | return -EINVAL; | 119 | return -EINVAL; |
123 | } | 120 | } |
124 | idx = ib_chunk->kdata[p3reloc.idx + 1]; | 121 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); |
125 | if (idx >= relocs_chunk->length_dw) { | 122 | if (idx >= relocs_chunk->length_dw) { |
126 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | 123 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", |
127 | idx, relocs_chunk->length_dw); | 124 | idx, relocs_chunk->length_dw); |
@@ -146,7 +143,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, | |||
146 | static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | 143 | static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, |
147 | struct radeon_cs_reloc **cs_reloc) | 144 | struct radeon_cs_reloc **cs_reloc) |
148 | { | 145 | { |
149 | struct radeon_cs_chunk *ib_chunk; | ||
150 | struct radeon_cs_chunk *relocs_chunk; | 146 | struct radeon_cs_chunk *relocs_chunk; |
151 | struct radeon_cs_packet p3reloc; | 147 | struct radeon_cs_packet p3reloc; |
152 | unsigned idx; | 148 | unsigned idx; |
@@ -157,7 +153,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
157 | return -EINVAL; | 153 | return -EINVAL; |
158 | } | 154 | } |
159 | *cs_reloc = NULL; | 155 | *cs_reloc = NULL; |
160 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
161 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 156 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
162 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); | 157 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); |
163 | if (r) { | 158 | if (r) { |
@@ -169,7 +164,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
169 | p3reloc.idx); | 164 | p3reloc.idx); |
170 | return -EINVAL; | 165 | return -EINVAL; |
171 | } | 166 | } |
172 | idx = ib_chunk->kdata[p3reloc.idx + 1]; | 167 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); |
173 | if (idx >= relocs_chunk->length_dw) { | 168 | if (idx >= relocs_chunk->length_dw) { |
174 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | 169 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", |
175 | idx, relocs_chunk->length_dw); | 170 | idx, relocs_chunk->length_dw); |
@@ -181,13 +176,136 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
181 | return 0; | 176 | return 0; |
182 | } | 177 | } |
183 | 178 | ||
179 | /** | ||
180 | * r600_cs_packet_parse_vline() - parse userspace VLINE packet | ||
181 | * @parser: parser structure holding parsing context. | ||
182 | * | ||
183 | * Userspace sends a special sequence for VLINE waits. | ||
184 | * PACKET0 - VLINE_START_END + value | ||
185 | * PACKET3 - WAIT_REG_MEM poll vline status reg | ||
186 | * RELOC (P3) - crtc_id in reloc. | ||
187 | * | ||
188 | * This function parses this and relocates the VLINE START END | ||
189 | * and WAIT_REG_MEM packets to the correct crtc. | ||
190 | * It also detects a switched off crtc and nulls out the | ||
191 | * wait in that case. | ||
192 | */ | ||
193 | static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) | ||
194 | { | ||
195 | struct drm_mode_object *obj; | ||
196 | struct drm_crtc *crtc; | ||
197 | struct radeon_crtc *radeon_crtc; | ||
198 | struct radeon_cs_packet p3reloc, wait_reg_mem; | ||
199 | int crtc_id; | ||
200 | int r; | ||
201 | uint32_t header, h_idx, reg, wait_reg_mem_info; | ||
202 | volatile uint32_t *ib; | ||
203 | |||
204 | ib = p->ib->ptr; | ||
205 | |||
206 | /* parse the WAIT_REG_MEM */ | ||
207 | r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); | ||
208 | if (r) | ||
209 | return r; | ||
210 | |||
211 | /* check it's a WAIT_REG_MEM */ | ||
212 | if (wait_reg_mem.type != PACKET_TYPE3 || | ||
213 | wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { | ||
214 | DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); | ||
215 | r = -EINVAL; | ||
216 | return r; | ||
217 | } | ||
218 | |||
219 | wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); | ||
220 | /* bit 4 is reg (0) or mem (1) */ | ||
221 | if (wait_reg_mem_info & 0x10) { | ||
222 | DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); | ||
223 | r = -EINVAL; | ||
224 | return r; | ||
225 | } | ||
226 | /* waiting for value to be equal */ | ||
227 | if ((wait_reg_mem_info & 0x7) != 0x3) { | ||
228 | DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); | ||
229 | r = -EINVAL; | ||
230 | return r; | ||
231 | } | ||
232 | if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { | ||
233 | DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); | ||
234 | r = -EINVAL; | ||
235 | return r; | ||
236 | } | ||
237 | |||
238 | if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { | ||
239 | DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); | ||
240 | r = -EINVAL; | ||
241 | return r; | ||
242 | } | ||
243 | |||
244 | /* jump over the NOP */ | ||
245 | r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); | ||
246 | if (r) | ||
247 | return r; | ||
248 | |||
249 | h_idx = p->idx - 2; | ||
250 | p->idx += wait_reg_mem.count + 2; | ||
251 | p->idx += p3reloc.count + 2; | ||
252 | |||
253 | header = radeon_get_ib_value(p, h_idx); | ||
254 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | ||
255 | reg = header >> 2; | ||
256 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
257 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | ||
258 | if (!obj) { | ||
259 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | ||
260 | r = -EINVAL; | ||
261 | goto out; | ||
262 | } | ||
263 | crtc = obj_to_crtc(obj); | ||
264 | radeon_crtc = to_radeon_crtc(crtc); | ||
265 | crtc_id = radeon_crtc->crtc_id; | ||
266 | |||
267 | if (!crtc->enabled) { | ||
268 | /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ | ||
269 | ib[h_idx + 2] = PACKET2(0); | ||
270 | ib[h_idx + 3] = PACKET2(0); | ||
271 | ib[h_idx + 4] = PACKET2(0); | ||
272 | ib[h_idx + 5] = PACKET2(0); | ||
273 | ib[h_idx + 6] = PACKET2(0); | ||
274 | ib[h_idx + 7] = PACKET2(0); | ||
275 | ib[h_idx + 8] = PACKET2(0); | ||
276 | } else if (crtc_id == 1) { | ||
277 | switch (reg) { | ||
278 | case AVIVO_D1MODE_VLINE_START_END: | ||
279 | header &= ~R600_CP_PACKET0_REG_MASK; | ||
280 | header |= AVIVO_D2MODE_VLINE_START_END >> 2; | ||
281 | break; | ||
282 | default: | ||
283 | DRM_ERROR("unknown crtc reloc\n"); | ||
284 | r = -EINVAL; | ||
285 | goto out; | ||
286 | } | ||
287 | ib[h_idx] = header; | ||
288 | ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; | ||
289 | } | ||
290 | out: | ||
291 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
292 | return r; | ||
293 | } | ||
294 | |||
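As a worked example of the index arithmetic above, with h_idx pointing at the PACKET0 header the parser assumes the following IB layout (derived from the offsets in the code, not stated explicitly in the patch):

	/* h_idx + 0   PACKET0 header (reg = AVIVO_D1MODE_VLINE_START_END)
	 * h_idx + 1   VLINE_START_END value
	 * h_idx + 2   PACKET3 WAIT_REG_MEM header (7 dwords in total)
	 * h_idx + 3   info word (engine, function, reg-vs-mem bits)
	 * h_idx + 4   poll address (AVIVO_D1MODE_VLINE_STATUS >> 2)
	 * h_idx + 5   poll address, high bits
	 * h_idx + 6   reference value
	 * h_idx + 7   poll mask (AVIVO_D1MODE_VLINE_STAT)
	 * h_idx + 8   wait interval
	 * h_idx + 9   PACKET3 NOP header (reloc)
	 * h_idx + 10  crtc_id, i.e. h_idx + 2 + 7 + 1 */

This is why the disabled-CRTC branch NOPs out exactly ib[h_idx + 2] through ib[h_idx + 8], and why the CRTC 1 rewrite patches the PACKET0 register at ib[h_idx] and the poll address at ib[h_idx + 4].
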
184 | static int r600_packet0_check(struct radeon_cs_parser *p, | 295 | static int r600_packet0_check(struct radeon_cs_parser *p, |
185 | struct radeon_cs_packet *pkt, | 296 | struct radeon_cs_packet *pkt, |
186 | unsigned idx, unsigned reg) | 297 | unsigned idx, unsigned reg) |
187 | { | 298 | { |
299 | int r; | ||
300 | |||
188 | switch (reg) { | 301 | switch (reg) { |
189 | case AVIVO_D1MODE_VLINE_START_END: | 302 | case AVIVO_D1MODE_VLINE_START_END: |
190 | case AVIVO_D2MODE_VLINE_START_END: | 303 | r = r600_cs_packet_parse_vline(p); |
304 | if (r) { | ||
305 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
306 | idx, reg); | ||
307 | return r; | ||
308 | } | ||
191 | break; | 309 | break; |
192 | default: | 310 | default: |
193 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 311 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
@@ -218,17 +336,18 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p, | |||
218 | static int r600_packet3_check(struct radeon_cs_parser *p, | 336 | static int r600_packet3_check(struct radeon_cs_parser *p, |
219 | struct radeon_cs_packet *pkt) | 337 | struct radeon_cs_packet *pkt) |
220 | { | 338 | { |
221 | struct radeon_cs_chunk *ib_chunk; | ||
222 | struct radeon_cs_reloc *reloc; | 339 | struct radeon_cs_reloc *reloc; |
223 | volatile u32 *ib; | 340 | volatile u32 *ib; |
224 | unsigned idx; | 341 | unsigned idx; |
225 | unsigned i; | 342 | unsigned i; |
226 | unsigned start_reg, end_reg, reg; | 343 | unsigned start_reg, end_reg, reg; |
227 | int r; | 344 | int r; |
345 | u32 idx_value; | ||
228 | 346 | ||
229 | ib = p->ib->ptr; | 347 | ib = p->ib->ptr; |
230 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
231 | idx = pkt->idx + 1; | 348 | idx = pkt->idx + 1; |
349 | idx_value = radeon_get_ib_value(p, idx); | ||
350 | |||
232 | switch (pkt->opcode) { | 351 | switch (pkt->opcode) { |
233 | case PACKET3_START_3D_CMDBUF: | 352 | case PACKET3_START_3D_CMDBUF: |
234 | if (p->family >= CHIP_RV770 || pkt->count) { | 353 | if (p->family >= CHIP_RV770 || pkt->count) { |
@@ -259,8 +378,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
259 | DRM_ERROR("bad DRAW_INDEX\n"); | 378 | DRM_ERROR("bad DRAW_INDEX\n"); |
260 | return -EINVAL; | 379 | return -EINVAL; |
261 | } | 380 | } |
262 | ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 381 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); |
263 | ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 382 | ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; |
264 | break; | 383 | break; |
265 | case PACKET3_DRAW_INDEX_AUTO: | 384 | case PACKET3_DRAW_INDEX_AUTO: |
266 | if (pkt->count != 1) { | 385 | if (pkt->count != 1) { |
@@ -281,14 +400,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
281 | return -EINVAL; | 400 | return -EINVAL; |
282 | } | 401 | } |
283 | /* bit 4 is reg (0) or mem (1) */ | 402 | /* bit 4 is reg (0) or mem (1) */ |
284 | if (ib_chunk->kdata[idx+0] & 0x10) { | 403 | if (idx_value & 0x10) { |
285 | r = r600_cs_packet_next_reloc(p, &reloc); | 404 | r = r600_cs_packet_next_reloc(p, &reloc); |
286 | if (r) { | 405 | if (r) { |
287 | DRM_ERROR("bad WAIT_REG_MEM\n"); | 406 | DRM_ERROR("bad WAIT_REG_MEM\n"); |
288 | return -EINVAL; | 407 | return -EINVAL; |
289 | } | 408 | } |
290 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 409 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); |
291 | ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 410 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; |
292 | } | 411 | } |
293 | break; | 412 | break; |
294 | case PACKET3_SURFACE_SYNC: | 413 | case PACKET3_SURFACE_SYNC: |
@@ -297,8 +416,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
297 | return -EINVAL; | 416 | return -EINVAL; |
298 | } | 417 | } |
299 | /* 0xffffffff/0x0 is flush all cache flag */ | 418 | /* 0xffffffff/0x0 is flush all cache flag */ |
300 | if (ib_chunk->kdata[idx+1] != 0xffffffff || | 419 | if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || |
301 | ib_chunk->kdata[idx+2] != 0) { | 420 | radeon_get_ib_value(p, idx + 2) != 0) { |
302 | r = r600_cs_packet_next_reloc(p, &reloc); | 421 | r = r600_cs_packet_next_reloc(p, &reloc); |
303 | if (r) { | 422 | if (r) { |
304 | DRM_ERROR("bad SURFACE_SYNC\n"); | 423 | DRM_ERROR("bad SURFACE_SYNC\n"); |
@@ -319,7 +438,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
319 | return -EINVAL; | 438 | return -EINVAL; |
320 | } | 439 | } |
321 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 440 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); |
322 | ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 441 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; |
323 | } | 442 | } |
324 | break; | 443 | break; |
325 | case PACKET3_EVENT_WRITE_EOP: | 444 | case PACKET3_EVENT_WRITE_EOP: |
@@ -333,10 +452,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
333 | return -EINVAL; | 452 | return -EINVAL; |
334 | } | 453 | } |
335 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 454 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); |
336 | ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 455 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; |
337 | break; | 456 | break; |
338 | case PACKET3_SET_CONFIG_REG: | 457 | case PACKET3_SET_CONFIG_REG: |
339 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET; | 458 | start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; |
340 | end_reg = 4 * pkt->count + start_reg - 4; | 459 | end_reg = 4 * pkt->count + start_reg - 4; |
341 | if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || | 460 | if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || |
342 | (start_reg >= PACKET3_SET_CONFIG_REG_END) || | 461 | (start_reg >= PACKET3_SET_CONFIG_REG_END) || |
@@ -356,7 +475,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
356 | } | 475 | } |
357 | break; | 476 | break; |
358 | case PACKET3_SET_CONTEXT_REG: | 477 | case PACKET3_SET_CONTEXT_REG: |
359 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; | 478 | start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; |
360 | end_reg = 4 * pkt->count + start_reg - 4; | 479 | end_reg = 4 * pkt->count + start_reg - 4; |
361 | if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || | 480 | if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || |
362 | (start_reg >= PACKET3_SET_CONTEXT_REG_END) || | 481 | (start_reg >= PACKET3_SET_CONTEXT_REG_END) || |
@@ -421,7 +540,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
421 | DRM_ERROR("bad SET_RESOURCE\n"); | 540 | DRM_ERROR("bad SET_RESOURCE\n"); |
422 | return -EINVAL; | 541 | return -EINVAL; |
423 | } | 542 | } |
424 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET; | 543 | start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; |
425 | end_reg = 4 * pkt->count + start_reg - 4; | 544 | end_reg = 4 * pkt->count + start_reg - 4; |
426 | if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || | 545 | if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || |
427 | (start_reg >= PACKET3_SET_RESOURCE_END) || | 546 | (start_reg >= PACKET3_SET_RESOURCE_END) || |
@@ -430,7 +549,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
430 | return -EINVAL; | 549 | return -EINVAL; |
431 | } | 550 | } |
432 | for (i = 0; i < (pkt->count / 7); i++) { | 551 | for (i = 0; i < (pkt->count / 7); i++) { |
433 | switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) { | 552 | switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { |
434 | case SQ_TEX_VTX_VALID_TEXTURE: | 553 | case SQ_TEX_VTX_VALID_TEXTURE: |
435 | /* tex base */ | 554 | /* tex base */ |
436 | r = r600_cs_packet_next_reloc(p, &reloc); | 555 | r = r600_cs_packet_next_reloc(p, &reloc); |
@@ -455,7 +574,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
455 | return -EINVAL; | 574 | return -EINVAL; |
456 | } | 575 | } |
457 | ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); | 576 | ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); |
458 | ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 577 | ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; |
459 | break; | 578 | break; |
460 | case SQ_TEX_VTX_INVALID_TEXTURE: | 579 | case SQ_TEX_VTX_INVALID_TEXTURE: |
461 | case SQ_TEX_VTX_INVALID_BUFFER: | 580 | case SQ_TEX_VTX_INVALID_BUFFER: |
@@ -466,7 +585,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
466 | } | 585 | } |
467 | break; | 586 | break; |
468 | case PACKET3_SET_ALU_CONST: | 587 | case PACKET3_SET_ALU_CONST: |
469 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET; | 588 | start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; |
470 | end_reg = 4 * pkt->count + start_reg - 4; | 589 | end_reg = 4 * pkt->count + start_reg - 4; |
471 | if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || | 590 | if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || |
472 | (start_reg >= PACKET3_SET_ALU_CONST_END) || | 591 | (start_reg >= PACKET3_SET_ALU_CONST_END) || |
@@ -476,7 +595,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
476 | } | 595 | } |
477 | break; | 596 | break; |
478 | case PACKET3_SET_BOOL_CONST: | 597 | case PACKET3_SET_BOOL_CONST: |
479 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET; | 598 | start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET; |
480 | end_reg = 4 * pkt->count + start_reg - 4; | 599 | end_reg = 4 * pkt->count + start_reg - 4; |
481 | if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || | 600 | if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || |
482 | (start_reg >= PACKET3_SET_BOOL_CONST_END) || | 601 | (start_reg >= PACKET3_SET_BOOL_CONST_END) || |
@@ -486,7 +605,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
486 | } | 605 | } |
487 | break; | 606 | break; |
488 | case PACKET3_SET_LOOP_CONST: | 607 | case PACKET3_SET_LOOP_CONST: |
489 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET; | 608 | start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET; |
490 | end_reg = 4 * pkt->count + start_reg - 4; | 609 | end_reg = 4 * pkt->count + start_reg - 4; |
491 | if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || | 610 | if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || |
492 | (start_reg >= PACKET3_SET_LOOP_CONST_END) || | 611 | (start_reg >= PACKET3_SET_LOOP_CONST_END) || |
@@ -496,7 +615,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
496 | } | 615 | } |
497 | break; | 616 | break; |
498 | case PACKET3_SET_CTL_CONST: | 617 | case PACKET3_SET_CTL_CONST: |
499 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET; | 618 | start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET; |
500 | end_reg = 4 * pkt->count + start_reg - 4; | 619 | end_reg = 4 * pkt->count + start_reg - 4; |
501 | if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || | 620 | if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || |
502 | (start_reg >= PACKET3_SET_CTL_CONST_END) || | 621 | (start_reg >= PACKET3_SET_CTL_CONST_END) || |
@@ -510,7 +629,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
510 | DRM_ERROR("bad SET_SAMPLER\n"); | 629 | DRM_ERROR("bad SET_SAMPLER\n"); |
511 | return -EINVAL; | 630 | return -EINVAL; |
512 | } | 631 | } |
513 | start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET; | 632 | start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; |
514 | end_reg = 4 * pkt->count + start_reg - 4; | 633 | end_reg = 4 * pkt->count + start_reg - 4; |
515 | if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || | 634 | if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || |
516 | (start_reg >= PACKET3_SET_SAMPLER_END) || | 635 | (start_reg >= PACKET3_SET_SAMPLER_END) || |
@@ -602,6 +721,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
602 | kfree(parser->relocs); | 721 | kfree(parser->relocs); |
603 | for (i = 0; i < parser->nchunks; i++) { | 722 | for (i = 0; i < parser->nchunks; i++) { |
604 | kfree(parser->chunks[i].kdata); | 723 | kfree(parser->chunks[i].kdata); |
724 | kfree(parser->chunks[i].kpage[0]); | ||
725 | kfree(parser->chunks[i].kpage[1]); | ||
605 | } | 726 | } |
606 | kfree(parser->chunks); | 727 | kfree(parser->chunks); |
607 | kfree(parser->chunks_array); | 728 | kfree(parser->chunks_array); |
@@ -639,7 +760,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, | |||
639 | * uncached). */ | 760 | * uncached). */ |
640 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; | 761 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; |
641 | parser.ib->length_dw = ib_chunk->length_dw; | 762 | parser.ib->length_dw = ib_chunk->length_dw; |
642 | memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4); | ||
643 | *l = parser.ib->length_dw; | 763 | *l = parser.ib->length_dw; |
644 | r = r600_cs_parse(&parser); | 764 | r = r600_cs_parse(&parser); |
645 | if (r) { | 765 | if (r) { |
@@ -647,6 +767,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, | |||
647 | r600_cs_parser_fini(&parser, r); | 767 | r600_cs_parser_fini(&parser, r); |
648 | return r; | 768 | return r; |
649 | } | 769 | } |
770 | r = radeon_cs_finish_pages(&parser); | ||
771 | if (r) { | ||
772 | DRM_ERROR("Invalid command stream!\n"); | ||
773 | r600_cs_parser_fini(&parser, r); | ||
774 | return r; | ||
775 | } | ||
650 | r600_cs_parser_fini(&parser, r); | 776 | r600_cs_parser_fini(&parser, r); |
651 | return r; | 777 | return r; |
652 | } | 778 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index c839b608970f..950b346e343f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -44,11 +44,30 @@ | |||
44 | * - TESTING, TESTING, TESTING | 44 | * - TESTING, TESTING, TESTING |
45 | */ | 45 | */ |
46 | 46 | ||
47 | /* Initialization path: | ||
48 | * We expect that acceleration initialization might fail for various | ||
49 | * reasons even though we work hard to make it work on most | ||
50 | * configurations. In order to still have a working userspace in such | ||
51 | * a situation the init path must succeed up to the memory controller | ||
52 | * initialization point. Failures before this point are considered | ||
53 | * fatal errors. Here is the init call chain: | ||
54 | * radeon_device_init performs common structure, mutex initialization | ||
55 | * asic_init sets up the GPU memory layout and performs all | ||
56 | * one-time initialization (failures in this | ||
57 | * function are considered fatal) | ||
58 | * asic_startup sets up GPU acceleration; in order to | ||
59 | * follow the guideline, the first thing this | ||
60 | * function should do is set up the GPU | ||
61 | * memory controller (only MC setup failures | ||
62 | * are considered fatal) | ||
63 | */ | ||
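
A condensed sketch of the pattern this comment describes, distilled from r520_init() earlier in this patch (the rXXX_* names are placeholders for the per-ASIC functions, not real symbols):

	r = rXXX_mc_init(rdev);		/* MC setup: failure here is fatal */
	if (r)
		return r;
	rdev->accel_working = true;
	r = rXXX_startup(rdev);		/* accel bring-up: failure is non-fatal */
	if (r) {
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		/* tear down CP/WB/IB/GART but keep modesetting alive */
		rdev->accel_working = false;
	}
	return 0;
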
64 | |||
47 | #include <asm/atomic.h> | 65 | #include <asm/atomic.h> |
48 | #include <linux/wait.h> | 66 | #include <linux/wait.h> |
49 | #include <linux/list.h> | 67 | #include <linux/list.h> |
50 | #include <linux/kref.h> | 68 | #include <linux/kref.h> |
51 | 69 | ||
70 | #include "radeon_family.h" | ||
52 | #include "radeon_mode.h" | 71 | #include "radeon_mode.h" |
53 | #include "radeon_reg.h" | 72 | #include "radeon_reg.h" |
54 | 73 | ||
@@ -77,64 +96,6 @@ extern int radeon_tv; | |||
77 | #define RADEONFB_CONN_LIMIT 4 | 96 | #define RADEONFB_CONN_LIMIT 4 |
78 | #define RADEON_BIOS_NUM_SCRATCH 8 | 97 | #define RADEON_BIOS_NUM_SCRATCH 8 |
79 | 98 | ||
80 | enum radeon_family { | ||
81 | CHIP_R100, | ||
82 | CHIP_RV100, | ||
83 | CHIP_RS100, | ||
84 | CHIP_RV200, | ||
85 | CHIP_RS200, | ||
86 | CHIP_R200, | ||
87 | CHIP_RV250, | ||
88 | CHIP_RS300, | ||
89 | CHIP_RV280, | ||
90 | CHIP_R300, | ||
91 | CHIP_R350, | ||
92 | CHIP_RV350, | ||
93 | CHIP_RV380, | ||
94 | CHIP_R420, | ||
95 | CHIP_R423, | ||
96 | CHIP_RV410, | ||
97 | CHIP_RS400, | ||
98 | CHIP_RS480, | ||
99 | CHIP_RS600, | ||
100 | CHIP_RS690, | ||
101 | CHIP_RS740, | ||
102 | CHIP_RV515, | ||
103 | CHIP_R520, | ||
104 | CHIP_RV530, | ||
105 | CHIP_RV560, | ||
106 | CHIP_RV570, | ||
107 | CHIP_R580, | ||
108 | CHIP_R600, | ||
109 | CHIP_RV610, | ||
110 | CHIP_RV630, | ||
111 | CHIP_RV670, | ||
112 | CHIP_RV620, | ||
113 | CHIP_RV635, | ||
114 | CHIP_RS780, | ||
115 | CHIP_RS880, | ||
116 | CHIP_RV770, | ||
117 | CHIP_RV730, | ||
118 | CHIP_RV710, | ||
119 | CHIP_RV740, | ||
120 | CHIP_LAST, | ||
121 | }; | ||
122 | |||
123 | enum radeon_chip_flags { | ||
124 | RADEON_FAMILY_MASK = 0x0000ffffUL, | ||
125 | RADEON_FLAGS_MASK = 0xffff0000UL, | ||
126 | RADEON_IS_MOBILITY = 0x00010000UL, | ||
127 | RADEON_IS_IGP = 0x00020000UL, | ||
128 | RADEON_SINGLE_CRTC = 0x00040000UL, | ||
129 | RADEON_IS_AGP = 0x00080000UL, | ||
130 | RADEON_HAS_HIERZ = 0x00100000UL, | ||
131 | RADEON_IS_PCIE = 0x00200000UL, | ||
132 | RADEON_NEW_MEMMAP = 0x00400000UL, | ||
133 | RADEON_IS_PCI = 0x00800000UL, | ||
134 | RADEON_IS_IGPGART = 0x01000000UL, | ||
135 | }; | ||
136 | |||
137 | |||
138 | /* | 99 | /* |
139 | * Errata workarounds. | 100 | * Errata workarounds. |
140 | */ | 101 | */ |
@@ -399,7 +360,7 @@ struct radeon_ib { | |||
399 | unsigned long idx; | 360 | unsigned long idx; |
400 | uint64_t gpu_addr; | 361 | uint64_t gpu_addr; |
401 | struct radeon_fence *fence; | 362 | struct radeon_fence *fence; |
402 | volatile uint32_t *ptr; | 363 | uint32_t *ptr; |
403 | uint32_t length_dw; | 364 | uint32_t length_dw; |
404 | }; | 365 | }; |
405 | 366 | ||
@@ -472,7 +433,12 @@ struct radeon_cs_reloc { | |||
472 | struct radeon_cs_chunk { | 433 | struct radeon_cs_chunk { |
473 | uint32_t chunk_id; | 434 | uint32_t chunk_id; |
474 | uint32_t length_dw; | 435 | uint32_t length_dw; |
436 | int kpage_idx[2]; | ||
437 | uint32_t *kpage[2]; | ||
475 | uint32_t *kdata; | 438 | uint32_t *kdata; |
439 | void __user *user_ptr; | ||
440 | int last_copied_page; | ||
441 | int last_page_index; | ||
476 | }; | 442 | }; |
477 | 443 | ||
478 | struct radeon_cs_parser { | 444 | struct radeon_cs_parser { |
@@ -495,8 +461,38 @@ struct radeon_cs_parser { | |||
495 | struct radeon_ib *ib; | 461 | struct radeon_ib *ib; |
496 | void *track; | 462 | void *track; |
497 | unsigned family; | 463 | unsigned family; |
464 | int parser_error; | ||
498 | }; | 465 | }; |
499 | 466 | ||
467 | extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); | ||
468 | extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); | ||
469 | |||
470 | |||
471 | static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) | ||
472 | { | ||
473 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
474 | u32 pg_idx, pg_offset; | ||
475 | u32 idx_value = 0; | ||
476 | int new_page; | ||
477 | |||
478 | pg_idx = (idx * 4) / PAGE_SIZE; | ||
479 | pg_offset = (idx * 4) % PAGE_SIZE; | ||
480 | |||
481 | if (ibc->kpage_idx[0] == pg_idx) | ||
482 | return ibc->kpage[0][pg_offset/4]; | ||
483 | if (ibc->kpage_idx[1] == pg_idx) | ||
484 | return ibc->kpage[1][pg_offset/4]; | ||
485 | |||
486 | new_page = radeon_cs_update_pages(p, pg_idx); | ||
487 | if (new_page < 0) { | ||
488 | p->parser_error = new_page; | ||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | idx_value = ibc->kpage[new_page][pg_offset/4]; | ||
493 | return idx_value; | ||
494 | } | ||
495 | |||
500 | struct radeon_cs_packet { | 496 | struct radeon_cs_packet { |
501 | unsigned idx; | 497 | unsigned idx; |
502 | unsigned type; | 498 | unsigned type; |
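
The new kpage machinery above replaces the old single kdata copy of the whole IB chunk: radeon_get_ib_value() keeps a two-page window of the userspace command stream cached in the kernel and faults pages in on demand through radeon_cs_update_pages(). That helper is only declared here; its implementation is not part of this hunk, so the following is a simplified sketch of what it presumably does, under that assumption:

	/* Sketch only: copy the page holding dword index pg_idx from the
	 * userspace chunk into the least recently used of the two kernel
	 * page slots and return that slot (0 or 1), or -EFAULT on error. */
	int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
	{
		struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
		int slot = (ibc->kpage_idx[0] < ibc->kpage_idx[1]) ? 0 : 1;

		if (copy_from_user(ibc->kpage[slot],
				   ibc->user_ptr + pg_idx * PAGE_SIZE, PAGE_SIZE))
			return -EFAULT;
		ibc->kpage_idx[slot] = pg_idx;
		ibc->last_copied_page = pg_idx;
		return slot;
	}

The r600_cs.c changes in this patch are the mechanical consequence: every ib_chunk->kdata[idx] read becomes radeon_get_ib_value(p, idx), and radeon_cs_finish_pages() flushes whatever the parser did not touch before the IB is scheduled.
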
@@ -1000,6 +996,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev); | |||
1000 | extern void radeon_scratch_init(struct radeon_device *rdev); | 996 | extern void radeon_scratch_init(struct radeon_device *rdev); |
1001 | extern void radeon_surface_init(struct radeon_device *rdev); | 997 | extern void radeon_surface_init(struct radeon_device *rdev); |
1002 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); | 998 | extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
999 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | ||
1003 | 1000 | ||
1004 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 1001 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ |
1005 | struct r100_mc_save { | 1002 | struct r100_mc_save { |
@@ -1031,6 +1028,9 @@ extern void r100_vram_init_sizes(struct radeon_device *rdev); | |||
1031 | extern void r100_wb_disable(struct radeon_device *rdev); | 1028 | extern void r100_wb_disable(struct radeon_device *rdev); |
1032 | extern void r100_wb_fini(struct radeon_device *rdev); | 1029 | extern void r100_wb_fini(struct radeon_device *rdev); |
1033 | extern int r100_wb_init(struct radeon_device *rdev); | 1030 | extern int r100_wb_init(struct radeon_device *rdev); |
1031 | extern void r100_hdp_reset(struct radeon_device *rdev); | ||
1032 | extern int r100_rb2d_reset(struct radeon_device *rdev); | ||
1033 | extern int r100_cp_reset(struct radeon_device *rdev); | ||
1034 | 1034 | ||
1035 | /* r300,r350,rv350,rv370,rv380 */ | 1035 | /* r300,r350,rv350,rv370,rv380 */ |
1036 | extern void r300_set_reg_safe(struct radeon_device *rdev); | 1036 | extern void r300_set_reg_safe(struct radeon_device *rdev); |
@@ -1042,12 +1042,29 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | |||
1042 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); | 1042 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); |
1043 | 1043 | ||
1044 | /* r420,r423,rv410 */ | 1044 | /* r420,r423,rv410 */ |
1045 | extern int r420_mc_init(struct radeon_device *rdev); | ||
1045 | extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); | 1046 | extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); |
1046 | extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); | 1047 | extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
1047 | extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); | 1048 | extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); |
1049 | extern void r420_pipes_init(struct radeon_device *rdev); | ||
1048 | 1050 | ||
1049 | /* rv515 */ | 1051 | /* rv515 */ |
1052 | struct rv515_mc_save { | ||
1053 | u32 d1vga_control; | ||
1054 | u32 d2vga_control; | ||
1055 | u32 vga_render_control; | ||
1056 | u32 vga_hdp_control; | ||
1057 | u32 d1crtc_control; | ||
1058 | u32 d2crtc_control; | ||
1059 | }; | ||
1050 | extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | 1060 | extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); |
1061 | extern void rv515_vga_render_disable(struct radeon_device *rdev); | ||
1062 | extern void rv515_set_safe_registers(struct radeon_device *rdev); | ||
1063 | extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
1064 | extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
1065 | extern void rv515_clock_startup(struct radeon_device *rdev); | ||
1066 | extern void rv515_debugfs(struct radeon_device *rdev); | ||
1067 | extern int rv515_suspend(struct radeon_device *rdev); | ||
1051 | 1068 | ||
1052 | /* rs690, rs740 */ | 1069 | /* rs690, rs740 */ |
1053 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, | 1070 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 8968f78fa1e3..c8a4e7b5663d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -420,41 +420,43 @@ static struct radeon_asic rs690_asic = { | |||
420 | * rv515 | 420 | * rv515 |
421 | */ | 421 | */ |
422 | int rv515_init(struct radeon_device *rdev); | 422 | int rv515_init(struct radeon_device *rdev); |
423 | void rv515_errata(struct radeon_device *rdev); | 423 | void rv515_fini(struct radeon_device *rdev); |
424 | void rv515_vram_info(struct radeon_device *rdev); | ||
425 | int rv515_gpu_reset(struct radeon_device *rdev); | 424 | int rv515_gpu_reset(struct radeon_device *rdev); |
426 | int rv515_mc_init(struct radeon_device *rdev); | ||
427 | void rv515_mc_fini(struct radeon_device *rdev); | ||
428 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 425 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
429 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 426 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
430 | void rv515_ring_start(struct radeon_device *rdev); | 427 | void rv515_ring_start(struct radeon_device *rdev); |
431 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | 428 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
432 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 429 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
433 | void rv515_bandwidth_update(struct radeon_device *rdev); | 430 | void rv515_bandwidth_update(struct radeon_device *rdev); |
431 | int rv515_resume(struct radeon_device *rdev); | ||
432 | int rv515_suspend(struct radeon_device *rdev); | ||
434 | static struct radeon_asic rv515_asic = { | 433 | static struct radeon_asic rv515_asic = { |
435 | .init = &rv515_init, | 434 | .init = &rv515_init, |
436 | .errata = &rv515_errata, | 435 | .fini = &rv515_fini, |
437 | .vram_info = &rv515_vram_info, | 436 | .suspend = &rv515_suspend, |
437 | .resume = &rv515_resume, | ||
438 | .errata = NULL, | ||
439 | .vram_info = NULL, | ||
438 | .vga_set_state = &r100_vga_set_state, | 440 | .vga_set_state = &r100_vga_set_state, |
439 | .gpu_reset = &rv515_gpu_reset, | 441 | .gpu_reset = &rv515_gpu_reset, |
440 | .mc_init = &rv515_mc_init, | 442 | .mc_init = NULL, |
441 | .mc_fini = &rv515_mc_fini, | 443 | .mc_fini = NULL, |
442 | .wb_init = &r100_wb_init, | 444 | .wb_init = NULL, |
443 | .wb_fini = &r100_wb_fini, | 445 | .wb_fini = NULL, |
444 | .gart_init = &rv370_pcie_gart_init, | 446 | .gart_init = &rv370_pcie_gart_init, |
445 | .gart_fini = &rv370_pcie_gart_fini, | 447 | .gart_fini = &rv370_pcie_gart_fini, |
446 | .gart_enable = &rv370_pcie_gart_enable, | 448 | .gart_enable = NULL, |
447 | .gart_disable = &rv370_pcie_gart_disable, | 449 | .gart_disable = NULL, |
448 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 450 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
449 | .gart_set_page = &rv370_pcie_gart_set_page, | 451 | .gart_set_page = &rv370_pcie_gart_set_page, |
450 | .cp_init = &r100_cp_init, | 452 | .cp_init = NULL, |
451 | .cp_fini = &r100_cp_fini, | 453 | .cp_fini = NULL, |
452 | .cp_disable = &r100_cp_disable, | 454 | .cp_disable = NULL, |
453 | .cp_commit = &r100_cp_commit, | 455 | .cp_commit = &r100_cp_commit, |
454 | .ring_start = &rv515_ring_start, | 456 | .ring_start = &rv515_ring_start, |
455 | .ring_test = &r100_ring_test, | 457 | .ring_test = &r100_ring_test, |
456 | .ring_ib_execute = &r100_ring_ib_execute, | 458 | .ring_ib_execute = &r100_ring_ib_execute, |
457 | .ib_test = &r100_ib_test, | 459 | .ib_test = NULL, |
458 | .irq_set = &rs600_irq_set, | 460 | .irq_set = &rs600_irq_set, |
459 | .irq_process = &rs600_irq_process, | 461 | .irq_process = &rs600_irq_process, |
460 | .get_vblank_counter = &rs600_get_vblank_counter, | 462 | .get_vblank_counter = &rs600_get_vblank_counter, |
@@ -476,35 +478,35 @@ static struct radeon_asic rv515_asic = { | |||
476 | /* | 478 | /* |
477 | * r520,rv530,rv560,rv570,r580 | 479 | * r520,rv530,rv560,rv570,r580 |
478 | */ | 480 | */ |
479 | void r520_errata(struct radeon_device *rdev); | 481 | int r520_init(struct radeon_device *rdev); |
480 | void r520_vram_info(struct radeon_device *rdev); | 482 | int r520_resume(struct radeon_device *rdev); |
481 | int r520_mc_init(struct radeon_device *rdev); | ||
482 | void r520_mc_fini(struct radeon_device *rdev); | ||
483 | void r520_bandwidth_update(struct radeon_device *rdev); | ||
484 | static struct radeon_asic r520_asic = { | 483 | static struct radeon_asic r520_asic = { |
485 | .init = &rv515_init, | 484 | .init = &r520_init, |
486 | .errata = &r520_errata, | 485 | .fini = &rv515_fini, |
487 | .vram_info = &r520_vram_info, | 486 | .suspend = &rv515_suspend, |
487 | .resume = &r520_resume, | ||
488 | .errata = NULL, | ||
489 | .vram_info = NULL, | ||
488 | .vga_set_state = &r100_vga_set_state, | 490 | .vga_set_state = &r100_vga_set_state, |
489 | .gpu_reset = &rv515_gpu_reset, | 491 | .gpu_reset = &rv515_gpu_reset, |
490 | .mc_init = &r520_mc_init, | 492 | .mc_init = NULL, |
491 | .mc_fini = &r520_mc_fini, | 493 | .mc_fini = NULL, |
492 | .wb_init = &r100_wb_init, | 494 | .wb_init = NULL, |
493 | .wb_fini = &r100_wb_fini, | 495 | .wb_fini = NULL, |
494 | .gart_init = &rv370_pcie_gart_init, | 496 | .gart_init = NULL, |
495 | .gart_fini = &rv370_pcie_gart_fini, | 497 | .gart_fini = NULL, |
496 | .gart_enable = &rv370_pcie_gart_enable, | 498 | .gart_enable = NULL, |
497 | .gart_disable = &rv370_pcie_gart_disable, | 499 | .gart_disable = NULL, |
498 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 500 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
499 | .gart_set_page = &rv370_pcie_gart_set_page, | 501 | .gart_set_page = &rv370_pcie_gart_set_page, |
500 | .cp_init = &r100_cp_init, | 502 | .cp_init = NULL, |
501 | .cp_fini = &r100_cp_fini, | 503 | .cp_fini = NULL, |
502 | .cp_disable = &r100_cp_disable, | 504 | .cp_disable = NULL, |
503 | .cp_commit = &r100_cp_commit, | 505 | .cp_commit = &r100_cp_commit, |
504 | .ring_start = &rv515_ring_start, | 506 | .ring_start = &rv515_ring_start, |
505 | .ring_test = &r100_ring_test, | 507 | .ring_test = &r100_ring_test, |
506 | .ring_ib_execute = &r100_ring_ib_execute, | 508 | .ring_ib_execute = &r100_ring_ib_execute, |
507 | .ib_test = &r100_ib_test, | 509 | .ib_test = NULL, |
508 | .irq_set = &rs600_irq_set, | 510 | .irq_set = &rs600_irq_set, |
509 | .irq_process = &rs600_irq_process, | 511 | .irq_process = &rs600_irq_process, |
510 | .get_vblank_counter = &rs600_get_vblank_counter, | 512 | .get_vblank_counter = &rs600_get_vblank_counter, |
@@ -519,7 +521,7 @@ static struct radeon_asic r520_asic = { | |||
519 | .set_clock_gating = &radeon_atom_set_clock_gating, | 521 | .set_clock_gating = &radeon_atom_set_clock_gating, |
520 | .set_surface_reg = r100_set_surface_reg, | 522 | .set_surface_reg = r100_set_surface_reg, |
521 | .clear_surface_reg = r100_clear_surface_reg, | 523 | .clear_surface_reg = r100_clear_surface_reg, |
522 | .bandwidth_update = &r520_bandwidth_update, | 524 | .bandwidth_update = &rv515_bandwidth_update, |
523 | }; | 525 | }; |
524 | 526 | ||
525 | /* | 527 | /* |
@@ -596,7 +598,7 @@ static struct radeon_asic r600_asic = { | |||
596 | .set_clock_gating = &radeon_atom_set_clock_gating, | 598 | .set_clock_gating = &radeon_atom_set_clock_gating, |
597 | .set_surface_reg = r600_set_surface_reg, | 599 | .set_surface_reg = r600_set_surface_reg, |
598 | .clear_surface_reg = r600_clear_surface_reg, | 600 | .clear_surface_reg = r600_clear_surface_reg, |
599 | .bandwidth_update = &r520_bandwidth_update, | 601 | .bandwidth_update = &rv515_bandwidth_update, |
600 | }; | 602 | }; |
601 | 603 | ||
602 | /* | 604 | /* |
@@ -646,7 +648,7 @@ static struct radeon_asic rv770_asic = { | |||
646 | .set_clock_gating = &radeon_atom_set_clock_gating, | 648 | .set_clock_gating = &radeon_atom_set_clock_gating, |
647 | .set_surface_reg = r600_set_surface_reg, | 649 | .set_surface_reg = r600_set_surface_reg, |
648 | .clear_surface_reg = r600_clear_surface_reg, | 650 | .clear_surface_reg = r600_clear_surface_reg, |
649 | .bandwidth_update = &r520_bandwidth_update, | 651 | .bandwidth_update = &rv515_bandwidth_update, |
650 | }; | 652 | }; |
651 | 653 | ||
652 | #endif | 654 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 743742128307..5b6c08cee40e 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -272,12 +272,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
272 | (le16_to_cpu(path->usConnObjectId) & | 272 | (le16_to_cpu(path->usConnObjectId) & |
273 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | 273 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; |
274 | 274 | ||
275 | if ((le16_to_cpu(path->usDeviceTag) == | 275 | /* TODO CV support */ |
276 | ATOM_DEVICE_TV1_SUPPORT) | 276 | if (le16_to_cpu(path->usDeviceTag) == |
277 | || (le16_to_cpu(path->usDeviceTag) == | 277 | ATOM_DEVICE_CV_SUPPORT) |
278 | ATOM_DEVICE_TV2_SUPPORT) | ||
279 | || (le16_to_cpu(path->usDeviceTag) == | ||
280 | ATOM_DEVICE_CV_SUPPORT)) | ||
281 | continue; | 278 | continue; |
282 | 279 | ||
283 | if ((rdev->family == CHIP_RS780) && | 280 | if ((rdev->family == CHIP_RS780) && |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index af1d551f1a8f..e376be47a4a0 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "drm_edid.h" | 27 | #include "drm_edid.h" |
28 | #include "drm_crtc_helper.h" | 28 | #include "drm_crtc_helper.h" |
29 | #include "drm_fb_helper.h" | ||
29 | #include "radeon_drm.h" | 30 | #include "radeon_drm.h" |
30 | #include "radeon.h" | 31 | #include "radeon.h" |
31 | #include "atom.h" | 32 | #include "atom.h" |
@@ -245,7 +246,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn | |||
245 | if (common_modes[i].w < 320 || common_modes[i].h < 200) | 246 | if (common_modes[i].w < 320 || common_modes[i].h < 200) |
246 | continue; | 247 | continue; |
247 | 248 | ||
248 | mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false); | 249 | mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); |
249 | drm_mode_probed_add(connector, mode); | 250 | drm_mode_probed_add(connector, mode); |
250 | } | 251 | } |
251 | } | 252 | } |
@@ -559,7 +560,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector) | |||
559 | radeon_add_common_modes(encoder, connector); | 560 | radeon_add_common_modes(encoder, connector); |
560 | else { | 561 | else { |
561 | /* only 800x600 is supported right now on pre-avivo chips */ | 562 | /* only 800x600 is supported right now on pre-avivo chips */ |
562 | tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false); | 563 | tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false); |
563 | tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; | 564 | tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; |
564 | drm_mode_probed_add(connector, tv_mode); | 565 | drm_mode_probed_add(connector, tv_mode); |
565 | } | 566 | } |
@@ -743,6 +744,15 @@ struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) | |||
743 | return NULL; | 744 | return NULL; |
744 | } | 745 | } |
745 | 746 | ||
747 | static void radeon_dvi_force(struct drm_connector *connector) | ||
748 | { | ||
749 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
750 | if (connector->force == DRM_FORCE_ON) | ||
751 | radeon_connector->use_digital = false; | ||
752 | if (connector->force == DRM_FORCE_ON_DIGITAL) | ||
753 | radeon_connector->use_digital = true; | ||
754 | } | ||
755 | |||
746 | struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { | 756 | struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { |
747 | .get_modes = radeon_dvi_get_modes, | 757 | .get_modes = radeon_dvi_get_modes, |
748 | .mode_valid = radeon_vga_mode_valid, | 758 | .mode_valid = radeon_vga_mode_valid, |
@@ -755,6 +765,7 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = { | |||
755 | .fill_modes = drm_helper_probe_single_connector_modes, | 765 | .fill_modes = drm_helper_probe_single_connector_modes, |
756 | .set_property = radeon_connector_set_property, | 766 | .set_property = radeon_connector_set_property, |
757 | .destroy = radeon_connector_destroy, | 767 | .destroy = radeon_connector_destroy, |
768 | .force = radeon_dvi_force, | ||
758 | }; | 769 | }; |
759 | 770 | ||
760 | void | 771 | void |
@@ -771,6 +782,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
771 | struct radeon_connector *radeon_connector; | 782 | struct radeon_connector *radeon_connector; |
772 | struct radeon_connector_atom_dig *radeon_dig_connector; | 783 | struct radeon_connector_atom_dig *radeon_dig_connector; |
773 | uint32_t subpixel_order = SubPixelNone; | 784 | uint32_t subpixel_order = SubPixelNone; |
785 | int ret; | ||
774 | 786 | ||
775 | /* fixme - tv/cv/din */ | 787 | /* fixme - tv/cv/din */ |
776 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 788 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
@@ -796,24 +808,30 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
796 | switch (connector_type) { | 808 | switch (connector_type) { |
797 | case DRM_MODE_CONNECTOR_VGA: | 809 | case DRM_MODE_CONNECTOR_VGA: |
798 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 810 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
799 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); | 811 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); |
812 | if (ret) | ||
813 | goto failed; | ||
800 | if (i2c_bus->valid) { | 814 | if (i2c_bus->valid) { |
801 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); | 815 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); |
802 | if (!radeon_connector->ddc_bus) | 816 | if (!radeon_connector->ddc_bus) |
803 | goto failed; | 817 | goto failed; |
804 | } | 818 | } |
819 | radeon_connector->dac_load_detect = true; | ||
805 | drm_connector_attach_property(&radeon_connector->base, | 820 | drm_connector_attach_property(&radeon_connector->base, |
806 | rdev->mode_info.load_detect_property, | 821 | rdev->mode_info.load_detect_property, |
807 | 1); | 822 | 1); |
808 | break; | 823 | break; |
809 | case DRM_MODE_CONNECTOR_DVIA: | 824 | case DRM_MODE_CONNECTOR_DVIA: |
810 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 825 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
811 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); | 826 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); |
827 | if (ret) | ||
828 | goto failed; | ||
812 | if (i2c_bus->valid) { | 829 | if (i2c_bus->valid) { |
813 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); | 830 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); |
814 | if (!radeon_connector->ddc_bus) | 831 | if (!radeon_connector->ddc_bus) |
815 | goto failed; | 832 | goto failed; |
816 | } | 833 | } |
834 | radeon_connector->dac_load_detect = true; | ||
817 | drm_connector_attach_property(&radeon_connector->base, | 835 | drm_connector_attach_property(&radeon_connector->base, |
818 | rdev->mode_info.load_detect_property, | 836 | rdev->mode_info.load_detect_property, |
819 | 1); | 837 | 1); |
@@ -827,7 +845,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
827 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 845 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
828 | radeon_connector->con_priv = radeon_dig_connector; | 846 | radeon_connector->con_priv = radeon_dig_connector; |
829 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 847 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
830 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | 848 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); |
849 | if (ret) | ||
850 | goto failed; | ||
831 | if (i2c_bus->valid) { | 851 | if (i2c_bus->valid) { |
832 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); | 852 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); |
833 | if (!radeon_connector->ddc_bus) | 853 | if (!radeon_connector->ddc_bus) |
@@ -837,6 +857,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
837 | drm_connector_attach_property(&radeon_connector->base, | 857 | drm_connector_attach_property(&radeon_connector->base, |
838 | rdev->mode_info.coherent_mode_property, | 858 | rdev->mode_info.coherent_mode_property, |
839 | 1); | 859 | 1); |
860 | radeon_connector->dac_load_detect = true; | ||
840 | drm_connector_attach_property(&radeon_connector->base, | 861 | drm_connector_attach_property(&radeon_connector->base, |
841 | rdev->mode_info.load_detect_property, | 862 | rdev->mode_info.load_detect_property, |
842 | 1); | 863 | 1); |
@@ -850,7 +871,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
850 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 871 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
851 | radeon_connector->con_priv = radeon_dig_connector; | 872 | radeon_connector->con_priv = radeon_dig_connector; |
852 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 873 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
853 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | 874 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); |
875 | if (ret) | ||
876 | goto failed; | ||
854 | if (i2c_bus->valid) { | 877 | if (i2c_bus->valid) { |
855 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); | 878 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); |
856 | if (!radeon_connector->ddc_bus) | 879 | if (!radeon_connector->ddc_bus) |
@@ -869,7 +892,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
869 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 892 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
870 | radeon_connector->con_priv = radeon_dig_connector; | 893 | radeon_connector->con_priv = radeon_dig_connector; |
871 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 894 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
872 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | 895 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); |
896 | if (ret) | ||
897 | goto failed; | ||
873 | if (i2c_bus->valid) { | 898 | if (i2c_bus->valid) { |
874 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); | 899 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); |
875 | if (!radeon_connector->ddc_bus) | 900 | if (!radeon_connector->ddc_bus) |
@@ -882,11 +907,14 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
882 | case DRM_MODE_CONNECTOR_9PinDIN: | 907 | case DRM_MODE_CONNECTOR_9PinDIN: |
883 | if (radeon_tv == 1) { | 908 | if (radeon_tv == 1) { |
884 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 909 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
885 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 910 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
911 | if (ret) | ||
912 | goto failed; | ||
913 | radeon_connector->dac_load_detect = true; | ||
914 | drm_connector_attach_property(&radeon_connector->base, | ||
915 | rdev->mode_info.load_detect_property, | ||
916 | 1); | ||
886 | } | 917 | } |
887 | drm_connector_attach_property(&radeon_connector->base, | ||
888 | rdev->mode_info.load_detect_property, | ||
889 | 1); | ||
890 | break; | 918 | break; |
891 | case DRM_MODE_CONNECTOR_LVDS: | 919 | case DRM_MODE_CONNECTOR_LVDS: |
892 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 920 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
@@ -896,7 +924,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
896 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 924 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
897 | radeon_connector->con_priv = radeon_dig_connector; | 925 | radeon_connector->con_priv = radeon_dig_connector; |
898 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 926 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
899 | drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); | 927 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); |
928 | if (ret) | ||
929 | goto failed; | ||
900 | if (i2c_bus->valid) { | 930 | if (i2c_bus->valid) { |
901 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); | 931 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); |
902 | if (!radeon_connector->ddc_bus) | 932 | if (!radeon_connector->ddc_bus) |
@@ -932,6 +962,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
932 | struct drm_connector *connector; | 962 | struct drm_connector *connector; |
933 | struct radeon_connector *radeon_connector; | 963 | struct radeon_connector *radeon_connector; |
934 | uint32_t subpixel_order = SubPixelNone; | 964 | uint32_t subpixel_order = SubPixelNone; |
965 | int ret; | ||
935 | 966 | ||
936 | /* fixme - tv/cv/din */ | 967 | /* fixme - tv/cv/din */ |
937 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 968 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
@@ -957,24 +988,30 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
957 | switch (connector_type) { | 988 | switch (connector_type) { |
958 | case DRM_MODE_CONNECTOR_VGA: | 989 | case DRM_MODE_CONNECTOR_VGA: |
959 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 990 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
960 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); | 991 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); |
992 | if (ret) | ||
993 | goto failed; | ||
961 | if (i2c_bus->valid) { | 994 | if (i2c_bus->valid) { |
962 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); | 995 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); |
963 | if (!radeon_connector->ddc_bus) | 996 | if (!radeon_connector->ddc_bus) |
964 | goto failed; | 997 | goto failed; |
965 | } | 998 | } |
999 | radeon_connector->dac_load_detect = true; | ||
966 | drm_connector_attach_property(&radeon_connector->base, | 1000 | drm_connector_attach_property(&radeon_connector->base, |
967 | rdev->mode_info.load_detect_property, | 1001 | rdev->mode_info.load_detect_property, |
968 | 1); | 1002 | 1); |
969 | break; | 1003 | break; |
970 | case DRM_MODE_CONNECTOR_DVIA: | 1004 | case DRM_MODE_CONNECTOR_DVIA: |
971 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1005 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
972 | drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); | 1006 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); |
1007 | if (ret) | ||
1008 | goto failed; | ||
973 | if (i2c_bus->valid) { | 1009 | if (i2c_bus->valid) { |
974 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); | 1010 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); |
975 | if (!radeon_connector->ddc_bus) | 1011 | if (!radeon_connector->ddc_bus) |
976 | goto failed; | 1012 | goto failed; |
977 | } | 1013 | } |
1014 | radeon_connector->dac_load_detect = true; | ||
978 | drm_connector_attach_property(&radeon_connector->base, | 1015 | drm_connector_attach_property(&radeon_connector->base, |
979 | rdev->mode_info.load_detect_property, | 1016 | rdev->mode_info.load_detect_property, |
980 | 1); | 1017 | 1); |
@@ -982,11 +1019,14 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
982 | case DRM_MODE_CONNECTOR_DVII: | 1019 | case DRM_MODE_CONNECTOR_DVII: |
983 | case DRM_MODE_CONNECTOR_DVID: | 1020 | case DRM_MODE_CONNECTOR_DVID: |
984 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1021 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
985 | drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | 1022 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); |
1023 | if (ret) | ||
1024 | goto failed; | ||
986 | if (i2c_bus->valid) { | 1025 | if (i2c_bus->valid) { |
987 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); | 1026 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); |
988 | if (!radeon_connector->ddc_bus) | 1027 | if (!radeon_connector->ddc_bus) |
989 | goto failed; | 1028 | goto failed; |
1029 | radeon_connector->dac_load_detect = true; | ||
990 | drm_connector_attach_property(&radeon_connector->base, | 1030 | drm_connector_attach_property(&radeon_connector->base, |
991 | rdev->mode_info.load_detect_property, | 1031 | rdev->mode_info.load_detect_property, |
992 | 1); | 1032 | 1); |
@@ -998,7 +1038,10 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
998 | case DRM_MODE_CONNECTOR_9PinDIN: | 1038 | case DRM_MODE_CONNECTOR_9PinDIN: |
999 | if (radeon_tv == 1) { | 1039 | if (radeon_tv == 1) { |
1000 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); | 1040 | drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
1001 | drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); | 1041 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
1042 | if (ret) | ||
1043 | goto failed; | ||
1044 | radeon_connector->dac_load_detect = true; | ||
1002 | drm_connector_attach_property(&radeon_connector->base, | 1045 | drm_connector_attach_property(&radeon_connector->base, |
1003 | rdev->mode_info.load_detect_property, | 1046 | rdev->mode_info.load_detect_property, |
1004 | 1); | 1047 | 1); |
@@ -1006,7 +1049,9 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1006 | break; | 1049 | break; |
1007 | case DRM_MODE_CONNECTOR_LVDS: | 1050 | case DRM_MODE_CONNECTOR_LVDS: |
1008 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1051 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
1009 | drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); | 1052 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); |
1053 | if (ret) | ||
1054 | goto failed; | ||
1010 | if (i2c_bus->valid) { | 1055 | if (i2c_bus->valid) { |
1011 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); | 1056 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); |
1012 | if (!radeon_connector->ddc_bus) | 1057 | if (!radeon_connector->ddc_bus) |
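
Throughout both constructors above, drm_connector_helper_add() is now treated as fallible, and the new dac_load_detect flag is set before the load-detect property is attached (previously the TV case attached the property even when radeon_tv != 1). The shared shape of each case looks like the sketch below, with funcs/helper_funcs standing in for the per-connector tables and the 'failed' cleanup label assumed from context rather than shown in full:

	/* Sketch of the register-then-unwind pattern used per connector type. */
	drm_connector_init(dev, &radeon_connector->base, funcs, connector_type);
	ret = drm_connector_helper_add(&radeon_connector->base, helper_funcs);
	if (ret)
		goto failed;                        /* tear down the half-built connector */
	radeon_connector->dac_load_detect = true;  /* DAC load detection is allowed */
	drm_connector_attach_property(&radeon_connector->base,
				      rdev->mode_info.load_detect_property, 1);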
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 12f5990c2d2a..5ab2cf96a264 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -142,15 +142,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | p->chunks[i].length_dw = user_chunk.length_dw; | 144 | p->chunks[i].length_dw = user_chunk.length_dw; |
145 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; | 145 | p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; |
146 | 146 | ||
147 | size = p->chunks[i].length_dw * sizeof(uint32_t); | 147 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; |
148 | p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); | 148 | if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) { |
149 | if (p->chunks[i].kdata == NULL) { | 149 | size = p->chunks[i].length_dw * sizeof(uint32_t); |
150 | return -ENOMEM; | 150 | p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); |
151 | } | 151 | if (p->chunks[i].kdata == NULL) { |
152 | if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) { | 152 | return -ENOMEM; |
153 | return -EFAULT; | 153 | } |
154 | if (DRM_COPY_FROM_USER(p->chunks[i].kdata, | ||
155 | p->chunks[i].user_ptr, size)) { | ||
156 | return -EFAULT; | ||
157 | } | ||
158 | } else { | ||
159 | p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
160 | p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
161 | if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) { | ||
162 | kfree(p->chunks[i].kpage[0]); | ||
163 | kfree(p->chunks[i].kpage[1]); | ||
164 | return -ENOMEM; | ||
165 | } | ||
166 | p->chunks[i].kpage_idx[0] = -1; | ||
167 | p->chunks[i].kpage_idx[1] = -1; | ||
168 | p->chunks[i].last_copied_page = -1; | ||
169 | p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE; | ||
154 | } | 170 | } |
155 | } | 171 | } |
156 | if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { | 172 | if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { |
@@ -190,6 +206,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
190 | kfree(parser->relocs_ptr); | 206 | kfree(parser->relocs_ptr); |
191 | for (i = 0; i < parser->nchunks; i++) { | 207 | for (i = 0; i < parser->nchunks; i++) { |
192 | kfree(parser->chunks[i].kdata); | 208 | kfree(parser->chunks[i].kdata); |
209 | kfree(parser->chunks[i].kpage[0]); | ||
210 | kfree(parser->chunks[i].kpage[1]); | ||
193 | } | 211 | } |
194 | kfree(parser->chunks); | 212 | kfree(parser->chunks); |
195 | kfree(parser->chunks_array); | 213 | kfree(parser->chunks_array); |
@@ -238,8 +256,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
238 | * uncached). */ | 256 | * uncached). */ |
239 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; | 257 | ib_chunk = &parser.chunks[parser.chunk_ib_idx]; |
240 | parser.ib->length_dw = ib_chunk->length_dw; | 258 | parser.ib->length_dw = ib_chunk->length_dw; |
241 | memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4); | ||
242 | r = radeon_cs_parse(&parser); | 259 | r = radeon_cs_parse(&parser); |
260 | if (r || parser.parser_error) { | ||
261 | DRM_ERROR("Invalid command stream !\n"); | ||
262 | radeon_cs_parser_fini(&parser, r); | ||
263 | mutex_unlock(&rdev->cs_mutex); | ||
264 | return r; | ||
265 | } | ||
266 | r = radeon_cs_finish_pages(&parser); | ||
243 | if (r) { | 267 | if (r) { |
244 | DRM_ERROR("Invalid command stream !\n"); | 268 | DRM_ERROR("Invalid command stream !\n"); |
245 | radeon_cs_parser_fini(&parser, r); | 269 | radeon_cs_parser_fini(&parser, r); |
@@ -254,3 +278,64 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
254 | mutex_unlock(&rdev->cs_mutex); | 278 | mutex_unlock(&rdev->cs_mutex); |
255 | return r; | 279 | return r; |
256 | } | 280 | } |
281 | |||
282 | int radeon_cs_finish_pages(struct radeon_cs_parser *p) | ||
283 | { | ||
284 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
285 | int i; | ||
286 | int size = PAGE_SIZE; | ||
287 | |||
288 | for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) { | ||
289 | if (i == ibc->last_page_index) { | ||
290 | size = (ibc->length_dw * 4) % PAGE_SIZE; | ||
291 | if (size == 0) | ||
292 | size = PAGE_SIZE; | ||
293 | } | ||
294 | |||
295 | if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), | ||
296 | ibc->user_ptr + (i * PAGE_SIZE), | ||
297 | size)) | ||
298 | return -EFAULT; | ||
299 | } | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) | ||
304 | { | ||
305 | int new_page; | ||
306 | struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; | ||
307 | int i; | ||
308 | int size = PAGE_SIZE; | ||
309 | |||
310 | for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { | ||
311 | if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)), | ||
312 | ibc->user_ptr + (i * PAGE_SIZE), | ||
313 | PAGE_SIZE)) { | ||
314 | p->parser_error = -EFAULT; | ||
315 | return 0; | ||
316 | } | ||
317 | } | ||
318 | |||
319 | new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1; | ||
320 | |||
321 | if (pg_idx == ibc->last_page_index) { | ||
322 | size = (ibc->length_dw * 4) % PAGE_SIZE; | ||
323 | if (size == 0) | ||
324 | size = PAGE_SIZE; | ||
325 | } | ||
326 | |||
327 | if (DRM_COPY_FROM_USER(ibc->kpage[new_page], | ||
328 | ibc->user_ptr + (pg_idx * PAGE_SIZE), | ||
329 | size)) { | ||
330 | p->parser_error = -EFAULT; | ||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | /* copy to IB here */ | ||
335 | memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size); | ||
336 | |||
337 | ibc->last_copied_page = pg_idx; | ||
338 | ibc->kpage_idx[new_page] = pg_idx; | ||
339 | |||
340 | return new_page; | ||
341 | } | ||
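
The rework above stops copying the whole IB chunk from user space up front. Instead the parser keeps two PAGE_SIZE bounce buffers per IB chunk: radeon_cs_update_pages() copies any pages skipped over straight into the indirect buffer, pulls the requested page into the least recently used slot (kpage_idx[] tracks which user page each slot holds), and returns that slot; on a fault it sets parser_error rather than returning an error, so callers must check p->parser_error. radeon_cs_finish_pages() then copies whatever the parse never touched. A hypothetical accessor, not part of the patch, showing how a parser would read one dword through this scheme (kpage[] is assumed to be uint32_t-typed PAGE_SIZE buffers, as the kmalloc/memcpy above suggests):

	/* Hypothetical helper: fetch dword 'idx' of the IB chunk, faulting
	 * user pages in through the two bounce buffers on demand. */
	static u32 ib_get_dword(struct radeon_cs_parser *p, int idx)
	{
		struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
		int pg_idx = (idx * 4) / PAGE_SIZE;   /* user page holding the dword */
		int pg_off = (idx * 4) % PAGE_SIZE;   /* byte offset inside that page */
		int slot;

		if (ibc->kpage_idx[0] == pg_idx)
			slot = 0;                     /* already cached in slot 0 */
		else if (ibc->kpage_idx[1] == pg_idx)
			slot = 1;                     /* already cached in slot 1 */
		else
			slot = radeon_cs_update_pages(p, pg_idx); /* copy + pick slot */
		return ibc->kpage[slot][pg_off / 4];  /* caller checks p->parser_error */
	}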
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index daf5db780956..ec835d56d30a 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -532,10 +532,13 @@ int radeon_device_init(struct radeon_device *rdev, | |||
532 | 532 | ||
533 | if (radeon_agpmode == -1) { | 533 | if (radeon_agpmode == -1) { |
534 | rdev->flags &= ~RADEON_IS_AGP; | 534 | rdev->flags &= ~RADEON_IS_AGP; |
535 | if (rdev->family >= CHIP_RV515 || | 535 | if (rdev->family >= CHIP_R600) { |
536 | rdev->family == CHIP_RV380 || | 536 | DRM_INFO("Forcing AGP to PCIE mode\n"); |
537 | rdev->family == CHIP_RV410 || | 537 | rdev->flags |= RADEON_IS_PCIE; |
538 | rdev->family == CHIP_R423) { | 538 | } else if (rdev->family >= CHIP_RV515 || |
539 | rdev->family == CHIP_RV380 || | ||
540 | rdev->family == CHIP_RV410 || | ||
541 | rdev->family == CHIP_R423) { | ||
539 | DRM_INFO("Forcing AGP to PCIE mode\n"); | 542 | DRM_INFO("Forcing AGP to PCIE mode\n"); |
540 | rdev->flags |= RADEON_IS_PCIE; | 543 | rdev->flags |= RADEON_IS_PCIE; |
541 | rdev->asic->gart_init = &rv370_pcie_gart_init; | 544 | rdev->asic->gart_init = &rv370_pcie_gart_init; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 50fce498910c..7f50fb864af8 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -62,9 +62,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev); | |||
62 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); | 62 | int radeon_driver_irq_postinstall_kms(struct drm_device *dev); |
63 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); | 63 | void radeon_driver_irq_uninstall_kms(struct drm_device *dev); |
64 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); | 64 | irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); |
65 | int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master); | ||
66 | void radeon_master_destroy_kms(struct drm_device *dev, | ||
67 | struct drm_master *master); | ||
68 | int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, | 65 | int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, |
69 | struct drm_file *file_priv); | 66 | struct drm_file *file_priv); |
70 | int radeon_gem_object_init(struct drm_gem_object *obj); | 67 | int radeon_gem_object_init(struct drm_gem_object *obj); |
@@ -260,8 +257,6 @@ static struct drm_driver kms_driver = { | |||
260 | .get_vblank_counter = radeon_get_vblank_counter_kms, | 257 | .get_vblank_counter = radeon_get_vblank_counter_kms, |
261 | .enable_vblank = radeon_enable_vblank_kms, | 258 | .enable_vblank = radeon_enable_vblank_kms, |
262 | .disable_vblank = radeon_disable_vblank_kms, | 259 | .disable_vblank = radeon_disable_vblank_kms, |
263 | .master_create = radeon_master_create_kms, | ||
264 | .master_destroy = radeon_master_destroy_kms, | ||
265 | #if defined(CONFIG_DEBUG_FS) | 260 | #if defined(CONFIG_DEBUG_FS) |
266 | .debugfs_init = radeon_debugfs_init, | 261 | .debugfs_init = radeon_debugfs_init, |
267 | .debugfs_cleanup = radeon_debugfs_cleanup, | 262 | .debugfs_cleanup = radeon_debugfs_cleanup, |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index cb0cfe4b3082..350962e0f346 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -34,6 +34,8 @@ | |||
34 | #include <linux/firmware.h> | 34 | #include <linux/firmware.h> |
35 | #include <linux/platform_device.h> | 35 | #include <linux/platform_device.h> |
36 | 36 | ||
37 | #include "radeon_family.h" | ||
38 | |||
37 | /* General customization: | 39 | /* General customization: |
38 | */ | 40 | */ |
39 | 41 | ||
@@ -109,75 +111,12 @@ | |||
109 | #define DRIVER_MINOR 31 | 111 | #define DRIVER_MINOR 31 |
110 | #define DRIVER_PATCHLEVEL 0 | 112 | #define DRIVER_PATCHLEVEL 0 |
111 | 113 | ||
112 | /* | ||
113 | * Radeon chip families | ||
114 | */ | ||
115 | enum radeon_family { | ||
116 | CHIP_R100, | ||
117 | CHIP_RV100, | ||
118 | CHIP_RS100, | ||
119 | CHIP_RV200, | ||
120 | CHIP_RS200, | ||
121 | CHIP_R200, | ||
122 | CHIP_RV250, | ||
123 | CHIP_RS300, | ||
124 | CHIP_RV280, | ||
125 | CHIP_R300, | ||
126 | CHIP_R350, | ||
127 | CHIP_RV350, | ||
128 | CHIP_RV380, | ||
129 | CHIP_R420, | ||
130 | CHIP_R423, | ||
131 | CHIP_RV410, | ||
132 | CHIP_RS400, | ||
133 | CHIP_RS480, | ||
134 | CHIP_RS600, | ||
135 | CHIP_RS690, | ||
136 | CHIP_RS740, | ||
137 | CHIP_RV515, | ||
138 | CHIP_R520, | ||
139 | CHIP_RV530, | ||
140 | CHIP_RV560, | ||
141 | CHIP_RV570, | ||
142 | CHIP_R580, | ||
143 | CHIP_R600, | ||
144 | CHIP_RV610, | ||
145 | CHIP_RV630, | ||
146 | CHIP_RV620, | ||
147 | CHIP_RV635, | ||
148 | CHIP_RV670, | ||
149 | CHIP_RS780, | ||
150 | CHIP_RS880, | ||
151 | CHIP_RV770, | ||
152 | CHIP_RV730, | ||
153 | CHIP_RV710, | ||
154 | CHIP_RV740, | ||
155 | CHIP_LAST, | ||
156 | }; | ||
157 | |||
158 | enum radeon_cp_microcode_version { | 114 | enum radeon_cp_microcode_version { |
159 | UCODE_R100, | 115 | UCODE_R100, |
160 | UCODE_R200, | 116 | UCODE_R200, |
161 | UCODE_R300, | 117 | UCODE_R300, |
162 | }; | 118 | }; |
163 | 119 | ||
164 | /* | ||
165 | * Chip flags | ||
166 | */ | ||
167 | enum radeon_chip_flags { | ||
168 | RADEON_FAMILY_MASK = 0x0000ffffUL, | ||
169 | RADEON_FLAGS_MASK = 0xffff0000UL, | ||
170 | RADEON_IS_MOBILITY = 0x00010000UL, | ||
171 | RADEON_IS_IGP = 0x00020000UL, | ||
172 | RADEON_SINGLE_CRTC = 0x00040000UL, | ||
173 | RADEON_IS_AGP = 0x00080000UL, | ||
174 | RADEON_HAS_HIERZ = 0x00100000UL, | ||
175 | RADEON_IS_PCIE = 0x00200000UL, | ||
176 | RADEON_NEW_MEMMAP = 0x00400000UL, | ||
177 | RADEON_IS_PCI = 0x00800000UL, | ||
178 | RADEON_IS_IGPGART = 0x01000000UL, | ||
179 | }; | ||
180 | |||
181 | typedef struct drm_radeon_freelist { | 120 | typedef struct drm_radeon_freelist { |
182 | unsigned int age; | 121 | unsigned int age; |
183 | struct drm_buf *buf; | 122 | struct drm_buf *buf; |
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h new file mode 100644 index 000000000000..797972e344a6 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | |||
29 | /* this file defines the CHIP_ and family flags used in the pciids, | ||
30 | * it is common between kms and non-kms because duplicating it and | ||
31 | * changing it in only one place is a recipe for failure. | ||
32 | */ | ||
33 | #ifndef RADEON_FAMILY_H | ||
34 | #define RADEON_FAMILY_H | ||
35 | /* | ||
36 | * Radeon chip families | ||
37 | */ | ||
38 | enum radeon_family { | ||
39 | CHIP_R100, | ||
40 | CHIP_RV100, | ||
41 | CHIP_RS100, | ||
42 | CHIP_RV200, | ||
43 | CHIP_RS200, | ||
44 | CHIP_R200, | ||
45 | CHIP_RV250, | ||
46 | CHIP_RS300, | ||
47 | CHIP_RV280, | ||
48 | CHIP_R300, | ||
49 | CHIP_R350, | ||
50 | CHIP_RV350, | ||
51 | CHIP_RV380, | ||
52 | CHIP_R420, | ||
53 | CHIP_R423, | ||
54 | CHIP_RV410, | ||
55 | CHIP_RS400, | ||
56 | CHIP_RS480, | ||
57 | CHIP_RS600, | ||
58 | CHIP_RS690, | ||
59 | CHIP_RS740, | ||
60 | CHIP_RV515, | ||
61 | CHIP_R520, | ||
62 | CHIP_RV530, | ||
63 | CHIP_RV560, | ||
64 | CHIP_RV570, | ||
65 | CHIP_R580, | ||
66 | CHIP_R600, | ||
67 | CHIP_RV610, | ||
68 | CHIP_RV630, | ||
69 | CHIP_RV670, | ||
70 | CHIP_RV620, | ||
71 | CHIP_RV635, | ||
72 | CHIP_RS780, | ||
73 | CHIP_RS880, | ||
74 | CHIP_RV770, | ||
75 | CHIP_RV730, | ||
76 | CHIP_RV710, | ||
77 | CHIP_RV740, | ||
78 | CHIP_LAST, | ||
79 | }; | ||
80 | |||
81 | /* | ||
82 | * Chip flags | ||
83 | */ | ||
84 | enum radeon_chip_flags { | ||
85 | RADEON_FAMILY_MASK = 0x0000ffffUL, | ||
86 | RADEON_FLAGS_MASK = 0xffff0000UL, | ||
87 | RADEON_IS_MOBILITY = 0x00010000UL, | ||
88 | RADEON_IS_IGP = 0x00020000UL, | ||
89 | RADEON_SINGLE_CRTC = 0x00040000UL, | ||
90 | RADEON_IS_AGP = 0x00080000UL, | ||
91 | RADEON_HAS_HIERZ = 0x00100000UL, | ||
92 | RADEON_IS_PCIE = 0x00200000UL, | ||
93 | RADEON_NEW_MEMMAP = 0x00400000UL, | ||
94 | RADEON_IS_PCI = 0x00800000UL, | ||
95 | RADEON_IS_IGPGART = 0x01000000UL, | ||
96 | }; | ||
97 | #endif | ||
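
With the enum and flags now in a shared header, both the KMS and non-KMS paths can decode a PCI-ID table entry the same way: the family sits in the low 16 bits (RADEON_FAMILY_MASK) and the feature bits above it. An illustrative packing/unpacking, with made-up example values:

	/* Illustrative only: pack a family plus feature flags the way the
	 * pciid driver_data is assumed to, then unpack with the masks above. */
	unsigned long driver_data = CHIP_RV515 | RADEON_IS_MOBILITY | RADEON_IS_PCIE;
	enum radeon_family family = driver_data & RADEON_FAMILY_MASK; /* low 16 bits */
	int is_mobility = (driver_data & RADEON_IS_MOBILITY) != 0;    /* flag bits */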
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 944e4fa78db5..1ba704eedefb 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -128,6 +128,7 @@ static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { | |||
128 | int radeonfb_create(struct drm_device *dev, | 128 | int radeonfb_create(struct drm_device *dev, |
129 | uint32_t fb_width, uint32_t fb_height, | 129 | uint32_t fb_width, uint32_t fb_height, |
130 | uint32_t surface_width, uint32_t surface_height, | 130 | uint32_t surface_width, uint32_t surface_height, |
131 | uint32_t surface_depth, uint32_t surface_bpp, | ||
131 | struct drm_framebuffer **fb_p) | 132 | struct drm_framebuffer **fb_p) |
132 | { | 133 | { |
133 | struct radeon_device *rdev = dev->dev_private; | 134 | struct radeon_device *rdev = dev->dev_private; |
@@ -148,10 +149,10 @@ int radeonfb_create(struct drm_device *dev, | |||
148 | 149 | ||
149 | mode_cmd.width = surface_width; | 150 | mode_cmd.width = surface_width; |
150 | mode_cmd.height = surface_height; | 151 | mode_cmd.height = surface_height; |
151 | mode_cmd.bpp = 32; | 152 | mode_cmd.bpp = surface_bpp; |
152 | /* need to align pitch with crtc limits */ | 153 | /* need to align pitch with crtc limits */ |
153 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); | 154 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
154 | mode_cmd.depth = 24; | 155 | mode_cmd.depth = surface_depth; |
155 | 156 | ||
156 | size = mode_cmd.pitch * mode_cmd.height; | 157 | size = mode_cmd.pitch * mode_cmd.height; |
157 | aligned_size = ALIGN(size, PAGE_SIZE); | 158 | aligned_size = ALIGN(size, PAGE_SIZE); |
@@ -290,13 +291,26 @@ out: | |||
290 | return ret; | 291 | return ret; |
291 | } | 292 | } |
292 | 293 | ||
294 | static char *mode_option; | ||
295 | int radeon_parse_options(char *options) | ||
296 | { | ||
297 | char *this_opt; | ||
298 | |||
299 | if (!options || !*options) | ||
300 | return 0; | ||
301 | |||
302 | while ((this_opt = strsep(&options, ",")) != NULL) { | ||
303 | if (!*this_opt) | ||
304 | continue; | ||
305 | mode_option = this_opt; | ||
306 | } | ||
307 | return 0; | ||
308 | } | ||
309 | |||
293 | int radeonfb_probe(struct drm_device *dev) | 310 | int radeonfb_probe(struct drm_device *dev) |
294 | { | 311 | { |
295 | int ret; | 312 | return drm_fb_helper_single_fb_probe(dev, &radeonfb_create); |
296 | ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create); | ||
297 | return ret; | ||
298 | } | 313 | } |
299 | EXPORT_SYMBOL(radeonfb_probe); | ||
300 | 314 | ||
301 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | 315 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
302 | { | 316 | { |
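
radeon_parse_options() above walks a comma-separated option string with strsep(), skipping empty tokens; note that only the last non-empty token survives as mode_option. A standalone illustration of that strsep() behaviour (user-space demo, not driver code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "tv=ntsc,,1024x768-24@60";  /* mutable: strsep() writes NULs */
		char *options = buf, *this_opt, *mode_option = NULL;

		while ((this_opt = strsep(&options, ",")) != NULL) {
			if (!*this_opt)
				continue;             /* skip empty ",," tokens */
			mode_option = this_opt;       /* keep only the latest token */
		}
		printf("%s\n", mode_option);          /* prints 1024x768-24@60 */
		return 0;
	}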
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 709bd892b3a9..ba128621057a 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -201,55 +201,6 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) | |||
201 | 201 | ||
202 | 202 | ||
203 | /* | 203 | /* |
204 | * For multiple master (like multiple X). | ||
205 | */ | ||
206 | struct drm_radeon_master_private { | ||
207 | drm_local_map_t *sarea; | ||
208 | drm_radeon_sarea_t *sarea_priv; | ||
209 | }; | ||
210 | |||
211 | int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master) | ||
212 | { | ||
213 | struct drm_radeon_master_private *master_priv; | ||
214 | unsigned long sareapage; | ||
215 | int ret; | ||
216 | |||
217 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); | ||
218 | if (master_priv == NULL) { | ||
219 | return -ENOMEM; | ||
220 | } | ||
221 | /* prebuild the SAREA */ | ||
222 | sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); | ||
223 | ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, | ||
224 | _DRM_CONTAINS_LOCK, | ||
225 | &master_priv->sarea); | ||
226 | if (ret) { | ||
227 | DRM_ERROR("SAREA setup failed\n"); | ||
228 | return ret; | ||
229 | } | ||
230 | master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); | ||
231 | master_priv->sarea_priv->pfCurrentPage = 0; | ||
232 | master->driver_priv = master_priv; | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | void radeon_master_destroy_kms(struct drm_device *dev, | ||
237 | struct drm_master *master) | ||
238 | { | ||
239 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
240 | |||
241 | if (master_priv == NULL) { | ||
242 | return; | ||
243 | } | ||
244 | if (master_priv->sarea) { | ||
245 | drm_rmmap_locked(dev, master_priv->sarea); | ||
246 | } | ||
247 | kfree(master_priv); | ||
248 | master->driver_priv = NULL; | ||
249 | } | ||
250 | |||
251 | |||
252 | /* | ||
253 | * IOCTL. | 204 | * IOCTL. |
254 | */ | 205 | */ |
255 | int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, | 206 | int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 21da871a793c..bfa1ab9c93e1 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -3333,6 +3333,7 @@ | |||
3333 | # define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) | 3333 | # define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) |
3334 | # define RADEON_CP_PACKET0_REG_MASK 0x000007ff | 3334 | # define RADEON_CP_PACKET0_REG_MASK 0x000007ff |
3335 | # define R300_CP_PACKET0_REG_MASK 0x00001fff | 3335 | # define R300_CP_PACKET0_REG_MASK 0x00001fff |
3336 | # define R600_CP_PACKET0_REG_MASK 0x0000ffff | ||
3336 | # define RADEON_CP_PACKET1_REG0_MASK 0x000007ff | 3337 | # define RADEON_CP_PACKET1_REG0_MASK 0x000007ff |
3337 | # define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 | 3338 | # define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 |
3338 | 3339 | ||
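
The new R600 mask widens the register field a type-0 (PACKET0) header can address. Assuming the usual CP packet layout, where the masked low bits of the header are a dword register index, the decode looks like this (helper name illustrative):

	/* Assumed decode for a CP type-0 packet header: the masked field is
	 * a dword index, so the register's byte address is it shifted by 2. */
	static inline u32 cp_packet0_reg(u32 header, u32 reg_mask)
	{
		return (header & reg_mask) << 2;
	}
	/* cp_packet0_reg(hdr, R600_CP_PACKET0_REG_MASK) spans 256KB of register
	 * space, versus 32KB with R300_CP_PACKET0_REG_MASK and 8KB before that. */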
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index acd889c94549..765bd184b6fc 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -530,7 +530,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
530 | } | 530 | } |
531 | 531 | ||
532 | static struct vm_operations_struct radeon_ttm_vm_ops; | 532 | static struct vm_operations_struct radeon_ttm_vm_ops; |
533 | static struct vm_operations_struct *ttm_vm_ops = NULL; | 533 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
534 | 534 | ||
535 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 535 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
536 | { | 536 | { |
@@ -689,9 +689,6 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev) | |||
689 | 689 | ||
690 | #define RADEON_DEBUGFS_MEM_TYPES 2 | 690 | #define RADEON_DEBUGFS_MEM_TYPES 2 |
691 | 691 | ||
692 | static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; | ||
693 | static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; | ||
694 | |||
695 | #if defined(CONFIG_DEBUG_FS) | 692 | #if defined(CONFIG_DEBUG_FS) |
696 | static int radeon_mm_dump_table(struct seq_file *m, void *data) | 693 | static int radeon_mm_dump_table(struct seq_file *m, void *data) |
697 | { | 694 | { |
@@ -711,9 +708,11 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) | |||
711 | 708 | ||
712 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | 709 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) |
713 | { | 710 | { |
711 | #if defined(CONFIG_DEBUG_FS) | ||
712 | static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; | ||
713 | static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; | ||
714 | unsigned i; | 714 | unsigned i; |
715 | 715 | ||
716 | #if defined(CONFIG_DEBUG_FS) | ||
717 | for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { | 716 | for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { |
718 | if (i == 0) | 717 | if (i == 0) |
719 | sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); | 718 | sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); |
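
ttm_vm_ops becomes a const pointer to match TTM handing out a const vm_operations_struct, and the debugfs arrays move into the one function that uses them, inside the CONFIG_DEBUG_FS guard, so non-debugfs builds no longer carry them. The fault-wrapping pattern the const change feeds into is presumably the usual one (shape assumed, mirroring the radeon_ttm_fault setup at mmap time):

	/* Assumed mmap-time hook-up: copy TTM's (const) vm_ops once into the
	 * writable driver struct and override only the fault handler. */
	if (ttm_vm_ops == NULL) {
		ttm_vm_ops = vma->vm_ops;          /* const ops installed by TTM */
		radeon_ttm_vm_ops = *ttm_vm_ops;   /* writable per-driver copy */
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;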
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 0e791e26def3..4a4fe1cb131c 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "avivod.h" | ||
32 | 31 | ||
33 | #include "rs600_reg_safe.h" | 32 | #include "rs600_reg_safe.h" |
34 | 33 | ||
@@ -45,7 +44,6 @@ void r420_pipes_init(struct radeon_device *rdev); | |||
45 | */ | 44 | */ |
46 | void rs600_gpu_init(struct radeon_device *rdev); | 45 | void rs600_gpu_init(struct radeon_device *rdev); |
47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
48 | void rs600_disable_vga(struct radeon_device *rdev); | ||
49 | 47 | ||
50 | 48 | ||
51 | /* | 49 | /* |
@@ -198,7 +196,7 @@ void rs600_mc_disable_clients(struct radeon_device *rdev) | |||
198 | "programming pipes. Bad things might happen.\n"); | 196 | "programming pipes. Bad things might happen.\n"); |
199 | } | 197 | } |
200 | 198 | ||
201 | radeon_avivo_vga_render_disable(rdev); | 199 | rv515_vga_render_disable(rdev); |
202 | 200 | ||
203 | tmp = RREG32(AVIVO_D1VGA_CONTROL); | 201 | tmp = RREG32(AVIVO_D1VGA_CONTROL); |
204 | WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); | 202 | WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); |
@@ -346,20 +344,6 @@ u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
346 | /* | 344 | /* |
347 | * Global GPU functions | 345 | * Global GPU functions |
348 | */ | 346 | */ |
349 | void rs600_disable_vga(struct radeon_device *rdev) | ||
350 | { | ||
351 | unsigned tmp; | ||
352 | |||
353 | WREG32(0x330, 0); | ||
354 | WREG32(0x338, 0); | ||
355 | tmp = RREG32(0x300); | ||
356 | tmp &= ~(3 << 16); | ||
357 | WREG32(0x300, tmp); | ||
358 | WREG32(0x308, (1 << 8)); | ||
359 | WREG32(0x310, rdev->mc.vram_location); | ||
360 | WREG32(0x594, 0); | ||
361 | } | ||
362 | |||
363 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) | 347 | int rs600_mc_wait_for_idle(struct radeon_device *rdev) |
364 | { | 348 | { |
365 | unsigned i; | 349 | unsigned i; |
@@ -385,7 +369,7 @@ void rs600_gpu_init(struct radeon_device *rdev) | |||
385 | { | 369 | { |
386 | /* FIXME: HDP same place on rs600 ? */ | 370 | /* FIXME: HDP same place on rs600 ? */ |
387 | r100_hdp_reset(rdev); | 371 | r100_hdp_reset(rdev); |
388 | rs600_disable_vga(rdev); | 372 | rv515_vga_render_disable(rdev); |
389 | /* FIXME: is this correct ? */ | 373 | /* FIXME: is this correct ? */ |
390 | r420_pipes_init(rdev); | 374 | r420_pipes_init(rdev); |
391 | if (rs600_mc_wait_for_idle(rdev)) { | 375 | if (rs600_mc_wait_for_idle(rdev)) { |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 0f585ca8276d..7a0098ddf977 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -40,7 +40,6 @@ void rs400_gart_disable(struct radeon_device *rdev); | |||
40 | int rs400_gart_enable(struct radeon_device *rdev); | 40 | int rs400_gart_enable(struct radeon_device *rdev); |
41 | void rs400_gart_adjust_size(struct radeon_device *rdev); | 41 | void rs400_gart_adjust_size(struct radeon_device *rdev); |
42 | void rs600_mc_disable_clients(struct radeon_device *rdev); | 42 | void rs600_mc_disable_clients(struct radeon_device *rdev); |
43 | void rs600_disable_vga(struct radeon_device *rdev); | ||
44 | 43 | ||
45 | /* This files gather functions specifics to : | 44 | /* This files gather functions specifics to : |
46 | * rs690,rs740 | 45 | * rs690,rs740 |
@@ -125,7 +124,7 @@ void rs690_gpu_init(struct radeon_device *rdev) | |||
125 | { | 124 | { |
126 | /* FIXME: HDP same place on rs690 ? */ | 125 | /* FIXME: HDP same place on rs690 ? */ |
127 | r100_hdp_reset(rdev); | 126 | r100_hdp_reset(rdev); |
128 | rs600_disable_vga(rdev); | 127 | rv515_vga_render_disable(rdev); |
129 | /* FIXME: is this correct ? */ | 128 | /* FIXME: is this correct ? */ |
130 | r420_pipes_init(rdev); | 129 | r420_pipes_init(rdev); |
131 | if (rs690_mc_wait_for_idle(rdev)) { | 130 | if (rs690_mc_wait_for_idle(rdev)) { |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index fd799748e7d8..e53b5ca7a253 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -29,37 +29,17 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "rv515d.h" | 30 | #include "rv515d.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | 32 | #include "atom.h" | |
33 | #include "rv515_reg_safe.h" | 33 | #include "rv515_reg_safe.h" |
34 | /* rv515 depends on : */ | 34 | |
35 | void r100_hdp_reset(struct radeon_device *rdev); | 35 | /* This file gathers functions specific to: rv515 */ |
36 | int r100_cp_reset(struct radeon_device *rdev); | ||
37 | int r100_rb2d_reset(struct radeon_device *rdev); | ||
38 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | ||
39 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); | ||
40 | void r420_pipes_init(struct radeon_device *rdev); | ||
41 | void rs600_mc_disable_clients(struct radeon_device *rdev); | ||
42 | void rs600_disable_vga(struct radeon_device *rdev); | ||
43 | |||
44 | /* This files gather functions specifics to: | ||
45 | * rv515 | ||
46 | * | ||
47 | * Some of these functions might be used by newer ASICs. | ||
48 | */ | ||
49 | int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); | 36 | int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); |
50 | int rv515_debugfs_ga_info_init(struct radeon_device *rdev); | 37 | int rv515_debugfs_ga_info_init(struct radeon_device *rdev); |
51 | void rv515_gpu_init(struct radeon_device *rdev); | 38 | void rv515_gpu_init(struct radeon_device *rdev); |
52 | int rv515_mc_wait_for_idle(struct radeon_device *rdev); | 39 | int rv515_mc_wait_for_idle(struct radeon_device *rdev); |
53 | 40 | ||
54 | 41 | void rv515_debugfs(struct radeon_device *rdev) | |
55 | /* | ||
56 | * MC | ||
57 | */ | ||
58 | int rv515_mc_init(struct radeon_device *rdev) | ||
59 | { | 42 | { |
60 | uint32_t tmp; | ||
61 | int r; | ||
62 | |||
63 | if (r100_debugfs_rbbm_init(rdev)) { | 43 | if (r100_debugfs_rbbm_init(rdev)) { |
64 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); | 44 | DRM_ERROR("Failed to register debugfs file for RBBM !\n"); |
65 | } | 45 | } |
@@ -69,67 +49,8 @@ int rv515_mc_init(struct radeon_device *rdev) | |||
69 | if (rv515_debugfs_ga_info_init(rdev)) { | 49 | if (rv515_debugfs_ga_info_init(rdev)) { |
70 | DRM_ERROR("Failed to register debugfs file for pipes !\n"); | 50 | DRM_ERROR("Failed to register debugfs file for pipes !\n"); |
71 | } | 51 | } |
72 | |||
73 | rv515_gpu_init(rdev); | ||
74 | rv370_pcie_gart_disable(rdev); | ||
75 | |||
76 | /* Setup GPU memory space */ | ||
77 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
78 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
79 | if (rdev->flags & RADEON_IS_AGP) { | ||
80 | r = radeon_agp_init(rdev); | ||
81 | if (r) { | ||
82 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | ||
83 | rdev->flags &= ~RADEON_IS_AGP; | ||
84 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
85 | } else { | ||
86 | rdev->mc.gtt_location = rdev->mc.agp_base; | ||
87 | } | ||
88 | } | ||
89 | r = radeon_mc_setup(rdev); | ||
90 | if (r) { | ||
91 | return r; | ||
92 | } | ||
93 | |||
94 | /* Program GPU memory space */ | ||
95 | rs600_mc_disable_clients(rdev); | ||
96 | if (rv515_mc_wait_for_idle(rdev)) { | ||
97 | printk(KERN_WARNING "Failed to wait MC idle while " | ||
98 | "programming pipes. Bad things might happen.\n"); | ||
99 | } | ||
100 | /* Write VRAM size in case we are limiting it */ | ||
101 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
102 | tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); | ||
103 | WREG32(0x134, tmp); | ||
104 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
105 | tmp = REG_SET(MC_FB_TOP, tmp >> 16); | ||
106 | tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); | ||
107 | WREG32_MC(MC_FB_LOCATION, tmp); | ||
108 | WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | ||
109 | WREG32(0x310, rdev->mc.vram_location); | ||
110 | if (rdev->flags & RADEON_IS_AGP) { | ||
111 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | ||
112 | tmp = REG_SET(MC_AGP_TOP, tmp >> 16); | ||
113 | tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16); | ||
114 | WREG32_MC(MC_AGP_LOCATION, tmp); | ||
115 | WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base); | ||
116 | WREG32_MC(MC_AGP_BASE_2, 0); | ||
117 | } else { | ||
118 | WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF); | ||
119 | WREG32_MC(MC_AGP_BASE, 0); | ||
120 | WREG32_MC(MC_AGP_BASE_2, 0); | ||
121 | } | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | void rv515_mc_fini(struct radeon_device *rdev) | ||
126 | { | ||
127 | } | 52 | } |
128 | 53 | ||
129 | |||
130 | /* | ||
131 | * Global GPU functions | ||
132 | */ | ||
133 | void rv515_ring_start(struct radeon_device *rdev) | 54 | void rv515_ring_start(struct radeon_device *rdev) |
134 | { | 55 | { |
135 | int r; | 56 | int r; |
@@ -198,11 +119,6 @@ void rv515_ring_start(struct radeon_device *rdev) | |||
198 | radeon_ring_unlock_commit(rdev); | 119 | radeon_ring_unlock_commit(rdev); |
199 | } | 120 | } |
200 | 121 | ||
201 | void rv515_errata(struct radeon_device *rdev) | ||
202 | { | ||
203 | rdev->pll_errata = 0; | ||
204 | } | ||
205 | |||
206 | int rv515_mc_wait_for_idle(struct radeon_device *rdev) | 122 | int rv515_mc_wait_for_idle(struct radeon_device *rdev) |
207 | { | 123 | { |
208 | unsigned i; | 124 | unsigned i; |
@@ -219,6 +135,12 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) | |||
219 | return -1; | 135 | return -1; |
220 | } | 136 | } |
221 | 137 | ||
138 | void rv515_vga_render_disable(struct radeon_device *rdev) | ||
139 | { | ||
140 | WREG32(R_000300_VGA_RENDER_CONTROL, | ||
141 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); | ||
142 | } | ||
143 | |||
222 | void rv515_gpu_init(struct radeon_device *rdev) | 144 | void rv515_gpu_init(struct radeon_device *rdev) |
223 | { | 145 | { |
224 | unsigned pipe_select_current, gb_pipe_select, tmp; | 146 | unsigned pipe_select_current, gb_pipe_select, tmp; |
@@ -231,7 +153,7 @@ void rv515_gpu_init(struct radeon_device *rdev) | |||
231 | "reseting GPU. Bad things might happen.\n"); | 153 | "reseting GPU. Bad things might happen.\n"); |
232 | } | 154 | } |
233 | 155 | ||
234 | rs600_disable_vga(rdev); | 156 | rv515_vga_render_disable(rdev); |
235 | 157 | ||
236 | r420_pipes_init(rdev); | 158 | r420_pipes_init(rdev); |
237 | gb_pipe_select = RREG32(0x402C); | 159 | gb_pipe_select = RREG32(0x402C); |
@@ -335,10 +257,6 @@ int rv515_gpu_reset(struct radeon_device *rdev) | |||
335 | return 0; | 257 | return 0; |
336 | } | 258 | } |
337 | 259 | ||
338 | |||
339 | /* | ||
340 | * VRAM info | ||
341 | */ | ||
342 | static void rv515_vram_get_type(struct radeon_device *rdev) | 260 | static void rv515_vram_get_type(struct radeon_device *rdev) |
343 | { | 261 | { |
344 | uint32_t tmp; | 262 | uint32_t tmp; |
@@ -374,10 +292,6 @@ void rv515_vram_info(struct radeon_device *rdev) | |||
374 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | 292 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); |
375 | } | 293 | } |
376 | 294 | ||
377 | |||
378 | /* | ||
379 | * Indirect registers accessor | ||
380 | */ | ||
381 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) | 295 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
382 | { | 296 | { |
383 | uint32_t r; | 297 | uint32_t r; |
@@ -395,9 +309,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | |||
395 | WREG32(MC_IND_INDEX, 0); | 309 | WREG32(MC_IND_INDEX, 0); |
396 | } | 310 | } |
397 | 311 | ||
398 | /* | ||
399 | * Debugfs info | ||
400 | */ | ||
401 | #if defined(CONFIG_DEBUG_FS) | 312 | #if defined(CONFIG_DEBUG_FS) |
402 | static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) | 313 | static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) |
403 | { | 314 | { |
@@ -459,13 +370,258 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) | |||
459 | #endif | 370 | #endif |
460 | } | 371 | } |
461 | 372 | ||
462 | /* | 373 | void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) |
463 | * Asic initialization | 374 | { |
464 | */ | 375 | save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL); |
465 | int rv515_init(struct radeon_device *rdev) | 376 | save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL); |
377 | save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); | ||
378 | save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); | ||
379 | save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL); | ||
380 | save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); | ||
381 | |||
382 | /* Stop all video */ | ||
383 | WREG32(R_000330_D1VGA_CONTROL, 0); | ||
384 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); | ||
385 | WREG32(R_000300_VGA_RENDER_CONTROL, 0); | ||
386 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); | ||
387 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); | ||
388 | WREG32(R_006080_D1CRTC_CONTROL, 0); | ||
389 | WREG32(R_006880_D2CRTC_CONTROL, 0); | ||
390 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); | ||
391 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); | ||
392 | } | ||
393 | |||
394 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) | ||
395 | { | ||
396 | WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); | ||
397 | WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); | ||
398 | WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); | ||
399 | WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); | ||
400 | WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); | ||
401 | /* Unlock host access */ | ||
402 | WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); | ||
403 | mdelay(1); | ||
404 | /* Restore video state */ | ||
405 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); | ||
406 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); | ||
407 | WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); | ||
408 | WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); | ||
409 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); | ||
410 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); | ||
411 | WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); | ||
412 | WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); | ||
413 | WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); | ||
414 | } | ||
415 | |||
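Both rv515_mc_stop() and rv515_mc_resume() bracket the CRTC master-enable writes with the D1/D2CRTC_UPDATE_LOCK registers so each display controller latches its new state atomically. The same pattern, pulled out as a hypothetical helper (the register macros are the ones this patch adds to rv515d.h):

static void crtc_control_write_locked(struct radeon_device *rdev,
				      u32 d1crtc, u32 d2crtc)
{
	/* Take the update locks, write both controls, then release so the
	 * controllers pick the new values up in one step. */
	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
	WREG32(R_006080_D1CRTC_CONTROL, d1crtc);
	WREG32(R_006880_D2CRTC_CONTROL, d2crtc);
	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
}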
416 | void rv515_mc_program(struct radeon_device *rdev) | ||
417 | { | ||
418 | struct rv515_mc_save save; | ||
419 | |||
420 | /* Stop all MC clients */ | ||
421 | rv515_mc_stop(rdev, &save); | ||
422 | |||
423 | /* Wait for MC idle */ | ||
424 | if (rv515_mc_wait_for_idle(rdev)) | ||
425 | dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n"); | ||
426 | /* Write VRAM size in case we are limiting it */ | ||
427 | WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
428 | /* Program MC, should be a 32-bit limited address space */ | ||
429 | WREG32_MC(R_000001_MC_FB_LOCATION, | ||
430 | S_000001_MC_FB_START(rdev->mc.vram_start >> 16) | | ||
431 | S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16)); | ||
432 | WREG32(R_000134_HDP_FB_LOCATION, | ||
433 | S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); | ||
434 | if (rdev->flags & RADEON_IS_AGP) { | ||
435 | WREG32_MC(R_000002_MC_AGP_LOCATION, | ||
436 | S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) | | ||
437 | S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); | ||
438 | WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); | ||
439 | WREG32_MC(R_000004_MC_AGP_BASE_2, | ||
440 | S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base))); | ||
441 | } else { | ||
442 | WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF); | ||
443 | WREG32_MC(R_000003_MC_AGP_BASE, 0); | ||
444 | WREG32_MC(R_000004_MC_AGP_BASE_2, 0); | ||
445 | } | ||
446 | |||
447 | rv515_mc_resume(rdev, &save); | ||
448 | } | ||
449 | |||
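R_000001_MC_FB_LOCATION packs the framebuffer window into one 32-bit word: bits 15:0 hold the start and bits 31:16 the top, both in 64KB units, which is why rv515_mc_program() shifts the byte addresses right by 16 before the S_000001_* macros shift them into their fields. A worked example with an assumed 128MB window at offset 0:

	u64 vram_start = 0x00000000;
	u64 vram_end = 0x07FFFFFF;	/* inclusive top of a 128MB window */
	u32 fb_loc = S_000001_MC_FB_START(vram_start >> 16) |
		     S_000001_MC_FB_TOP(vram_end >> 16);
	/* fb_loc == 0x07FF0000: FB_START field 0x0000, FB_TOP field 0x07FF */

The AGP window at R_000002_MC_AGP_LOCATION uses the same start/top encoding; the 0xFFFFFFFF written in the non-AGP branch parks that window at the very top of the address space, out of the way.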
450 | void rv515_clock_startup(struct radeon_device *rdev) | ||
451 | { | ||
452 | if (radeon_dynclks != -1 && radeon_dynclks) | ||
453 | radeon_atom_set_clock_gating(rdev, 1); | ||
454 | /* We need to force some of the blocks on */ | ||
455 | WREG32_PLL(R_00000F_CP_DYN_CNTL, | ||
456 | RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1)); | ||
457 | WREG32_PLL(R_000011_E2_DYN_CNTL, | ||
458 | RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1)); | ||
459 | WREG32_PLL(R_000013_IDCT_DYN_CNTL, | ||
460 | RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1)); | ||
461 | } | ||
462 | |||
463 | static int rv515_startup(struct radeon_device *rdev) | ||
464 | { | ||
465 | int r; | ||
466 | |||
467 | rv515_mc_program(rdev); | ||
468 | /* Resume clock */ | ||
469 | rv515_clock_startup(rdev); | ||
470 | /* Initialize GPU configuration (# pipes, ...) */ | ||
471 | rv515_gpu_init(rdev); | ||
472 | /* Initialize GART (initialize after TTM so we can allocate | ||
473 | * memory through TTM but finalize after TTM) */ | ||
474 | if (rdev->flags & RADEON_IS_PCIE) { | ||
475 | r = rv370_pcie_gart_enable(rdev); | ||
476 | if (r) | ||
477 | return r; | ||
478 | } | ||
479 | /* Enable IRQ */ | ||
480 | rdev->irq.sw_int = true; | ||
481 | r100_irq_set(rdev); | ||
482 | /* 1M ring buffer */ | ||
483 | r = r100_cp_init(rdev, 1024 * 1024); | ||
484 | if (r) { | ||
485 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | ||
486 | return r; | ||
487 | } | ||
488 | r = r100_wb_init(rdev); | ||
489 | if (r) | ||
490 | dev_err(rdev->dev, "failed initializing WB (%d).\n", r); | ||
491 | r = r100_ib_init(rdev); | ||
492 | if (r) { | ||
493 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | ||
494 | return r; | ||
495 | } | ||
496 | return 0; | ||
497 | } | ||
498 | |||
499 | int rv515_resume(struct radeon_device *rdev) | ||
500 | { | ||
501 | /* Make sure the GART is disabled */ | ||
502 | if (rdev->flags & RADEON_IS_PCIE) | ||
503 | rv370_pcie_gart_disable(rdev); | ||
504 | /* Resume clock before doing reset */ | ||
505 | rv515_clock_startup(rdev); | ||
506 | /* Reset GPU before posting, otherwise ATOM will enter an infinite loop */ | ||
507 | if (radeon_gpu_reset(rdev)) { | ||
508 | dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
509 | RREG32(R_000E40_RBBM_STATUS), | ||
510 | RREG32(R_0007C0_CP_STAT)); | ||
511 | } | ||
512 | /* post */ | ||
513 | atom_asic_init(rdev->mode_info.atom_context); | ||
514 | /* Resume clock after posting */ | ||
515 | rv515_clock_startup(rdev); | ||
516 | return rv515_startup(rdev); | ||
517 | } | ||
518 | |||
519 | int rv515_suspend(struct radeon_device *rdev) | ||
520 | { | ||
521 | r100_cp_disable(rdev); | ||
522 | r100_wb_disable(rdev); | ||
523 | r100_irq_disable(rdev); | ||
524 | if (rdev->flags & RADEON_IS_PCIE) | ||
525 | rv370_pcie_gart_disable(rdev); | ||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | void rv515_set_safe_registers(struct radeon_device *rdev) | ||
466 | { | 530 | { |
467 | rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm; | 531 | rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm; |
468 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm); | 532 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm); |
533 | } | ||
534 | |||
535 | void rv515_fini(struct radeon_device *rdev) | ||
536 | { | ||
537 | rv515_suspend(rdev); | ||
538 | r100_cp_fini(rdev); | ||
539 | r100_wb_fini(rdev); | ||
540 | r100_ib_fini(rdev); | ||
541 | radeon_gem_fini(rdev); | ||
542 | rv370_pcie_gart_fini(rdev); | ||
543 | radeon_agp_fini(rdev); | ||
544 | radeon_irq_kms_fini(rdev); | ||
545 | radeon_fence_driver_fini(rdev); | ||
546 | radeon_object_fini(rdev); | ||
547 | radeon_atombios_fini(rdev); | ||
548 | kfree(rdev->bios); | ||
549 | rdev->bios = NULL; | ||
550 | } | ||
551 | |||
552 | int rv515_init(struct radeon_device *rdev) | ||
553 | { | ||
554 | int r; | ||
555 | |||
556 | rdev->new_init_path = true; | ||
557 | /* Initialize scratch registers */ | ||
558 | radeon_scratch_init(rdev); | ||
559 | /* Initialize surface registers */ | ||
560 | radeon_surface_init(rdev); | ||
561 | /* TODO: disable VGA; need to use VGA request */ | ||
562 | /* BIOS */ | ||
563 | if (!radeon_get_bios(rdev)) { | ||
564 | if (ASIC_IS_AVIVO(rdev)) | ||
565 | return -EINVAL; | ||
566 | } | ||
567 | if (rdev->is_atom_bios) { | ||
568 | r = radeon_atombios_init(rdev); | ||
569 | if (r) | ||
570 | return r; | ||
571 | } else { | ||
572 | dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); | ||
573 | return -EINVAL; | ||
574 | } | ||
575 | /* Reset GPU before posting, otherwise ATOM will enter an infinite loop */ | ||
576 | if (radeon_gpu_reset(rdev)) { | ||
577 | dev_warn(rdev->dev, | ||
578 | "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n", | ||
579 | RREG32(R_000E40_RBBM_STATUS), | ||
580 | RREG32(R_0007C0_CP_STAT)); | ||
581 | } | ||
582 | /* check if the card is posted or not */ | ||
583 | if (!radeon_card_posted(rdev) && rdev->bios) { | ||
584 | DRM_INFO("GPU not posted. posting now...\n"); | ||
585 | atom_asic_init(rdev->mode_info.atom_context); | ||
586 | } | ||
587 | /* Initialize clocks */ | ||
588 | radeon_get_clock_info(rdev->ddev); | ||
589 | /* Get VRAM information */ | ||
590 | rv515_vram_info(rdev); | ||
591 | /* Initialize memory controller (also test AGP) */ | ||
592 | r = r420_mc_init(rdev); | ||
593 | if (r) | ||
594 | return r; | ||
595 | rv515_debugfs(rdev); | ||
596 | /* Fence driver */ | ||
597 | r = radeon_fence_driver_init(rdev); | ||
598 | if (r) | ||
599 | return r; | ||
600 | r = radeon_irq_kms_init(rdev); | ||
601 | if (r) | ||
602 | return r; | ||
603 | /* Memory manager */ | ||
604 | r = radeon_object_init(rdev); | ||
605 | if (r) | ||
606 | return r; | ||
607 | r = rv370_pcie_gart_init(rdev); | ||
608 | if (r) | ||
609 | return r; | ||
610 | rv515_set_safe_registers(rdev); | ||
611 | rdev->accel_working = true; | ||
612 | r = rv515_startup(rdev); | ||
613 | if (r) { | ||
614 | /* Something went wrong with the accel init, so stop acceleration */ | ||
615 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | ||
616 | rv515_suspend(rdev); | ||
617 | r100_cp_fini(rdev); | ||
618 | r100_wb_fini(rdev); | ||
619 | r100_ib_fini(rdev); | ||
620 | rv370_pcie_gart_fini(rdev); | ||
621 | radeon_agp_fini(rdev); | ||
622 | radeon_irq_kms_fini(rdev); | ||
623 | rdev->accel_working = false; | ||
624 | } | ||
469 | return 0; | 625 | return 0; |
470 | } | 626 | } |
471 | 627 | ||
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h index a65e17ec1c08..fc216e49384d 100644 --- a/drivers/gpu/drm/radeon/rv515d.h +++ b/drivers/gpu/drm/radeon/rv515d.h | |||
@@ -216,5 +216,388 @@ | |||
216 | #define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) | 216 | #define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) |
217 | #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) | 217 | #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) |
218 | 218 | ||
219 | #endif | 219 | /* Registers */ |
220 | #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 | ||
221 | #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) | ||
222 | #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) | ||
223 | #define C_0000F8_CONFIG_MEMSIZE 0x00000000 | ||
224 | #define R_000134_HDP_FB_LOCATION 0x000134 | ||
225 | #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) | ||
226 | #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
227 | #define C_000134_HDP_FB_START 0xFFFF0000 | ||
228 | #define R_000300_VGA_RENDER_CONTROL 0x000300 | ||
229 | #define S_000300_VGA_BLINK_RATE(x) (((x) & 0x1F) << 0) | ||
230 | #define G_000300_VGA_BLINK_RATE(x) (((x) >> 0) & 0x1F) | ||
231 | #define C_000300_VGA_BLINK_RATE 0xFFFFFFE0 | ||
232 | #define S_000300_VGA_BLINK_MODE(x) (((x) & 0x3) << 5) | ||
233 | #define G_000300_VGA_BLINK_MODE(x) (((x) >> 5) & 0x3) | ||
234 | #define C_000300_VGA_BLINK_MODE 0xFFFFFF9F | ||
235 | #define S_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) & 0x1) << 7) | ||
236 | #define G_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) >> 7) & 0x1) | ||
237 | #define C_000300_VGA_CURSOR_BLINK_INVERT 0xFFFFFF7F | ||
238 | #define S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) & 0x1) << 8) | ||
239 | #define G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) >> 8) & 0x1) | ||
240 | #define C_000300_VGA_EXTD_ADDR_COUNT_ENABLE 0xFFFFFEFF | ||
241 | #define S_000300_VGA_VSTATUS_CNTL(x) (((x) & 0x3) << 16) | ||
242 | #define G_000300_VGA_VSTATUS_CNTL(x) (((x) >> 16) & 0x3) | ||
243 | #define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF | ||
244 | #define S_000300_VGA_LOCK_8DOT(x) (((x) & 0x1) << 24) | ||
245 | #define G_000300_VGA_LOCK_8DOT(x) (((x) >> 24) & 0x1) | ||
246 | #define C_000300_VGA_LOCK_8DOT 0xFEFFFFFF | ||
247 | #define S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25) | ||
248 | #define G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1) | ||
249 | #define C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL 0xFDFFFFFF | ||
250 | #define R_000310_VGA_MEMORY_BASE_ADDRESS 0x000310 | ||
251 | #define S_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) | ||
252 | #define G_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) | ||
253 | #define C_000310_VGA_MEMORY_BASE_ADDRESS 0x00000000 | ||
254 | #define R_000328_VGA_HDP_CONTROL 0x000328 | ||
255 | #define S_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) & 0x1) << 0) | ||
256 | #define G_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) >> 0) & 0x1) | ||
257 | #define C_000328_VGA_MEM_PAGE_SELECT_EN 0xFFFFFFFE | ||
258 | #define S_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) & 0x1) << 8) | ||
259 | #define G_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) >> 8) & 0x1) | ||
260 | #define C_000328_VGA_RBBM_LOCK_DISABLE 0xFFFFFEFF | ||
261 | #define S_000328_VGA_SOFT_RESET(x) (((x) & 0x1) << 16) | ||
262 | #define G_000328_VGA_SOFT_RESET(x) (((x) >> 16) & 0x1) | ||
263 | #define C_000328_VGA_SOFT_RESET 0xFFFEFFFF | ||
264 | #define S_000328_VGA_TEST_RESET_CONTROL(x) (((x) & 0x1) << 24) | ||
265 | #define G_000328_VGA_TEST_RESET_CONTROL(x) (((x) >> 24) & 0x1) | ||
266 | #define C_000328_VGA_TEST_RESET_CONTROL 0xFEFFFFFF | ||
267 | #define R_000330_D1VGA_CONTROL 0x000330 | ||
268 | #define S_000330_D1VGA_MODE_ENABLE(x) (((x) & 0x1) << 0) | ||
269 | #define G_000330_D1VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1) | ||
270 | #define C_000330_D1VGA_MODE_ENABLE 0xFFFFFFFE | ||
271 | #define S_000330_D1VGA_TIMING_SELECT(x) (((x) & 0x1) << 8) | ||
272 | #define G_000330_D1VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1) | ||
273 | #define C_000330_D1VGA_TIMING_SELECT 0xFFFFFEFF | ||
274 | #define S_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9) | ||
275 | #define G_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1) | ||
276 | #define C_000330_D1VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF | ||
277 | #define S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10) | ||
278 | #define G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1) | ||
279 | #define C_000330_D1VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF | ||
280 | #define S_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16) | ||
281 | #define G_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1) | ||
282 | #define C_000330_D1VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF | ||
283 | #define S_000330_D1VGA_ROTATE(x) (((x) & 0x3) << 24) | ||
284 | #define G_000330_D1VGA_ROTATE(x) (((x) >> 24) & 0x3) | ||
285 | #define C_000330_D1VGA_ROTATE 0xFCFFFFFF | ||
286 | #define R_000338_D2VGA_CONTROL 0x000338 | ||
287 | #define S_000338_D2VGA_MODE_ENABLE(x) (((x) & 0x1) << 0) | ||
288 | #define G_000338_D2VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1) | ||
289 | #define C_000338_D2VGA_MODE_ENABLE 0xFFFFFFFE | ||
290 | #define S_000338_D2VGA_TIMING_SELECT(x) (((x) & 0x1) << 8) | ||
291 | #define G_000338_D2VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1) | ||
292 | #define C_000338_D2VGA_TIMING_SELECT 0xFFFFFEFF | ||
293 | #define S_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9) | ||
294 | #define G_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1) | ||
295 | #define C_000338_D2VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF | ||
296 | #define S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10) | ||
297 | #define G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1) | ||
298 | #define C_000338_D2VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF | ||
299 | #define S_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16) | ||
300 | #define G_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1) | ||
301 | #define C_000338_D2VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF | ||
302 | #define S_000338_D2VGA_ROTATE(x) (((x) & 0x3) << 24) | ||
303 | #define G_000338_D2VGA_ROTATE(x) (((x) >> 24) & 0x3) | ||
304 | #define C_000338_D2VGA_ROTATE 0xFCFFFFFF | ||
305 | #define R_0007C0_CP_STAT 0x0007C0 | ||
306 | #define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) | ||
307 | #define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) | ||
308 | #define C_0007C0_MRU_BUSY 0xFFFFFFFE | ||
309 | #define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) | ||
310 | #define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) | ||
311 | #define C_0007C0_MWU_BUSY 0xFFFFFFFD | ||
312 | #define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) | ||
313 | #define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) | ||
314 | #define C_0007C0_RSIU_BUSY 0xFFFFFFFB | ||
315 | #define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) | ||
316 | #define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) | ||
317 | #define C_0007C0_RCIU_BUSY 0xFFFFFFF7 | ||
318 | #define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) | ||
319 | #define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) | ||
320 | #define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF | ||
321 | #define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) | ||
322 | #define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) | ||
323 | #define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF | ||
324 | #define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) | ||
325 | #define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) | ||
326 | #define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF | ||
327 | #define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) | ||
328 | #define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) | ||
329 | #define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF | ||
330 | #define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) | ||
331 | #define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) | ||
332 | #define C_0007C0_CSI_BUSY 0xFFFFDFFF | ||
333 | #define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) | ||
334 | #define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) | ||
335 | #define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF | ||
336 | #define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) | ||
337 | #define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) | ||
338 | #define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF | ||
339 | #define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) | ||
340 | #define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) | ||
341 | #define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF | ||
342 | #define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) | ||
343 | #define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) | ||
344 | #define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF | ||
345 | #define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) | ||
346 | #define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) | ||
347 | #define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF | ||
348 | #define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) | ||
349 | #define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) | ||
350 | #define C_0007C0_CP_BUSY 0x7FFFFFFF | ||
351 | #define R_000E40_RBBM_STATUS 0x000E40 | ||
352 | #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) | ||
353 | #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) | ||
354 | #define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 | ||
355 | #define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) | ||
356 | #define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) | ||
357 | #define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF | ||
358 | #define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) | ||
359 | #define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) | ||
360 | #define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF | ||
361 | #define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) | ||
362 | #define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) | ||
363 | #define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF | ||
364 | #define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) | ||
365 | #define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) | ||
366 | #define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF | ||
367 | #define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) | ||
368 | #define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) | ||
369 | #define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF | ||
370 | #define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) | ||
371 | #define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) | ||
372 | #define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF | ||
373 | #define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) | ||
374 | #define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) | ||
375 | #define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF | ||
376 | #define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) | ||
377 | #define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) | ||
378 | #define C_000E40_ENG_EV_BUSY 0xFFFF7FFF | ||
379 | #define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) | ||
380 | #define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) | ||
381 | #define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF | ||
382 | #define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) | ||
383 | #define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) | ||
384 | #define C_000E40_E2_BUSY 0xFFFDFFFF | ||
385 | #define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) | ||
386 | #define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) | ||
387 | #define C_000E40_RB2D_BUSY 0xFFFBFFFF | ||
388 | #define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) | ||
389 | #define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) | ||
390 | #define C_000E40_RB3D_BUSY 0xFFF7FFFF | ||
391 | #define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) | ||
392 | #define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) | ||
393 | #define C_000E40_VAP_BUSY 0xFFEFFFFF | ||
394 | #define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) | ||
395 | #define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) | ||
396 | #define C_000E40_RE_BUSY 0xFFDFFFFF | ||
397 | #define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) | ||
398 | #define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) | ||
399 | #define C_000E40_TAM_BUSY 0xFFBFFFFF | ||
400 | #define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) | ||
401 | #define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) | ||
402 | #define C_000E40_TDM_BUSY 0xFF7FFFFF | ||
403 | #define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) | ||
404 | #define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) | ||
405 | #define C_000E40_PB_BUSY 0xFEFFFFFF | ||
406 | #define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) | ||
407 | #define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) | ||
408 | #define C_000E40_TIM_BUSY 0xFDFFFFFF | ||
409 | #define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) | ||
410 | #define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) | ||
411 | #define C_000E40_GA_BUSY 0xFBFFFFFF | ||
412 | #define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) | ||
413 | #define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) | ||
414 | #define C_000E40_CBA2D_BUSY 0xF7FFFFFF | ||
415 | #define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28) | ||
416 | #define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1) | ||
417 | #define C_000E40_RBBM_HIBUSY 0xEFFFFFFF | ||
418 | #define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29) | ||
419 | #define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1) | ||
420 | #define C_000E40_SKID_CFBUSY 0xDFFFFFFF | ||
421 | #define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30) | ||
422 | #define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1) | ||
423 | #define C_000E40_VAP_VF_BUSY 0xBFFFFFFF | ||
424 | #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) | ||
425 | #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) | ||
426 | #define C_000E40_GUI_ACTIVE 0x7FFFFFFF | ||
427 | #define R_006080_D1CRTC_CONTROL 0x006080 | ||
428 | #define S_006080_D1CRTC_MASTER_EN(x) (((x) & 0x1) << 0) | ||
429 | #define G_006080_D1CRTC_MASTER_EN(x) (((x) >> 0) & 0x1) | ||
430 | #define C_006080_D1CRTC_MASTER_EN 0xFFFFFFFE | ||
431 | #define S_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4) | ||
432 | #define G_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1) | ||
433 | #define C_006080_D1CRTC_SYNC_RESET_SEL 0xFFFFFFEF | ||
434 | #define S_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8) | ||
435 | #define G_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3) | ||
436 | #define C_006080_D1CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF | ||
437 | #define S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16) | ||
438 | #define G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1) | ||
439 | #define C_006080_D1CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF | ||
440 | #define S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24) | ||
441 | #define G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1) | ||
442 | #define C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF | ||
443 | #define R_0060E8_D1CRTC_UPDATE_LOCK 0x0060E8 | ||
444 | #define S_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0) | ||
445 | #define G_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1) | ||
446 | #define C_0060E8_D1CRTC_UPDATE_LOCK 0xFFFFFFFE | ||
447 | #define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x006110 | ||
448 | #define S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) | ||
449 | #define G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) | ||
450 | #define C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000 | ||
451 | #define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x006118 | ||
452 | #define S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) | ||
453 | #define G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) | ||
454 | #define C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000 | ||
455 | #define R_006880_D2CRTC_CONTROL 0x006880 | ||
456 | #define S_006880_D2CRTC_MASTER_EN(x) (((x) & 0x1) << 0) | ||
457 | #define G_006880_D2CRTC_MASTER_EN(x) (((x) >> 0) & 0x1) | ||
458 | #define C_006880_D2CRTC_MASTER_EN 0xFFFFFFFE | ||
459 | #define S_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4) | ||
460 | #define G_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1) | ||
461 | #define C_006880_D2CRTC_SYNC_RESET_SEL 0xFFFFFFEF | ||
462 | #define S_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8) | ||
463 | #define G_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3) | ||
464 | #define C_006880_D2CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF | ||
465 | #define S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16) | ||
466 | #define G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1) | ||
467 | #define C_006880_D2CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF | ||
468 | #define S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24) | ||
469 | #define G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1) | ||
470 | #define C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF | ||
471 | #define R_0068E8_D2CRTC_UPDATE_LOCK 0x0068E8 | ||
472 | #define S_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0) | ||
473 | #define G_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1) | ||
474 | #define C_0068E8_D2CRTC_UPDATE_LOCK 0xFFFFFFFE | ||
475 | #define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x006910 | ||
476 | #define S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) | ||
477 | #define G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) | ||
478 | #define C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000 | ||
479 | #define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x006918 | ||
480 | #define S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) | ||
481 | #define G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) | ||
482 | #define C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000 | ||
483 | |||
484 | |||
485 | #define R_000001_MC_FB_LOCATION 0x000001 | ||
486 | #define S_000001_MC_FB_START(x) (((x) & 0xFFFF) << 0) | ||
487 | #define G_000001_MC_FB_START(x) (((x) >> 0) & 0xFFFF) | ||
488 | #define C_000001_MC_FB_START 0xFFFF0000 | ||
489 | #define S_000001_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) | ||
490 | #define G_000001_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) | ||
491 | #define C_000001_MC_FB_TOP 0x0000FFFF | ||
492 | #define R_000002_MC_AGP_LOCATION 0x000002 | ||
493 | #define S_000002_MC_AGP_START(x) (((x) & 0xFFFF) << 0) | ||
494 | #define G_000002_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) | ||
495 | #define C_000002_MC_AGP_START 0xFFFF0000 | ||
496 | #define S_000002_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) | ||
497 | #define G_000002_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) | ||
498 | #define C_000002_MC_AGP_TOP 0x0000FFFF | ||
499 | #define R_000003_MC_AGP_BASE 0x000003 | ||
500 | #define S_000003_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) | ||
501 | #define G_000003_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) | ||
502 | #define C_000003_AGP_BASE_ADDR 0x00000000 | ||
503 | #define R_000004_MC_AGP_BASE_2 0x000004 | ||
504 | #define S_000004_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) | ||
505 | #define G_000004_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) | ||
506 | #define C_000004_AGP_BASE_ADDR_2 0xFFFFFFF0 | ||
220 | 507 | ||
508 | |||
509 | #define R_00000F_CP_DYN_CNTL 0x00000F | ||
510 | #define S_00000F_CP_FORCEON(x) (((x) & 0x1) << 0) | ||
511 | #define G_00000F_CP_FORCEON(x) (((x) >> 0) & 0x1) | ||
512 | #define C_00000F_CP_FORCEON 0xFFFFFFFE | ||
513 | #define S_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1) | ||
514 | #define G_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1) | ||
515 | #define C_00000F_CP_MAX_DYN_STOP_LAT 0xFFFFFFFD | ||
516 | #define S_00000F_CP_CLOCK_STATUS(x) (((x) & 0x1) << 2) | ||
517 | #define G_00000F_CP_CLOCK_STATUS(x) (((x) >> 2) & 0x1) | ||
518 | #define C_00000F_CP_CLOCK_STATUS 0xFFFFFFFB | ||
519 | #define S_00000F_CP_PROG_SHUTOFF(x) (((x) & 0x1) << 3) | ||
520 | #define G_00000F_CP_PROG_SHUTOFF(x) (((x) >> 3) & 0x1) | ||
521 | #define C_00000F_CP_PROG_SHUTOFF 0xFFFFFFF7 | ||
522 | #define S_00000F_CP_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4) | ||
523 | #define G_00000F_CP_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF) | ||
524 | #define C_00000F_CP_PROG_DELAY_VALUE 0xFFFFF00F | ||
525 | #define S_00000F_CP_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12) | ||
526 | #define G_00000F_CP_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF) | ||
527 | #define C_00000F_CP_LOWER_POWER_IDLE 0xFFF00FFF | ||
528 | #define S_00000F_CP_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20) | ||
529 | #define G_00000F_CP_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1) | ||
530 | #define C_00000F_CP_LOWER_POWER_IGNORE 0xFFEFFFFF | ||
531 | #define S_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21) | ||
532 | #define G_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1) | ||
533 | #define C_00000F_CP_NORMAL_POWER_IGNORE 0xFFDFFFFF | ||
534 | #define S_00000F_SPARE(x) (((x) & 0x3) << 22) | ||
535 | #define G_00000F_SPARE(x) (((x) >> 22) & 0x3) | ||
536 | #define C_00000F_SPARE 0xFF3FFFFF | ||
537 | #define S_00000F_CP_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24) | ||
538 | #define G_00000F_CP_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF) | ||
539 | #define C_00000F_CP_NORMAL_POWER_BUSY 0x00FFFFFF | ||
540 | #define R_000011_E2_DYN_CNTL 0x000011 | ||
541 | #define S_000011_E2_FORCEON(x) (((x) & 0x1) << 0) | ||
542 | #define G_000011_E2_FORCEON(x) (((x) >> 0) & 0x1) | ||
543 | #define C_000011_E2_FORCEON 0xFFFFFFFE | ||
544 | #define S_000011_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1) | ||
545 | #define G_000011_E2_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1) | ||
546 | #define C_000011_E2_MAX_DYN_STOP_LAT 0xFFFFFFFD | ||
547 | #define S_000011_E2_CLOCK_STATUS(x) (((x) & 0x1) << 2) | ||
548 | #define G_000011_E2_CLOCK_STATUS(x) (((x) >> 2) & 0x1) | ||
549 | #define C_000011_E2_CLOCK_STATUS 0xFFFFFFFB | ||
550 | #define S_000011_E2_PROG_SHUTOFF(x) (((x) & 0x1) << 3) | ||
551 | #define G_000011_E2_PROG_SHUTOFF(x) (((x) >> 3) & 0x1) | ||
552 | #define C_000011_E2_PROG_SHUTOFF 0xFFFFFFF7 | ||
553 | #define S_000011_E2_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4) | ||
554 | #define G_000011_E2_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF) | ||
555 | #define C_000011_E2_PROG_DELAY_VALUE 0xFFFFF00F | ||
556 | #define S_000011_E2_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12) | ||
557 | #define G_000011_E2_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF) | ||
558 | #define C_000011_E2_LOWER_POWER_IDLE 0xFFF00FFF | ||
559 | #define S_000011_E2_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20) | ||
560 | #define G_000011_E2_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1) | ||
561 | #define C_000011_E2_LOWER_POWER_IGNORE 0xFFEFFFFF | ||
562 | #define S_000011_E2_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21) | ||
563 | #define G_000011_E2_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1) | ||
564 | #define C_000011_E2_NORMAL_POWER_IGNORE 0xFFDFFFFF | ||
565 | #define S_000011_SPARE(x) (((x) & 0x3) << 22) | ||
566 | #define G_000011_SPARE(x) (((x) >> 22) & 0x3) | ||
567 | #define C_000011_SPARE 0xFF3FFFFF | ||
568 | #define S_000011_E2_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24) | ||
569 | #define G_000011_E2_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF) | ||
570 | #define C_000011_E2_NORMAL_POWER_BUSY 0x00FFFFFF | ||
571 | #define R_000013_IDCT_DYN_CNTL 0x000013 | ||
572 | #define S_000013_IDCT_FORCEON(x) (((x) & 0x1) << 0) | ||
573 | #define G_000013_IDCT_FORCEON(x) (((x) >> 0) & 0x1) | ||
574 | #define C_000013_IDCT_FORCEON 0xFFFFFFFE | ||
575 | #define S_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1) | ||
576 | #define G_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1) | ||
577 | #define C_000013_IDCT_MAX_DYN_STOP_LAT 0xFFFFFFFD | ||
578 | #define S_000013_IDCT_CLOCK_STATUS(x) (((x) & 0x1) << 2) | ||
579 | #define G_000013_IDCT_CLOCK_STATUS(x) (((x) >> 2) & 0x1) | ||
580 | #define C_000013_IDCT_CLOCK_STATUS 0xFFFFFFFB | ||
581 | #define S_000013_IDCT_PROG_SHUTOFF(x) (((x) & 0x1) << 3) | ||
582 | #define G_000013_IDCT_PROG_SHUTOFF(x) (((x) >> 3) & 0x1) | ||
583 | #define C_000013_IDCT_PROG_SHUTOFF 0xFFFFFFF7 | ||
584 | #define S_000013_IDCT_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4) | ||
585 | #define G_000013_IDCT_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF) | ||
586 | #define C_000013_IDCT_PROG_DELAY_VALUE 0xFFFFF00F | ||
587 | #define S_000013_IDCT_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12) | ||
588 | #define G_000013_IDCT_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF) | ||
589 | #define C_000013_IDCT_LOWER_POWER_IDLE 0xFFF00FFF | ||
590 | #define S_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20) | ||
591 | #define G_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1) | ||
592 | #define C_000013_IDCT_LOWER_POWER_IGNORE 0xFFEFFFFF | ||
593 | #define S_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21) | ||
594 | #define G_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1) | ||
595 | #define C_000013_IDCT_NORMAL_POWER_IGNORE 0xFFDFFFFF | ||
596 | #define S_000013_SPARE(x) (((x) & 0x3) << 22) | ||
597 | #define G_000013_SPARE(x) (((x) >> 22) & 0x3) | ||
598 | #define C_000013_SPARE 0xFF3FFFFF | ||
599 | #define S_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24) | ||
600 | #define G_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF) | ||
601 | #define C_000013_IDCT_NORMAL_POWER_BUSY 0x00FFFFFF | ||
602 | |||
603 | #endif | ||
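Every register in this header follows one convention: per field, S_* shifts a value into position, G_* extracts it from a register word, and C_* is the mask with only that field cleared, so a read-modify-write is an AND with C_* followed by an OR with S_*. For example, on the VGA_VSTATUS_CNTL field that rv515_vga_render_disable() masks off:

	u32 tmp = RREG32(R_000300_VGA_RENDER_CONTROL);
	/* Clear the old VSTATUS_CNTL value, then insert the new one. */
	tmp &= C_000300_VGA_VSTATUS_CNTL;
	tmp |= S_000300_VGA_VSTATUS_CNTL(1);
	WREG32(R_000300_VGA_RENDER_CONTROL, tmp);
	/* G_000300_VGA_VSTATUS_CNTL(tmp) now evaluates to 1. */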
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b574c73a5109..e0b97d161397 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -31,8 +31,8 @@ | |||
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_drm.h" | 32 | #include "radeon_drm.h" |
33 | #include "rv770d.h" | 33 | #include "rv770d.h" |
34 | #include "avivod.h" | ||
35 | #include "atom.h" | 34 | #include "atom.h" |
35 | #include "avivod.h" | ||
36 | 36 | ||
37 | #define R700_PFP_UCODE_SIZE 848 | 37 | #define R700_PFP_UCODE_SIZE 848 |
38 | #define R700_PM4_UCODE_SIZE 1360 | 38 | #define R700_PM4_UCODE_SIZE 1360 |
@@ -231,7 +231,7 @@ static void rv770_mc_resume(struct radeon_device *rdev) | |||
231 | 231 | ||
232 | /* we need to own VRAM, so turn off the VGA renderer here | 232 | /* we need to own VRAM, so turn off the VGA renderer here |
233 | * to stop it overwriting our objects */ | 233 | * to stop it overwriting our objects */ |
234 | radeon_avivo_vga_render_disable(rdev); | 234 | rv515_vga_render_disable(rdev); |
235 | } | 235 | } |
236 | 236 | ||
237 | 237 | ||
@@ -801,6 +801,13 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
801 | /* Setup GPU memory space */ | 801 | /* Setup GPU memory space */ |
802 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 802 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
803 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 803 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
804 | |||
805 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
806 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
807 | |||
808 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
809 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
810 | |||
804 | if (rdev->flags & RADEON_IS_AGP) { | 811 | if (rdev->flags & RADEON_IS_AGP) { |
805 | r = radeon_agp_init(rdev); | 812 | r = radeon_agp_init(rdev); |
806 | if (r) | 813 | if (r) |
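The two added checks clamp both reported VRAM sizes to the PCI aperture, so the driver never places objects beyond CPU-visible VRAM. An equivalent, more compact sketch (assuming both fields share a 64-bit type, which may not match the actual struct layout):

	rdev->mc.mc_vram_size = min(rdev->mc.mc_vram_size, (u64)rdev->mc.aper_size);
	rdev->mc.real_vram_size = min(rdev->mc.real_vram_size, (u64)rdev->mc.aper_size);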
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 33de7637c0c6..1c040d040338 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -228,7 +228,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma) | |||
228 | vma->vm_private_data = NULL; | 228 | vma->vm_private_data = NULL; |
229 | } | 229 | } |
230 | 230 | ||
231 | static struct vm_operations_struct ttm_bo_vm_ops = { | 231 | static const struct vm_operations_struct ttm_bo_vm_ops = { |
232 | .fault = ttm_bo_vm_fault, | 232 | .fault = ttm_bo_vm_fault, |
233 | .open = ttm_bo_vm_open, | 233 | .open = ttm_bo_vm_open, |
234 | .close = ttm_bo_vm_close | 234 | .close = ttm_bo_vm_close |