author    Alex Deucher <alexdeucher@gmail.com>    2010-11-21 10:59:01 -0500
committer Dave Airlie <airlied@redhat.com>        2010-11-21 20:51:08 -0500
commit    6f34be50bd1bdd2ff3c955940e033a80d05f248a (patch)
tree      7e9635a2e589cd3a49490a4656611c112e485059
parent    f5a8020903932624cf020dc72455a10a3e005087 (diff)
drm/radeon/kms: add pageflip ioctl support (v3)
This adds support for dri2 pageflipping.

v2: precision updates from Mario Kleiner.

v3: Multihead fixes from Mario Kleiner;
    missing crtc offset;
    add note about update pending bit on pre-avivo chips.

Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Mario Kleiner <mario.kleiner@tuebingen.mpg.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c      | 301
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h  |   6
-rw-r--r--  drivers/gpu/drm/radeon/r100.c           |  74
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h       |   4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c           | 126
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h          |   9
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h         |  57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c    |  42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h    |  10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 261
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c     |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c |  39
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h    |   6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h     |   1
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c          | 116
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c          |  34
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h         |   7
17 files changed, 900 insertions(+), 196 deletions(-)
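[Editorial note] The commit splits a flip into three per-ASIC hooks: pre_page_flip (arm the flip and enable the pflip interrupt), page_flip (program the new scanout base, returning the update-pending status), and post_page_flip (drop the interrupt). The radeon_display.c hunk that drives these hooks is not included in this excerpt, so the following is a hedged sketch of the expected call flow, not the patch's actual ioctl code; the function name and the buffer-pinning details are assumptions.

/* Hedged sketch of the expected flip flow (not part of the patch). */
static int radeon_crtc_page_flip_sketch(struct drm_crtc *crtc,
					struct drm_framebuffer *fb,
					struct drm_pending_vblank_event *event)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_device *rdev = crtc->dev->dev_private;
	struct radeon_unpin_work *work;	/* struct added in radeon.h below */

	/* pin the new framebuffer BO; record its GPU address and 'event'
	 * in 'work' (details omitted; see radeon_display.c) */

	/* arm the flip: force it into vblank and enable the pflip irq */
	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);

	/* on a later vblank irq, radeon_crtc_handle_flip() programs the
	 * new base via radeon_page_flip(rdev, crtc_id, new_crtc_base),
	 * sends 'event', queues 'work' to unpin the old buffer, and
	 * drops the pflip irq with radeon_post_page_flip(). */
	return 0;
}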
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4dc5b4714c5a..df3f37243222 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -40,6 +40,61 @@
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
+
 /* get temperature in millidegrees */
 u32 evergreen_get_temp(struct radeon_device *rdev)
 {
@@ -2060,6 +2115,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2085,27 +2141,33 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
 		crtc1 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
 		crtc2 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[2]) {
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    rdev->irq.pflip[2]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
 		crtc3 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[3]) {
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    rdev->irq.pflip[3]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
 		crtc4 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[4]) {
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    rdev->irq.pflip[4]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
 		crtc5 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[5]) {
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    rdev->irq.pflip[5]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
 		crtc6 |= VBLANK_INT_MASK;
 	}
@@ -2148,6 +2210,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
 	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
 
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+
 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
 	WREG32(DC_HPD2_INT_CONTROL, hpd2);
 	WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2158,79 +2227,92 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	return 0;
 }
 
-static inline void evergreen_irq_ack(struct radeon_device *rdev,
-				     u32 *disp_int,
-				     u32 *disp_int_cont,
-				     u32 *disp_int_cont2,
-				     u32 *disp_int_cont3,
-				     u32 *disp_int_cont4,
-				     u32 *disp_int_cont5)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	*disp_int = RREG32(DISP_INTERRUPT_STATUS);
-	*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
-	*disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
-	*disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
-	*disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
-	*disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
-
-	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int & DC_HPD1_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
 		tmp = RREG32(DC_HPD1_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD1_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
 		tmp = RREG32(DC_HPD2_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD2_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
 		tmp = RREG32(DC_HPD3_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD3_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
 		tmp = RREG32(DC_HPD4_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
 		tmp = RREG32(DC_HPD5_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
 		tmp = RREG32(DC_HPD5_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2239,14 +2321,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev,
 
 void evergreen_irq_disable(struct radeon_device *rdev)
 {
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
-
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
-			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+	evergreen_irq_ack(rdev);
 	evergreen_disable_interrupt_state(rdev);
 }
 
@@ -2286,8 +2364,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
@@ -2308,8 +2384,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
 
 restart_ih:
 	/* display interrupts */
-	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
-			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+	evergreen_irq_ack(rdev);
 
 	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
@@ -2322,17 +2397,21 @@ restart_ih:
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.pflip[0])
+						radeon_crtc_handle_flip(rdev, 0);
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D1_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D1 vline\n");
 				}
 				break;
@@ -2344,17 +2423,21 @@ restart_ih:
 		case 2: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.pflip[1])
+						radeon_crtc_handle_flip(rdev, 1);
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
 				}
 				break;
 			case 1: /* D2 vline */
-				if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-					disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D2 vline\n");
 				}
 				break;
@@ -2366,17 +2449,21 @@ restart_ih:
 		case 3: /* D3 vblank/vline */
 			switch (src_data) {
 			case 0: /* D3 vblank */
-				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 2);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[2])
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
 				}
 				break;
 			case 1: /* D3 vline */
-				if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-					disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D3 vline\n");
 				}
 				break;
@@ -2388,17 +2475,21 @@ restart_ih:
 		case 4: /* D4 vblank/vline */
 			switch (src_data) {
 			case 0: /* D4 vblank */
-				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 3);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[3])
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
 				}
 				break;
 			case 1: /* D4 vline */
-				if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-					disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D4 vline\n");
 				}
 				break;
@@ -2410,17 +2501,21 @@ restart_ih:
 		case 5: /* D5 vblank/vline */
 			switch (src_data) {
 			case 0: /* D5 vblank */
-				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 4);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[4])
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
 				}
 				break;
 			case 1: /* D5 vline */
-				if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-					disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D5 vline\n");
 				}
 				break;
@@ -2432,17 +2527,21 @@ restart_ih:
 		case 6: /* D6 vblank/vline */
 			switch (src_data) {
 			case 0: /* D6 vblank */
-				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 5);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[5])
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
 				}
 				break;
 			case 1: /* D6 vline */
-				if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-					disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D6 vline\n");
 				}
 				break;
@@ -2454,43 +2553,43 @@ restart_ih:
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (disp_int & DC_HPD1_INTERRUPT) {
-					disp_int &= ~DC_HPD1_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD1\n");
 				}
 				break;
 			case 1:
-				if (disp_int_cont & DC_HPD2_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD2\n");
 				}
 				break;
 			case 2:
-				if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD3\n");
 				}
 				break;
 			case 3:
-				if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD4\n");
 				}
 				break;
 			case 4:
-				if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD5\n");
 				}
 				break;
 			case 5:
-				if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD6\n");
 				}
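[Editorial note] In evergreen_page_flip() above, GRPH_UPDATE_LOCK keeps the double-buffered surface-address registers from latching while the four address writes (secondary/primary, high/low) are in flight; once SURFACE_UPDATE_PENDING asserts, the request is queued, and dropping the lock lets the hardware complete the flip during vblank. A condensed restatement of that handshake, with the addressing boilerplate stripped (illustrative only, using the patch's own register names):

/* Condensed view of the evergreen flip handshake (illustrative). */
u32 off = radeon_crtc->crtc_offset;
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + off);

WREG32(EVERGREEN_GRPH_UPDATE + off, tmp | EVERGREEN_GRPH_UPDATE_LOCK);
/* ... program SECONDARY/PRIMARY_SURFACE_ADDRESS(_HIGH) with the new base ... */
while (!(RREG32(EVERGREEN_GRPH_UPDATE + off) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING))
	;	/* busy-wait: the flip request is now latched */
WREG32(EVERGREEN_GRPH_UPDATE + off, tmp & ~EVERGREEN_GRPH_UPDATE_LOCK);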
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 2330f3a36fd5..c781c92c3451 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -105,6 +105,11 @@
 #define EVERGREEN_GRPH_Y_START                          0x6830
 #define EVERGREEN_GRPH_X_END                            0x6834
 #define EVERGREEN_GRPH_Y_END                            0x6838
+#define EVERGREEN_GRPH_UPDATE                           0x6844
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#       define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x6848
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
 
 /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
 #define EVERGREEN_CUR_CONTROL                           0x6998
@@ -178,6 +183,7 @@
 #       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
 #define EVERGREEN_CRTC_STATUS                           0x6e8c
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 
 #define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8e10aa9f74b0..b2e29798a99d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,54 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
+	tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
+	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen as late as possible in the vblank interval.
+	 * same field for crtc1/2
+	 */
+	tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+	tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+	WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+	/* Lock the graphics update lock */
+	/* update the scanout addresses */
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Note: We don't wait for update_pending to assert, as this never
+	 * happens for some reason on R1xx - R4xx. Adds a bit of imprecision.
+	 */
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
 void r100_pm_get_dynpm_state(struct radeon_device *rdev)
 {
 	int i;
@@ -526,10 +574,12 @@ int r100_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.gui_idle) {
 		tmp |= RADEON_GUI_IDLE_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		tmp |= RADEON_CRTC_VBLANK_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		tmp |= RADEON_CRTC2_VBLANK_MASK;
 	}
 	if (rdev->irq.hpd[0]) {
@@ -600,14 +650,22 @@ int r100_irq_process(struct radeon_device *rdev)
 	}
 	/* Vertical blank interrupts */
 	if (status & RADEON_CRTC_VBLANK_STAT) {
-		drm_handle_vblank(rdev->ddev, 0);
-		rdev->pm.vblank_sync = true;
-		wake_up(&rdev->irq.vblank_queue);
+		if (rdev->irq.pflip[0])
+			radeon_crtc_handle_flip(rdev, 0);
+		if (rdev->irq.crtc_vblank_int[0]) {
+			drm_handle_vblank(rdev->ddev, 0);
+			rdev->pm.vblank_sync = true;
+			wake_up(&rdev->irq.vblank_queue);
+		}
 	}
 	if (status & RADEON_CRTC2_VBLANK_STAT) {
-		drm_handle_vblank(rdev->ddev, 1);
-		rdev->pm.vblank_sync = true;
-		wake_up(&rdev->irq.vblank_queue);
+		if (rdev->irq.pflip[1])
+			radeon_crtc_handle_flip(rdev, 1);
+		if (rdev->irq.crtc_vblank_int[1]) {
+			drm_handle_vblank(rdev->ddev, 1);
+			rdev->pm.vblank_sync = true;
+			wake_up(&rdev->irq.vblank_queue);
+		}
 	}
 	if (status & RADEON_FP_DETECT_STAT) {
 		queue_hotplug = true;
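[Editorial note] As the in-code comment in r100_page_flip() above says, pre-avivo parts (R1xx-R4xx) never reliably assert an update-pending bit, so the hook returns RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET as a best-effort stand-in; this is the "note about update pending bit on pre-avivo chips" from the v3 changelog. A hedged fragment of how a chip-agnostic caller could treat the return value (the patch's actual consumer is radeon_crtc_handle_flip() in radeon_display.c, whose hunk is not shown here):

/* Hedged fragment: one flip call, one chip-agnostic pending check. */
u32 update_pending = radeon_page_flip(rdev, crtc_id, new_crtc_base);
if (!update_pending) {
	/* base already latched (or a pre-avivo part, where the bit is
	 * imprecise): the flip can be completed without waiting for
	 * another vblank. */
}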
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f604e29b..fc437059918f 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
 #define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
 #define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
 
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE                         0x60e4
+
 /* master controls */
 #define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
 #define AVIVO_DC_CRTC_TV_CONTROL                                0x60fc
@@ -409,8 +411,10 @@
 #define AVIVO_D1GRPH_X_END                                      0x6134
 #define AVIVO_D1GRPH_Y_END                                      0x6138
 #define AVIVO_D1GRPH_UPDATE                                     0x6144
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING              (1 << 2)
 #       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
 #define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN         (1 << 0)
 
 #define AVIVO_D1CUR_CONTROL                                     0x6400
 #       define AVIVO_D1CURSOR_EN                                (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a3552594ccc4..15b95724c408 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2863,6 +2863,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(DxMODE_INT_MASK, 0);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
 	if (ASIC_IS_DCE3(rdev)) {
 		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
 		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2987,6 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi1, hdmi2;
+	u32 d1grph = 0, d2grph = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3023,11 +3026,13 @@ int r600_irq_set(struct radeon_device *rdev)
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("r600_irq_set: vblank 0\n");
 		mode_int |= D1MODE_VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		DRM_DEBUG("r600_irq_set: vblank 1\n");
 		mode_int |= D2MODE_VBLANK_INT_MASK;
 	}
@@ -3070,6 +3075,8 @@ int r600_irq_set(struct radeon_device *rdev)
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
 	if (ASIC_IS_DCE3(rdev)) {
@@ -3092,32 +3099,35 @@ int r600_irq_set(struct radeon_device *rdev)
 	return 0;
 }
 
-static inline void r600_irq_ack(struct radeon_device *rdev,
-				u32 *disp_int,
-				u32 *disp_int_cont,
-				u32 *disp_int_cont2)
+static inline void r600_irq_ack(struct radeon_device *rdev)
 {
 	u32 tmp;
 
 	if (ASIC_IS_DCE3(rdev)) {
-		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
 	} else {
-		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = 0;
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
 	}
-
-	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
 		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
 		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D2_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
 		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & DC_HPD1_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD1_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3128,7 +3138,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int & DC_HPD2_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD2_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3149,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD3_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3150,18 +3160,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
 		tmp = RREG32(DC_HPD4_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 	}
 	if (ASIC_IS_DCE32(rdev)) {
-		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD5_INT_CONTROL, tmp);
 		}
-		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3183,12 +3193,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
 
 void r600_irq_disable(struct radeon_device *rdev)
 {
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_irq_ack(rdev);
 	r600_disable_interrupt_state(rdev);
 }
 
@@ -3251,7 +3259,7 @@ int r600_irq_process(struct radeon_device *rdev)
 	u32 wptr = r600_get_ih_wptr(rdev);
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
-	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+	u32 ring_index;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
@@ -3272,7 +3280,7 @@ int r600_irq_process(struct radeon_device *rdev)
 
 restart_ih:
 	/* display interrupts */
-	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_irq_ack(rdev);
 
 	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
@@ -3285,17 +3293,21 @@ restart_ih:
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.pflip[0])
+						radeon_crtc_handle_flip(rdev, 0);
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D1_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D1 vline\n");
 				}
 				break;
@@ -3307,17 +3319,21 @@ restart_ih:
 		case 5: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.pflip[1])
+						radeon_crtc_handle_flip(rdev, 1);
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D2_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D2_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D2 vline\n");
 				}
 				break;
@@ -3329,43 +3345,43 @@ restart_ih:
 		case 19: /* HPD/DAC hotplug */
 			switch (src_data) {
 			case 0:
-				if (disp_int & DC_HPD1_INTERRUPT) {
-					disp_int &= ~DC_HPD1_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD1\n");
 				}
 				break;
 			case 1:
-				if (disp_int & DC_HPD2_INTERRUPT) {
-					disp_int &= ~DC_HPD2_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD2\n");
 				}
 				break;
 			case 4:
-				if (disp_int_cont & DC_HPD3_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD3_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD3\n");
 				}
 				break;
 			case 5:
-				if (disp_int_cont & DC_HPD4_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD4_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD4\n");
 				}
 				break;
 			case 10:
-				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD5\n");
 				}
 				break;
 			case 12:
-				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD6\n");
 				}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bff4dc4f410f..c89cfa8e0c05 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -728,6 +728,15 @@
 /* DCE 3.2 */
 #       define DC_HPDx_EN                                 (1 << 28)
 
+#define D1GRPH_INTERRUPT_STATUS                           0x6158
+#define D2GRPH_INTERRUPT_STATUS                           0x6958
+#       define DxGRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define DxGRPH_PFLIP_INT_CLEAR                     (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL                          0x615c
+#define D2GRPH_INTERRUPT_CONTROL                          0x695c
+#       define DxGRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define DxGRPH_PFLIP_INT_TYPE                      (1 << 8)
+
 /*
  * PM4
  */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a7095743d44..ddf1eca13401 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -377,11 +377,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
 /*
  * IRQS.
  */
+
+struct radeon_unpin_work {
+	struct work_struct work;
+	struct radeon_device *rdev;
+	int crtc_id;
+	struct radeon_fence *fence;
+	struct drm_pending_vblank_event *event;
+	struct radeon_bo *old_rbo;
+	u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+	u32 disp_int;
+};
+
+struct r600_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 d1grph_int;
+	u32 d2grph_int;
+};
+
+struct evergreen_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 disp_int_cont3;
+	u32 disp_int_cont4;
+	u32 disp_int_cont5;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 d3grph_int;
+	u32 d4grph_int;
+	u32 d5grph_int;
+	u32 d6grph_int;
+};
+
+union radeon_irq_stat_regs {
+	struct r500_irq_stat_regs r500;
+	struct r600_irq_stat_regs r600;
+	struct evergreen_irq_stat_regs evergreen;
+};
+
 struct radeon_irq {
 	bool installed;
 	bool sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool crtc_vblank_int[6];
+	bool pflip[6];
 	wait_queue_head_t vblank_queue;
 	/* FIXME: use defines for max hpd/dacs */
 	bool hpd[6];
@@ -392,12 +437,17 @@ struct radeon_irq {
 	bool hdmi[2];
 	spinlock_t sw_lock;
 	int sw_refcount;
+	union radeon_irq_stat_regs stat_regs;
+	spinlock_t pflip_lock[6];
+	int pflip_refcount[6];
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
 void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 /*
  * CP & ring.
@@ -881,6 +931,10 @@ struct radeon_asic {
 	void (*pm_finish)(struct radeon_device *rdev);
 	void (*pm_init_profile)(struct radeon_device *rdev);
 	void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+	/* pageflipping */
+	void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+	u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+	void (*post_page_flip)(struct radeon_device *rdev, int crtc);
 };
 
 /*
@@ -1344,6 +1398,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
 
 /* Common functions */
 /* AGP */
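[Editorial note] radeon_irq_kms_pflip_irq_get()/put() are declared in the radeon.h hunk above, but their bodies live in the radeon_irq_kms.c hunk, which this excerpt omits. Given the new pflip_lock/pflip_refcount fields, a plausible shape mirrors the existing sw_irq get/put helpers; the following is a sketch under that assumption, not the patch's verbatim code:

void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
	if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
		rdev->irq.pflip[crtc] = true;	/* picked up by *_irq_set() */
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}

void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
	BUG_ON(rdev->irq.pflip_refcount[crtc] <= 0);
	if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
		rdev->irq.pflip[crtc] = false;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}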
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 64fb89ecbf74..6b126b3f5fa9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = {
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = {
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = {
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = {
304 .pm_finish = &r100_pm_finish, 313 .pm_finish = &r100_pm_finish,
305 .pm_init_profile = &r100_pm_init_profile, 314 .pm_init_profile = &r100_pm_init_profile,
306 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 315 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
316 .pre_page_flip = &r100_pre_page_flip,
317 .page_flip = &r100_page_flip,
318 .post_page_flip = &r100_post_page_flip,
307}; 319};
308 320
309static struct radeon_asic r420_asic = { 321static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = {
349 .pm_finish = &r100_pm_finish, 361 .pm_finish = &r100_pm_finish,
350 .pm_init_profile = &r420_pm_init_profile, 362 .pm_init_profile = &r420_pm_init_profile,
351 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 363 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
364 .pre_page_flip = &r100_pre_page_flip,
365 .page_flip = &r100_page_flip,
366 .post_page_flip = &r100_post_page_flip,
352}; 367};
353 368
354static struct radeon_asic rs400_asic = { 369static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = {
394 .pm_finish = &r100_pm_finish, 409 .pm_finish = &r100_pm_finish,
395 .pm_init_profile = &r100_pm_init_profile, 410 .pm_init_profile = &r100_pm_init_profile,
396 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 411 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
412 .pre_page_flip = &r100_pre_page_flip,
413 .page_flip = &r100_page_flip,
414 .post_page_flip = &r100_post_page_flip,
397}; 415};
398 416
399static struct radeon_asic rs600_asic = { 417static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = {
439 .pm_finish = &rs600_pm_finish, 457 .pm_finish = &rs600_pm_finish,
440 .pm_init_profile = &r420_pm_init_profile, 458 .pm_init_profile = &r420_pm_init_profile,
441 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 459 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
460 .pre_page_flip = &rs600_pre_page_flip,
461 .page_flip = &rs600_page_flip,
462 .post_page_flip = &rs600_post_page_flip,
442}; 463};
443 464
444static struct radeon_asic rs690_asic = { 465static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = {
484 .pm_finish = &rs600_pm_finish, 505 .pm_finish = &rs600_pm_finish,
485 .pm_init_profile = &r420_pm_init_profile, 506 .pm_init_profile = &r420_pm_init_profile,
486 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 507 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
508 .pre_page_flip = &rs600_pre_page_flip,
509 .page_flip = &rs600_page_flip,
510 .post_page_flip = &rs600_post_page_flip,
487}; 511};
488 512
489static struct radeon_asic rv515_asic = { 513static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = {
529 .pm_finish = &rs600_pm_finish, 553 .pm_finish = &rs600_pm_finish,
530 .pm_init_profile = &r420_pm_init_profile, 554 .pm_init_profile = &r420_pm_init_profile,
531 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 555 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
556 .pre_page_flip = &rs600_pre_page_flip,
557 .page_flip = &rs600_page_flip,
558 .post_page_flip = &rs600_post_page_flip,
532}; 559};
533 560
534static struct radeon_asic r520_asic = { 561static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = {
574 .pm_finish = &rs600_pm_finish, 601 .pm_finish = &rs600_pm_finish,
575 .pm_init_profile = &r420_pm_init_profile, 602 .pm_init_profile = &r420_pm_init_profile,
576 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 603 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
604 .pre_page_flip = &rs600_pre_page_flip,
605 .page_flip = &rs600_page_flip,
606 .post_page_flip = &rs600_post_page_flip,
577}; 607};
578 608
579static struct radeon_asic r600_asic = { 609static struct radeon_asic r600_asic = {
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = {
618 .pm_finish = &rs600_pm_finish, 648 .pm_finish = &rs600_pm_finish,
619 .pm_init_profile = &r600_pm_init_profile, 649 .pm_init_profile = &r600_pm_init_profile,
620 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 650 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
651 .pre_page_flip = &rs600_pre_page_flip,
652 .page_flip = &rs600_page_flip,
653 .post_page_flip = &rs600_post_page_flip,
621}; 654};
622 655
623static struct radeon_asic rs780_asic = { 656static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = {
662 .pm_finish = &rs600_pm_finish, 695 .pm_finish = &rs600_pm_finish,
663 .pm_init_profile = &rs780_pm_init_profile, 696 .pm_init_profile = &rs780_pm_init_profile,
664 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 697 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
698 .pre_page_flip = &rs600_pre_page_flip,
699 .page_flip = &rs600_page_flip,
700 .post_page_flip = &rs600_post_page_flip,
665}; 701};
666 702
667static struct radeon_asic rv770_asic = { 703static struct radeon_asic rv770_asic = {
@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = {
706 .pm_finish = &rs600_pm_finish, 742 .pm_finish = &rs600_pm_finish,
707 .pm_init_profile = &r600_pm_init_profile, 743 .pm_init_profile = &r600_pm_init_profile,
708 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 744 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
745 .pre_page_flip = &rs600_pre_page_flip,
746 .page_flip = &rv770_page_flip,
747 .post_page_flip = &rs600_post_page_flip,
709}; 748};
710 749
711static struct radeon_asic evergreen_asic = { 750static struct radeon_asic evergreen_asic = {
@@ -749,6 +788,9 @@ static struct radeon_asic evergreen_asic = {
749 .pm_finish = &evergreen_pm_finish, 788 .pm_finish = &evergreen_pm_finish,
750 .pm_init_profile = &r600_pm_init_profile, 789 .pm_init_profile = &r600_pm_init_profile,
751 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 790 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
791 .pre_page_flip = &evergreen_pre_page_flip,
792 .page_flip = &evergreen_page_flip,
793 .post_page_flip = &evergreen_post_page_flip,
752}; 794};
753 795
754int radeon_asic_init(struct radeon_device *rdev) 796int radeon_asic_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 740988244143..4970eda1bd41 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -130,6 +130,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
130extern void r100_pm_finish(struct radeon_device *rdev); 130extern void r100_pm_finish(struct radeon_device *rdev);
131extern void r100_pm_init_profile(struct radeon_device *rdev); 131extern void r100_pm_init_profile(struct radeon_device *rdev);
132extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); 132extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
133extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
134extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
135extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
133 136
134/* 137/*
135 * r200,rv250,rs300,rv280 138 * r200,rv250,rs300,rv280
@@ -205,6 +208,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
205extern void rs600_pm_misc(struct radeon_device *rdev); 208extern void rs600_pm_misc(struct radeon_device *rdev);
206extern void rs600_pm_prepare(struct radeon_device *rdev); 209extern void rs600_pm_prepare(struct radeon_device *rdev);
207extern void rs600_pm_finish(struct radeon_device *rdev); 210extern void rs600_pm_finish(struct radeon_device *rdev);
211extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
212extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
213extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
208 214
209/* 215/*
210 * rs690,rs740 216 * rs690,rs740
@@ -287,6 +293,7 @@ void rv770_fini(struct radeon_device *rdev);
287int rv770_suspend(struct radeon_device *rdev); 293int rv770_suspend(struct radeon_device *rdev);
288int rv770_resume(struct radeon_device *rdev); 294int rv770_resume(struct radeon_device *rdev);
289extern void rv770_pm_misc(struct radeon_device *rdev); 295extern void rv770_pm_misc(struct radeon_device *rdev);
296extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
290 297
291/* 298/*
292 * evergreen 299 * evergreen
@@ -314,5 +321,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
314extern void evergreen_pm_misc(struct radeon_device *rdev); 321extern void evergreen_pm_misc(struct radeon_device *rdev);
315extern void evergreen_pm_prepare(struct radeon_device *rdev); 322extern void evergreen_pm_prepare(struct radeon_device *rdev);
316extern void evergreen_pm_finish(struct radeon_device *rdev); 323extern void evergreen_pm_finish(struct radeon_device *rdev);
324extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
325extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
326extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
317 327
318#endif 328#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index eeea7cbb9517..f6493f444faa 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -183,12 +183,273 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
183 kfree(radeon_crtc); 183 kfree(radeon_crtc);
184} 184}
185 185
186/*
187 * Handle unpin events outside the interrupt handler proper.
188 */
189static void radeon_unpin_work_func(struct work_struct *__work)
190{
191 struct radeon_unpin_work *work =
192 container_of(__work, struct radeon_unpin_work, work);
193 int r;
194
195 /* unpin of the old buffer */
196 r = radeon_bo_reserve(work->old_rbo, false);
197 if (likely(r == 0)) {
198 r = radeon_bo_unpin(work->old_rbo);
199 if (unlikely(r != 0)) {
200 DRM_ERROR("failed to unpin buffer after flip\n");
201 }
202 radeon_bo_unreserve(work->old_rbo);
203 } else
204 DRM_ERROR("failed to reserve buffer after flip\n");
205 kfree(work);
206}
207
208void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
209{
210 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
211 struct radeon_unpin_work *work;
212 struct drm_pending_vblank_event *e;
213 struct timeval now;
214 unsigned long flags;
215 u32 update_pending;
216 int vpos, hpos;
217
218 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
219 work = radeon_crtc->unpin_work;
220 if (work == NULL ||
221 !radeon_fence_signaled(work->fence)) {
222 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
223 return;
224 }
225 /* New pageflip, or just completion of a previous one? */
226 if (!radeon_crtc->deferred_flip_completion) {
227 /* do the flip (mmio) */
228 update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
229 } else {
230 /* This is just a completion of a flip queued in crtc
231 * at last invocation. Make sure we go directly to
232 * completion routine.
233 */
234 update_pending = 0;
235 radeon_crtc->deferred_flip_completion = 0;
236 }
237
238 /* Has the pageflip already completed in crtc, or is it certain
239 * to complete in this vblank?
240 */
241 if (update_pending &&
242 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
243 &vpos, &hpos)) &&
244	    (vpos >= 0) &&
245 (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
246		/* crtc didn't flip in this target vblank interval,
247		 * but a flip is pending in the crtc. The crtc will
248		 * complete it in the next vblank interval, so finish
249		 * the flip at the next vblank irq.
250		 */
251 radeon_crtc->deferred_flip_completion = 1;
252 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
253 return;
254 }
255
256	/* The pageflip has completed, or will certainly complete, in this vblank. Clean up. */
257 radeon_crtc->unpin_work = NULL;
258
259 /* wakeup userspace */
260 if (work->event) {
261 e = work->event;
262 do_gettimeofday(&now);
263 e->event.sequence = drm_vblank_count(rdev->ddev, radeon_crtc->crtc_id);
264 e->event.tv_sec = now.tv_sec;
265 e->event.tv_usec = now.tv_usec;
266 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
267 wake_up_interruptible(&e->base.file_priv->event_wait);
268 }
269 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
270
271 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
272 radeon_fence_unref(&work->fence);
273 radeon_post_page_flip(work->rdev, work->crtc_id);
274 schedule_work(&work->work);
275}
276
277static int radeon_crtc_page_flip(struct drm_crtc *crtc,
278 struct drm_framebuffer *fb,
279 struct drm_pending_vblank_event *event)
280{
281 struct drm_device *dev = crtc->dev;
282 struct radeon_device *rdev = dev->dev_private;
283 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
284 struct radeon_framebuffer *old_radeon_fb;
285 struct radeon_framebuffer *new_radeon_fb;
286 struct drm_gem_object *obj;
287 struct radeon_bo *rbo;
288 struct radeon_fence *fence;
289 struct radeon_unpin_work *work;
290 unsigned long flags;
291 u32 tiling_flags, pitch_pixels;
292 u64 base;
293 int r;
294
295 work = kzalloc(sizeof *work, GFP_KERNEL);
296 if (work == NULL)
297 return -ENOMEM;
298
299 r = radeon_fence_create(rdev, &fence);
300 if (unlikely(r != 0)) {
301 kfree(work);
302 DRM_ERROR("flip queue: failed to create fence.\n");
303 return -ENOMEM;
304 }
305 work->event = event;
306 work->rdev = rdev;
307 work->crtc_id = radeon_crtc->crtc_id;
308 work->fence = radeon_fence_ref(fence);
309 old_radeon_fb = to_radeon_framebuffer(crtc->fb);
310 new_radeon_fb = to_radeon_framebuffer(fb);
311 /* schedule unpin of the old buffer */
312 obj = old_radeon_fb->obj;
313 rbo = obj->driver_private;
314 work->old_rbo = rbo;
315 INIT_WORK(&work->work, radeon_unpin_work_func);
316
317 /* We borrow the event spin lock for protecting unpin_work */
318 spin_lock_irqsave(&dev->event_lock, flags);
319 if (radeon_crtc->unpin_work) {
320 spin_unlock_irqrestore(&dev->event_lock, flags);
321 kfree(work);
322 radeon_fence_unref(&fence);
323
324 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
325 return -EBUSY;
326 }
327 radeon_crtc->unpin_work = work;
328 radeon_crtc->deferred_flip_completion = 0;
329 spin_unlock_irqrestore(&dev->event_lock, flags);
330
331 /* pin the new buffer */
332 obj = new_radeon_fb->obj;
333 rbo = obj->driver_private;
334
335 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
336 work->old_rbo, rbo);
337
338 r = radeon_bo_reserve(rbo, false);
339 if (unlikely(r != 0)) {
340 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
341 goto pflip_cleanup;
342 }
343 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
344 if (unlikely(r != 0)) {
345 radeon_bo_unreserve(rbo);
346 r = -EINVAL;
347 DRM_ERROR("failed to pin new rbo buffer before flip\n");
348 goto pflip_cleanup;
349 }
350 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
351 radeon_bo_unreserve(rbo);
352
353 if (!ASIC_IS_AVIVO(rdev)) {
354 /* crtc offset is from display base addr not FB location */
355 base -= radeon_crtc->legacy_display_base_addr;
356 pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
357
358 if (tiling_flags & RADEON_TILING_MACRO) {
359 if (ASIC_IS_R300(rdev)) {
360 base &= ~0x7ff;
361 } else {
362 int byteshift = fb->bits_per_pixel >> 4;
363 int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
364 base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
365 }
366 } else {
367 int offset = crtc->y * pitch_pixels + crtc->x;
368 switch (fb->bits_per_pixel) {
369 case 8:
370 default:
371 offset *= 1;
372 break;
373 case 15:
374 case 16:
375 offset *= 2;
376 break;
377 case 24:
378 offset *= 3;
379 break;
380 case 32:
381 offset *= 4;
382 break;
383 }
384 base += offset;
385 }
386 base &= ~7;
387 }
388
389 spin_lock_irqsave(&dev->event_lock, flags);
390 work->new_crtc_base = base;
391 spin_unlock_irqrestore(&dev->event_lock, flags);
392
393 /* update crtc fb */
394 crtc->fb = fb;
395
396 r = drm_vblank_get(dev, radeon_crtc->crtc_id);
397 if (r) {
398 DRM_ERROR("failed to get vblank before flip\n");
399 goto pflip_cleanup1;
400 }
401
402	/* 32 dwords of ring space ought to cover the fence emit */
403 r = radeon_ring_lock(rdev, 32);
404 if (r) {
405 DRM_ERROR("failed to lock the ring before flip\n");
406 goto pflip_cleanup2;
407 }
408
409 /* emit the fence */
410 radeon_fence_emit(rdev, fence);
411 /* set the proper interrupt */
412 radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
413 /* fire the ring */
414 radeon_ring_unlock_commit(rdev);
415
416 return 0;
417
418pflip_cleanup2:
419 drm_vblank_put(dev, radeon_crtc->crtc_id);
420
421pflip_cleanup1:
422 r = radeon_bo_reserve(rbo, false);
423 if (unlikely(r != 0)) {
424 DRM_ERROR("failed to reserve new rbo in error path\n");
425 goto pflip_cleanup;
426 }
427 r = radeon_bo_unpin(rbo);
428 if (unlikely(r != 0)) {
429 radeon_bo_unreserve(rbo);
430 r = -EINVAL;
431 DRM_ERROR("failed to unpin new rbo in error path\n");
432 goto pflip_cleanup;
433 }
434 radeon_bo_unreserve(rbo);
435
436pflip_cleanup:
437 spin_lock_irqsave(&dev->event_lock, flags);
438 radeon_crtc->unpin_work = NULL;
439 spin_unlock_irqrestore(&dev->event_lock, flags);
440 radeon_fence_unref(&fence);
441 kfree(work);
442
443 return r;
444}
445
186static const struct drm_crtc_funcs radeon_crtc_funcs = { 446static const struct drm_crtc_funcs radeon_crtc_funcs = {
187 .cursor_set = radeon_crtc_cursor_set, 447 .cursor_set = radeon_crtc_cursor_set,
188 .cursor_move = radeon_crtc_cursor_move, 448 .cursor_move = radeon_crtc_cursor_move,
189 .gamma_set = radeon_crtc_gamma_set, 449 .gamma_set = radeon_crtc_gamma_set,
190 .set_config = drm_crtc_helper_set_config, 450 .set_config = drm_crtc_helper_set_config,
191 .destroy = radeon_crtc_destroy, 451 .destroy = radeon_crtc_destroy,
452 .page_flip = radeon_crtc_page_flip,
192}; 453};
193 454
194static void radeon_crtc_init(struct drm_device *dev, int index) 455static void radeon_crtc_init(struct drm_device *dev, int index)
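
With .page_flip wired into radeon_crtc_funcs, userspace reaches radeon_crtc_page_flip() through the generic DRM page-flip ioctl. Here is a hedged sketch of the libdrm side, assuming an already-configured CRTC (crtc_id) and a second framebuffer (fb_id), with error handling trimmed. Note that drmModePageFlip() returns -EBUSY while a flip is still pending on the crtc, which corresponds to the unpin_work check above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int flip_done;

static void page_flip_handler(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
{
	/* Completion event, timestamped from the vblank path. */
	flip_done = 1;
}

/* Queue one flip to fb_id and block until the kernel signals it. */
static int flip_once(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx;
	int ret;

	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret) {
		fprintf(stderr, "drmModePageFlip: %d\n", ret);
		return ret;
	}

	memset(&evctx, 0, sizeof(evctx));
	evctx.version = DRM_EVENT_CONTEXT_VERSION;
	evctx.page_flip_handler = page_flip_handler;

	flip_done = 0;
	while (!flip_done)
		drmHandleEvent(fd, &evctx); /* delivers the flip event */
	return 0;
}
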
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 32ec0cc6be92..a92d2a5cea90 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,9 +48,10 @@
48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen 48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) 49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
51 * 2.8.0 - pageflip support
51 */ 52 */
52#define KMS_DRIVER_MAJOR 2 53#define KMS_DRIVER_MAJOR 2
53#define KMS_DRIVER_MINOR 7 54#define KMS_DRIVER_MINOR 8
54#define KMS_DRIVER_PATCHLEVEL 0 55#define KMS_DRIVER_PATCHLEVEL 0
55int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 56int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
56int radeon_driver_unload_kms(struct drm_device *dev); 57int radeon_driver_unload_kms(struct drm_device *dev);
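
The bump to 2.8.0 is also how userspace can detect the new capability before relying on it. A small sketch using libdrm's version query; the 2.8 threshold comes from the hunk above.

#include <xf86drm.h>

/* Returns 1 if the radeon KMS driver advertises pageflip (>= 2.8.0). */
static int radeon_has_pageflip(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	int ok;

	if (!v)
		return 0;
	ok = v->version_major > 2 ||
	     (v->version_major == 2 && v->version_minor >= 8);
	drmFreeVersion(v);
	return ok;
}
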
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7ed14f5..e0d1c6d1b9c7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -71,8 +71,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
71 rdev->irq.gui_idle = false; 71 rdev->irq.gui_idle = false;
72 for (i = 0; i < rdev->num_crtc; i++) 72 for (i = 0; i < rdev->num_crtc; i++)
73 rdev->irq.crtc_vblank_int[i] = false; 73 rdev->irq.crtc_vblank_int[i] = false;
74 for (i = 0; i < 6; i++) 74 for (i = 0; i < 6; i++) {
75 rdev->irq.hpd[i] = false; 75 rdev->irq.hpd[i] = false;
76 rdev->irq.pflip[i] = false;
77 }
76 radeon_irq_set(rdev); 78 radeon_irq_set(rdev);
77 /* Clear bits */ 79 /* Clear bits */
78 radeon_irq_process(rdev); 80 radeon_irq_process(rdev);
@@ -101,8 +103,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
101 rdev->irq.gui_idle = false; 103 rdev->irq.gui_idle = false;
102 for (i = 0; i < rdev->num_crtc; i++) 104 for (i = 0; i < rdev->num_crtc; i++)
103 rdev->irq.crtc_vblank_int[i] = false; 105 rdev->irq.crtc_vblank_int[i] = false;
104 for (i = 0; i < 6; i++) 106 for (i = 0; i < 6; i++) {
105 rdev->irq.hpd[i] = false; 107 rdev->irq.hpd[i] = false;
108 rdev->irq.pflip[i] = false;
109 }
106 radeon_irq_set(rdev); 110 radeon_irq_set(rdev);
107} 111}
108 112
@@ -175,3 +179,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
175 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); 179 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
176} 180}
177 181
182void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
183{
184 unsigned long irqflags;
185
186 if (crtc < 0 || crtc >= rdev->num_crtc)
187 return;
188
189 spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
190 if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
191 rdev->irq.pflip[crtc] = true;
192 radeon_irq_set(rdev);
193 }
194 spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
195}
196
197void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
198{
199 unsigned long irqflags;
200
201 if (crtc < 0 || crtc >= rdev->num_crtc)
202 return;
203
204 spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
205 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
206 if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
207 rdev->irq.pflip[crtc] = false;
208 radeon_irq_set(rdev);
209 }
210 spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
211}
212
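
The get/put pair above implements a per-crtc reference count: only the 0->1 transition unmasks the crtc's vblank interrupt and only the 1->0 transition masks it again, so overlapping flips never toggle each other's interrupt state. A standalone restatement of that invariant follows; the names are illustrative and the spinlock is elided.

/* Per-crtc refcounted enable: only the 0->1 and 1->0 transitions
 * would touch the hardware interrupt mask. */
static int pflip_refcount[6];
static int pflip_enabled[6];

static void pflip_get(int crtc)
{
	if (++pflip_refcount[crtc] == 1)
		pflip_enabled[crtc] = 1;  /* first flip in flight: unmask */
}

static void pflip_put(int crtc)
{
	if (--pflip_refcount[crtc] == 0)
		pflip_enabled[crtc] = 0;  /* last flip retired: mask */
}

int main(void)
{
	pflip_get(0);  /* flip A queued on crtc 0 */
	pflip_get(0);  /* flip B queued on crtc 0: irq stays unmasked */
	pflip_put(0);
	pflip_put(0);  /* only now does the irq get masked again */
	return 0;
}
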
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 55856ad0ac41..f406f02bf14e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -277,6 +277,9 @@ struct radeon_crtc {
277 fixed20_12 hsc; 277 fixed20_12 hsc;
278 struct drm_display_mode native_mode; 278 struct drm_display_mode native_mode;
279 int pll_id; 279 int pll_id;
280 /* page flipping */
281 struct radeon_unpin_work *unpin_work;
282 int deferred_flip_completion;
280}; 283};
281 284
282struct radeon_encoder_primary_dac { 285struct radeon_encoder_primary_dac {
@@ -659,4 +662,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev);
659bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); 662bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
660 663
661void radeon_fb_output_poll_changed(struct radeon_device *rdev); 664void radeon_fb_output_poll_changed(struct radeon_device *rdev);
665
666void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
667
662#endif 668#endif
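
The two new radeon_crtc fields carry a flip across up to two vblank interrupts: unpin_work holds the in-flight request, while deferred_flip_completion marks a flip that was programmed but whose hardware update was still pending, so completion is pushed to the next vblank irq (see radeon_crtc_handle_flip() above). A schematic of the resulting states, as a sketch rather than kernel code:

/* States a radeon_crtc moves through, as driven by the pageflip
 * ioctl and radeon_crtc_handle_flip() (schematic only): */
enum flip_state {
	FLIP_IDLE,     /* unpin_work == NULL                               */
	FLIP_QUEUED,   /* ioctl stored unpin_work; fence may be unsignaled */
	FLIP_DEFERRED, /* flip programmed but update_pending persisted;
			* deferred_flip_completion == 1, wait one more irq */
	FLIP_DONE      /* event sent, old buffer handed to the unpin work  */
};
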
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 64928814de53..26c43e234350 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -422,6 +422,7 @@
422# define RADEON_CRTC_CSYNC_EN (1 << 4) 422# define RADEON_CRTC_CSYNC_EN (1 << 4)
423# define RADEON_CRTC_ICON_EN (1 << 15) 423# define RADEON_CRTC_ICON_EN (1 << 15)
424# define RADEON_CRTC_CUR_EN (1 << 16) 424# define RADEON_CRTC_CUR_EN (1 << 16)
425# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
425# define RADEON_CRTC_CUR_MODE_MASK (7 << 20) 426# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
426# define RADEON_CRTC_CUR_MODE_SHIFT 20 427# define RADEON_CRTC_CUR_MODE_SHIFT 20
427# define RADEON_CRTC_CUR_MODE_MONO 0 428# define RADEON_CRTC_CUR_MODE_MONO 0
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02c2e6b..683652bea17c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,56 @@
46void rs600_gpu_init(struct radeon_device *rdev); 46void rs600_gpu_init(struct radeon_device *rdev);
47int rs600_mc_wait_for_idle(struct radeon_device *rdev); 47int rs600_mc_wait_for_idle(struct radeon_device *rdev);
48 48
49void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
50{
51 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
52 u32 tmp;
53
54 /* make sure flip is at vb rather than hb */
55 tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
56 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
57 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
58
59 /* set pageflip to happen anywhere in vblank interval */
60 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
61
62 /* enable the pflip int */
63 radeon_irq_kms_pflip_irq_get(rdev, crtc);
64}
65
66void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
67{
68 /* disable the pflip int */
69 radeon_irq_kms_pflip_irq_put(rdev, crtc);
70}
71
72u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
73{
74 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
75 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
76
77 /* Lock the graphics update lock */
78 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
79 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
80
81 /* update the scanout addresses */
82 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
83 (u32)crtc_base);
84 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
85 (u32)crtc_base);
86
87 /* Wait for update_pending to go high. */
88 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
89 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
90
91 /* Unlock the lock, so double-buffering can take place inside vblank */
92 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
93 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
94
95 /* Return current update_pending status: */
96 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
97}
98
49void rs600_pm_misc(struct radeon_device *rdev) 99void rs600_pm_misc(struct radeon_device *rdev)
50{ 100{
51 int requested_index = rdev->pm.requested_power_state_index; 101 int requested_index = rdev->pm.requested_power_state_index;
@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev)
515 if (rdev->irq.gui_idle) { 565 if (rdev->irq.gui_idle) {
516 tmp |= S_000040_GUI_IDLE(1); 566 tmp |= S_000040_GUI_IDLE(1);
517 } 567 }
518 if (rdev->irq.crtc_vblank_int[0]) { 568 if (rdev->irq.crtc_vblank_int[0] ||
569 rdev->irq.pflip[0]) {
519 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); 570 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
520 } 571 }
521 if (rdev->irq.crtc_vblank_int[1]) { 572 if (rdev->irq.crtc_vblank_int[1] ||
573 rdev->irq.pflip[1]) {
522 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); 574 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
523 } 575 }
524 if (rdev->irq.hpd[0]) { 576 if (rdev->irq.hpd[0]) {
@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev)
534 return 0; 586 return 0;
535} 587}
536 588
537static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) 589static inline u32 rs600_irq_ack(struct radeon_device *rdev)
538{ 590{
539 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); 591 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
540 uint32_t irq_mask = S_000044_SW_INT(1); 592 uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
547 } 599 }
548 600
549 if (G_000044_DISPLAY_INT_STAT(irqs)) { 601 if (G_000044_DISPLAY_INT_STAT(irqs)) {
550 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); 602 rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
551 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { 603 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
552 WREG32(R_006534_D1MODE_VBLANK_STATUS, 604 WREG32(R_006534_D1MODE_VBLANK_STATUS,
553 S_006534_D1MODE_VBLANK_ACK(1)); 605 S_006534_D1MODE_VBLANK_ACK(1));
554 } 606 }
555 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { 607 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
556 WREG32(R_006D34_D2MODE_VBLANK_STATUS, 608 WREG32(R_006D34_D2MODE_VBLANK_STATUS,
557 S_006D34_D2MODE_VBLANK_ACK(1)); 609 S_006D34_D2MODE_VBLANK_ACK(1));
558 } 610 }
559 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { 611 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
560 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); 612 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
561 tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); 613 tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
562 WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 614 WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
563 } 615 }
564 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { 616 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
565 tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); 617 tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
566 tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); 618 tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
567 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 619 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
568 } 620 }
569 } else { 621 } else {
570 *r500_disp_int = 0; 622 rdev->irq.stat_regs.r500.disp_int = 0;
571 } 623 }
572 624
573 if (irqs) { 625 if (irqs) {
@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
578 630
579void rs600_irq_disable(struct radeon_device *rdev) 631void rs600_irq_disable(struct radeon_device *rdev)
580{ 632{
581 u32 tmp;
582
583 WREG32(R_000040_GEN_INT_CNTL, 0); 633 WREG32(R_000040_GEN_INT_CNTL, 0);
584 WREG32(R_006540_DxMODE_INT_MASK, 0); 634 WREG32(R_006540_DxMODE_INT_MASK, 0);
585 /* Wait and acknowledge irq */ 635 /* Wait and acknowledge irq */
586 mdelay(1); 636 mdelay(1);
587 rs600_irq_ack(rdev, &tmp); 637 rs600_irq_ack(rdev);
588} 638}
589 639
590int rs600_irq_process(struct radeon_device *rdev) 640int rs600_irq_process(struct radeon_device *rdev)
591{ 641{
592 uint32_t status, msi_rearm; 642 u32 status, msi_rearm;
593 uint32_t r500_disp_int;
594 bool queue_hotplug = false; 643 bool queue_hotplug = false;
595 644
596 /* reset gui idle ack. the status bit is broken */ 645 /* reset gui idle ack. the status bit is broken */
597 rdev->irq.gui_idle_acked = false; 646 rdev->irq.gui_idle_acked = false;
598 647
599 status = rs600_irq_ack(rdev, &r500_disp_int); 648 status = rs600_irq_ack(rdev);
600 if (!status && !r500_disp_int) { 649 if (!status && !rdev->irq.stat_regs.r500.disp_int) {
601 return IRQ_NONE; 650 return IRQ_NONE;
602 } 651 }
603 while (status || r500_disp_int) { 652 while (status || rdev->irq.stat_regs.r500.disp_int) {
604 /* SW interrupt */ 653 /* SW interrupt */
605 if (G_000044_SW_INT(status)) 654 if (G_000044_SW_INT(status)) {
606 radeon_fence_process(rdev); 655 radeon_fence_process(rdev);
656 }
607 /* GUI idle */ 657 /* GUI idle */
608 if (G_000040_GUI_IDLE(status)) { 658 if (G_000040_GUI_IDLE(status)) {
609 rdev->irq.gui_idle_acked = true; 659 rdev->irq.gui_idle_acked = true;
@@ -611,25 +661,33 @@ int rs600_irq_process(struct radeon_device *rdev)
611 wake_up(&rdev->irq.idle_queue); 661 wake_up(&rdev->irq.idle_queue);
612 } 662 }
613 /* Vertical blank interrupts */ 663 /* Vertical blank interrupts */
614 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { 664 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
615 drm_handle_vblank(rdev->ddev, 0); 665 if (rdev->irq.pflip[0])
616 rdev->pm.vblank_sync = true; 666 radeon_crtc_handle_flip(rdev, 0);
617 wake_up(&rdev->irq.vblank_queue); 667 if (rdev->irq.crtc_vblank_int[0]) {
668 drm_handle_vblank(rdev->ddev, 0);
669 rdev->pm.vblank_sync = true;
670 wake_up(&rdev->irq.vblank_queue);
671 }
618 } 672 }
619 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { 673 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
620 drm_handle_vblank(rdev->ddev, 1); 674 if (rdev->irq.pflip[1])
621 rdev->pm.vblank_sync = true; 675 radeon_crtc_handle_flip(rdev, 1);
622 wake_up(&rdev->irq.vblank_queue); 676 if (rdev->irq.crtc_vblank_int[1]) {
677 drm_handle_vblank(rdev->ddev, 1);
678 rdev->pm.vblank_sync = true;
679 wake_up(&rdev->irq.vblank_queue);
680 }
623 } 681 }
624 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { 682 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
625 queue_hotplug = true; 683 queue_hotplug = true;
626 DRM_DEBUG("HPD1\n"); 684 DRM_DEBUG("HPD1\n");
627 } 685 }
628 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { 686 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
629 queue_hotplug = true; 687 queue_hotplug = true;
630 DRM_DEBUG("HPD2\n"); 688 DRM_DEBUG("HPD2\n");
631 } 689 }
632 status = rs600_irq_ack(rdev, &r500_disp_int); 690 status = rs600_irq_ack(rdev);
633 } 691 }
634 /* reset gui idle ack. the status bit is broken */ 692 /* reset gui idle ack. the status bit is broken */
635 rdev->irq.gui_idle_acked = false; 693 rdev->irq.gui_idle_acked = false;
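
rs600_page_flip() above, like its rv770 and evergreen siblings, follows one MMIO protocol: lock GRPH_UPDATE so both surface-address writes latch atomically, program the new scanout address, spin until UPDATE_PENDING goes high, unlock so the hardware can retire the latch inside vblank, and return the pending bit so radeon_crtc_handle_flip() can choose between immediate and deferred completion. A condensed simulation of that shape; the register names and the instant-latching "hardware" are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

#define LOCK_BIT    (1u << 0)
#define PENDING_BIT (1u << 2)

/* Simulated GRPH_UPDATE register; this "hardware" latches instantly. */
static uint32_t grph_update;

static void set_scanout(uint64_t base)
{
	printf("scanout base -> 0x%llx\n", (unsigned long long)base);
	grph_update |= PENDING_BIT; /* address write latched */
}

/* Condensed shape of the flip protocol above. */
static uint32_t flip_protocol(uint64_t new_base)
{
	grph_update |= LOCK_BIT;          /* 1. freeze the double-buffer latch */
	set_scanout(new_base);            /* 2. program both surface registers */
	while (!(grph_update & PENDING_BIT))
		;                         /* 3. wait until hw saw the write    */
	grph_update &= ~LOCK_BIT;         /* 4. let vblank retire the latch    */
	return grph_update & PENDING_BIT; /* 5. caller defers if still set     */
}

int main(void)
{
	printf("pending after flip: %d\n", flip_protocol(0x100000) != 0);
	return 0;
}
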
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4dfead8cee33..42ff07893f3a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,40 @@
42static void rv770_gpu_init(struct radeon_device *rdev); 42static void rv770_gpu_init(struct radeon_device *rdev);
43void rv770_fini(struct radeon_device *rdev); 43void rv770_fini(struct radeon_device *rdev);
44 44
45u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
46{
47 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
48 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
49
50 /* Lock the graphics update lock */
51 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
52 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
53
54 /* update the scanout addresses */
55 if (radeon_crtc->crtc_id) {
56 WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
57 WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
58 } else {
59 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
60 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
61 }
62 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
63 (u32)crtc_base);
64 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
65 (u32)crtc_base);
66
67 /* Wait for update_pending to go high. */
68 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
69 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
70
71 /* Unlock the lock, so double-buffering can take place inside vblank */
72 tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
73 WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
74
75 /* Return current update_pending status: */
76 return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
77}
78
45/* get temperature in millidegrees */ 79/* get temperature in millidegrees */
46u32 rv770_get_temp(struct radeon_device *rdev) 80u32 rv770_get_temp(struct radeon_device *rdev)
47{ 81{
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b7a5a20e81dc..11955c685ad1 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -351,4 +351,11 @@
351 351
352#define SRBM_STATUS 0x0E50 352#define SRBM_STATUS 0x0E50
353 353
354#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
355#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
356#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
357#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
358#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
359#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
360
354#endif 361#endif
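
These offsets also explain the crtc_id branch in rv770_page_flip(): the low 32-bit surface-address registers sit at the usual per-crtc stride, so crtc_offset addressing works for them, but the D1/D2 *_HIGH registers do not, and the right one has to be picked explicitly. A small helper sketching that selection, with values copied from the defines above:

#include <stdint.h>

#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114

/* The HIGH registers are not reachable through the per-crtc
 * crtc_offset stride, hence the explicit branch on crtc_id. */
static uint32_t primary_high_reg(int crtc_id)
{
	return crtc_id ? D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH
		       : D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH;
}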