about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/i915_gem.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-04-17 10:31:25 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-04-18 07:18:25 -0400
commita360bb1a83279243a0945a0e646fd6c66521864e (patch)
treef59d76a51ec5c08efb60b8b2925c21ef835de808 /drivers/gpu/drm/i915/i915_gem.c
parent06d9813157cca181e3ca0aff769767669afe8adf (diff)
drm/i915: Remove fence pipelining
Step 2 is then to replace the pipelined parameter with NULL and perform constant folding to remove dead code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  155
1 file changed, 36 insertions(+), 119 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 40e080865463..5a9d90f117d3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2166,8 +2166,7 @@ int i915_gpu_idle(struct drm_device *dev, bool do_retire)
2166 return 0; 2166 return 0;
2167} 2167}
2168 2168
2169static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, 2169static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj)
2170 struct intel_ring_buffer *pipelined)
2171{ 2170{
2172 struct drm_device *dev = obj->base.dev; 2171 struct drm_device *dev = obj->base.dev;
2173 drm_i915_private_t *dev_priv = dev->dev_private; 2172 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2185,26 +2184,12 @@ static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2185 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2184 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2186 val |= I965_FENCE_REG_VALID; 2185 val |= I965_FENCE_REG_VALID;
2187 2186
2188 if (pipelined) { 2187 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2189 int ret = intel_ring_begin(pipelined, 6);
2190 if (ret)
2191 return ret;
2192
2193 intel_ring_emit(pipelined, MI_NOOP);
2194 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2195 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2196 intel_ring_emit(pipelined, (u32)val);
2197 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2198 intel_ring_emit(pipelined, (u32)(val >> 32));
2199 intel_ring_advance(pipelined);
2200 } else
2201 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2202 2188
2203 return 0; 2189 return 0;
2204} 2190}
2205 2191
2206static int i965_write_fence_reg(struct drm_i915_gem_object *obj, 2192static int i965_write_fence_reg(struct drm_i915_gem_object *obj)
2207 struct intel_ring_buffer *pipelined)
2208{ 2193{
2209 struct drm_device *dev = obj->base.dev; 2194 struct drm_device *dev = obj->base.dev;
2210 drm_i915_private_t *dev_priv = dev->dev_private; 2195 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2220,26 +2205,12 @@ static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2220 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2205 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2221 val |= I965_FENCE_REG_VALID; 2206 val |= I965_FENCE_REG_VALID;
2222 2207
2223 if (pipelined) { 2208 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2224 int ret = intel_ring_begin(pipelined, 6);
2225 if (ret)
2226 return ret;
2227
2228 intel_ring_emit(pipelined, MI_NOOP);
2229 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2230 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2231 intel_ring_emit(pipelined, (u32)val);
2232 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2233 intel_ring_emit(pipelined, (u32)(val >> 32));
2234 intel_ring_advance(pipelined);
2235 } else
2236 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2237 2209
2238 return 0; 2210 return 0;
2239} 2211}
2240 2212
2241static int i915_write_fence_reg(struct drm_i915_gem_object *obj, 2213static int i915_write_fence_reg(struct drm_i915_gem_object *obj)
2242 struct intel_ring_buffer *pipelined)
2243{ 2214{
2244 struct drm_device *dev = obj->base.dev; 2215 struct drm_device *dev = obj->base.dev;
2245 drm_i915_private_t *dev_priv = dev->dev_private; 2216 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2276,24 +2247,12 @@ static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2276 else 2247 else
2277 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; 2248 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2278 2249
2279 if (pipelined) { 2250 I915_WRITE(fence_reg, val);
2280 int ret = intel_ring_begin(pipelined, 4);
2281 if (ret)
2282 return ret;
2283
2284 intel_ring_emit(pipelined, MI_NOOP);
2285 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2286 intel_ring_emit(pipelined, fence_reg);
2287 intel_ring_emit(pipelined, val);
2288 intel_ring_advance(pipelined);
2289 } else
2290 I915_WRITE(fence_reg, val);
2291 2251
2292 return 0; 2252 return 0;
2293} 2253}
2294 2254
2295static int i830_write_fence_reg(struct drm_i915_gem_object *obj, 2255static int i830_write_fence_reg(struct drm_i915_gem_object *obj)
2296 struct intel_ring_buffer *pipelined)
2297{ 2256{
2298 struct drm_device *dev = obj->base.dev; 2257 struct drm_device *dev = obj->base.dev;
2299 drm_i915_private_t *dev_priv = dev->dev_private; 2258 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2319,18 +2278,7 @@ static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2319 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2278 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2320 val |= I830_FENCE_REG_VALID; 2279 val |= I830_FENCE_REG_VALID;
2321 2280
2322 if (pipelined) { 2281 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2323 int ret = intel_ring_begin(pipelined, 4);
2324 if (ret)
2325 return ret;
2326
2327 intel_ring_emit(pipelined, MI_NOOP);
2328 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2329 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2330 intel_ring_emit(pipelined, val);
2331 intel_ring_advance(pipelined);
2332 } else
2333 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2334 2282
2335 return 0; 2283 return 0;
2336} 2284}
@@ -2341,8 +2289,7 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2341} 2289}
2342 2290
2343static int 2291static int
2344i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, 2292i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2345 struct intel_ring_buffer *pipelined)
2346{ 2293{
2347 int ret; 2294 int ret;
2348 2295
@@ -2357,7 +2304,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2357 obj->fenced_gpu_access = false; 2304 obj->fenced_gpu_access = false;
2358 } 2305 }
2359 2306
2360 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { 2307 if (obj->last_fenced_seqno && NULL != obj->last_fenced_ring) {
2361 if (!ring_passed_seqno(obj->last_fenced_ring, 2308 if (!ring_passed_seqno(obj->last_fenced_ring,
2362 obj->last_fenced_seqno)) { 2309 obj->last_fenced_seqno)) {
2363 ret = i915_wait_request(obj->last_fenced_ring, 2310 ret = i915_wait_request(obj->last_fenced_ring,
@@ -2388,7 +2335,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2388 if (obj->tiling_mode) 2335 if (obj->tiling_mode)
2389 i915_gem_release_mmap(obj); 2336 i915_gem_release_mmap(obj);
2390 2337
2391 ret = i915_gem_object_flush_fence(obj, NULL); 2338 ret = i915_gem_object_flush_fence(obj);
2392 if (ret) 2339 if (ret)
2393 return ret; 2340 return ret;
2394 2341
@@ -2406,8 +2353,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2406} 2353}
2407 2354
2408static struct drm_i915_fence_reg * 2355static struct drm_i915_fence_reg *
2409i915_find_fence_reg(struct drm_device *dev, 2356i915_find_fence_reg(struct drm_device *dev)
2410 struct intel_ring_buffer *pipelined)
2411{ 2357{
2412 struct drm_i915_private *dev_priv = dev->dev_private; 2358 struct drm_i915_private *dev_priv = dev->dev_private;
2413 struct drm_i915_fence_reg *reg, *first, *avail; 2359 struct drm_i915_fence_reg *reg, *first, *avail;
@@ -2436,9 +2382,7 @@ i915_find_fence_reg(struct drm_device *dev,
2436 if (first == NULL) 2382 if (first == NULL)
2437 first = reg; 2383 first = reg;
2438 2384
2439 if (!pipelined || 2385 if (reg->obj->last_fenced_ring == NULL) {
2440 !reg->obj->last_fenced_ring ||
2441 reg->obj->last_fenced_ring == pipelined) {
2442 avail = reg; 2386 avail = reg;
2443 break; 2387 break;
2444 } 2388 }
@@ -2469,67 +2413,46 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2469{ 2413{
2470 struct drm_device *dev = obj->base.dev; 2414 struct drm_device *dev = obj->base.dev;
2471 struct drm_i915_private *dev_priv = dev->dev_private; 2415 struct drm_i915_private *dev_priv = dev->dev_private;
2472 struct intel_ring_buffer *pipelined;
2473 struct drm_i915_fence_reg *reg; 2416 struct drm_i915_fence_reg *reg;
2474 int ret; 2417 int ret;
2475 2418
2476 if (obj->tiling_mode == I915_TILING_NONE) 2419 if (obj->tiling_mode == I915_TILING_NONE)
2477 return i915_gem_object_put_fence(obj); 2420 return i915_gem_object_put_fence(obj);
2478 2421
2479 /* XXX disable pipelining. There are bugs. Shocking. */
2480 pipelined = NULL;
2481
2482 /* Just update our place in the LRU if our fence is getting reused. */ 2422 /* Just update our place in the LRU if our fence is getting reused. */
2483 if (obj->fence_reg != I915_FENCE_REG_NONE) { 2423 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2484 reg = &dev_priv->fence_regs[obj->fence_reg]; 2424 reg = &dev_priv->fence_regs[obj->fence_reg];
2485 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2425 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2486 2426
2487 if (obj->tiling_changed) { 2427 if (obj->tiling_changed) {
2488 ret = i915_gem_object_flush_fence(obj, pipelined); 2428 ret = i915_gem_object_flush_fence(obj);
2489 if (ret) 2429 if (ret)
2490 return ret; 2430 return ret;
2491 2431
2492 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2493 pipelined = NULL;
2494
2495 if (pipelined) {
2496 reg->setup_seqno =
2497 i915_gem_next_request_seqno(pipelined);
2498 obj->last_fenced_seqno = reg->setup_seqno;
2499 obj->last_fenced_ring = pipelined;
2500 }
2501
2502 goto update; 2432 goto update;
2503 } 2433 }
2504 2434
2505 if (!pipelined) { 2435 if (reg->setup_seqno) {
2506 if (reg->setup_seqno) { 2436 if (!ring_passed_seqno(obj->last_fenced_ring,
2507 if (!ring_passed_seqno(obj->last_fenced_ring, 2437 reg->setup_seqno)) {
2508 reg->setup_seqno)) { 2438 ret = i915_wait_request(obj->last_fenced_ring,
2509 ret = i915_wait_request(obj->last_fenced_ring, 2439 reg->setup_seqno,
2510 reg->setup_seqno, 2440 true);
2511 true); 2441 if (ret)
2512 if (ret) 2442 return ret;
2513 return ret;
2514 }
2515
2516 reg->setup_seqno = 0;
2517 } 2443 }
2518 } else if (obj->last_fenced_ring && 2444
2519 obj->last_fenced_ring != pipelined) { 2445 reg->setup_seqno = 0;
2520 ret = i915_gem_object_flush_fence(obj, pipelined);
2521 if (ret)
2522 return ret;
2523 } 2446 }
2524 2447
2525 return 0; 2448 return 0;
2526 } 2449 }
2527 2450
2528 reg = i915_find_fence_reg(dev, pipelined); 2451 reg = i915_find_fence_reg(dev);
2529 if (reg == NULL) 2452 if (reg == NULL)
2530 return -EDEADLK; 2453 return -EDEADLK;
2531 2454
2532 ret = i915_gem_object_flush_fence(obj, pipelined); 2455 ret = i915_gem_object_flush_fence(obj);
2533 if (ret) 2456 if (ret)
2534 return ret; 2457 return ret;
2535 2458
@@ -2541,31 +2464,25 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2541 if (old->tiling_mode) 2464 if (old->tiling_mode)
2542 i915_gem_release_mmap(old); 2465 i915_gem_release_mmap(old);
2543 2466
2544 ret = i915_gem_object_flush_fence(old, pipelined); 2467 ret = i915_gem_object_flush_fence(old);
2545 if (ret) { 2468 if (ret) {
2546 drm_gem_object_unreference(&old->base); 2469 drm_gem_object_unreference(&old->base);
2547 return ret; 2470 return ret;
2548 } 2471 }
2549 2472
2550 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2551 pipelined = NULL;
2552
2553 old->fence_reg = I915_FENCE_REG_NONE; 2473 old->fence_reg = I915_FENCE_REG_NONE;
2554 old->last_fenced_ring = pipelined; 2474 old->last_fenced_ring = NULL;
2555 old->last_fenced_seqno = 2475 old->last_fenced_seqno = 0;
2556 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2557 2476
2558 drm_gem_object_unreference(&old->base); 2477 drm_gem_object_unreference(&old->base);
2559 } else if (obj->last_fenced_seqno == 0) 2478 }
2560 pipelined = NULL;
2561 2479
2562 reg->obj = obj; 2480 reg->obj = obj;
2563 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2481 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2564 obj->fence_reg = reg - dev_priv->fence_regs; 2482 obj->fence_reg = reg - dev_priv->fence_regs;
2565 obj->last_fenced_ring = pipelined; 2483 obj->last_fenced_ring = NULL;
2566 2484
2567 reg->setup_seqno = 2485 reg->setup_seqno = 0;
2568 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2569 obj->last_fenced_seqno = reg->setup_seqno; 2486 obj->last_fenced_seqno = reg->setup_seqno;
2570 2487
2571update: 2488update:
@@ -2573,17 +2490,17 @@ update:
2573 switch (INTEL_INFO(dev)->gen) { 2490 switch (INTEL_INFO(dev)->gen) {
2574 case 7: 2491 case 7:
2575 case 6: 2492 case 6:
2576 ret = sandybridge_write_fence_reg(obj, pipelined); 2493 ret = sandybridge_write_fence_reg(obj);
2577 break; 2494 break;
2578 case 5: 2495 case 5:
2579 case 4: 2496 case 4:
2580 ret = i965_write_fence_reg(obj, pipelined); 2497 ret = i965_write_fence_reg(obj);
2581 break; 2498 break;
2582 case 3: 2499 case 3:
2583 ret = i915_write_fence_reg(obj, pipelined); 2500 ret = i915_write_fence_reg(obj);
2584 break; 2501 break;
2585 case 2: 2502 case 2:
2586 ret = i830_write_fence_reg(obj, pipelined); 2503 ret = i830_write_fence_reg(obj);
2587 break; 2504 break;
2588 } 2505 }
2589 2506