Diffstat (limited to 'drivers/gpu/drm/radeon/ni.c')
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 30
1 files changed, 15 insertions, 15 deletions
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cc9aaeb104f5..2d809e62c4c6 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1049,7 +1049,7 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 
 static int cayman_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 
 	r = radeon_ring_lock(rdev, cp, 7);
@@ -1116,7 +1116,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
 	cayman_cp_enable(rdev, false);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 int cayman_cp_resume(struct radeon_device *rdev)
@@ -1147,7 +1147,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
-	cp = &rdev->cp;
+	cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1181,7 +1181,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp1;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1207,7 +1207,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp2;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1233,15 +1233,15 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* start the rings */
 	cayman_cp_start(rdev);
-	rdev->cp.ready = true;
-	rdev->cp1.ready = true;
-	rdev->cp2.ready = true;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
 	/* this only test cp0 */
-	r = radeon_ring_test(rdev, &rdev->cp);
+	r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 	if (r) {
-		rdev->cp.ready = false;
-		rdev->cp1.ready = false;
-		rdev->cp2.ready = false;
+		rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 		return r;
 	}
 
@@ -1343,7 +1343,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
 static int cayman_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1438,7 +1438,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
 	cayman_cp_enable(rdev, false);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
@@ -1455,7 +1455,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* This don't do much */
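
For reference, a minimal sketch of the declarations the new code above relies on. This is not part of the patch shown here; the index values and the RADEON_NUM_RINGS bound are assumptions used only to illustrate how the former per-ring members become one indexed array:

    /* Hypothetical companion declarations (assumed, not from this diff):
     * the former rdev->cp, rdev->cp1 and rdev->cp2 members are replaced by
     * a single array of rings indexed by ring type. */
    #define RADEON_RING_TYPE_GFX_INDEX	0
    #define CAYMAN_RING_TYPE_CP1_INDEX	1
    #define CAYMAN_RING_TYPE_CP2_INDEX	2
    #define RADEON_NUM_RINGS		3

    struct radeon_device {
    	/* ... */
    	struct radeon_cp	cp[RADEON_NUM_RINGS];	/* was: cp, cp1, cp2 */
    	/* ... */
    };

With a layout like this, code such as cayman_cp_resume() can drive every ring through the same struct radeon_cp interface and select the ring purely by index, which is the pattern the hunks above follow.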