-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r100.c         | 38
-rw-r--r--  drivers/gpu/drm/radeon/r200.c         |  2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c         |  4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c         | 16
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c      | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h       |  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c    | 63
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  | 41
9 files changed, 93 insertions(+), 111 deletions(-)
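
The patch below turns the radeon IB into an inline object: struct radeon_cs_parser embeds ib and const_ib by value, the IB tests keep the IB on the stack, and radeon_ib_get()/radeon_ib_free() take a plain struct radeon_ib * instead of a double pointer, so accesses change from p->ib->field to p->ib.field. As orientation before the hunks, the sketch below (editorial, not part of the patch, with a cut-down stand-in for struct radeon_ib) contrasts the two layouts.

#include <stdint.h>

/* Cut-down stand-in for the real struct radeon_ib; field names follow the patch. */
struct radeon_ib {
	uint32_t	*ptr;
	unsigned	length_dw;
};

/* Old layout (sketch): the parser held pointers to separately allocated IBs,
 * so every access went through p->ib->ptr and the IBs had to be kmalloc'd. */
struct radeon_cs_parser_old {
	struct radeon_ib	*ib;
	struct radeon_ib	*const_ib;
};

/* New layout (sketch): the IBs live inside the parser itself, so accesses
 * become p->ib.ptr / p->ib.length_dw and no separate allocation is needed. */
struct radeon_cs_parser_new {
	struct radeon_ib	ib;
	struct radeon_ib	const_ib;
};
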
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 70089d32b80f..4e7dd2b4843d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1057,7 +1057,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg, wait_reg_mem_info;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the WAIT_REG_MEM */
 	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1215,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		if (!(evergreen_reg_safe_bm[i] & m))
 			return 0;
 	}
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	switch (reg) {
 	/* force following reg to 0 in an attempt to disable out buffer
 	 * which will need us to better understand how it works to perform
@@ -1896,7 +1896,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 	u32 idx_value;
 
 	track = (struct evergreen_cs_track *)p->track;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -2610,8 +2610,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 #if 0
-	for (r = 0; r < p->ib->length_dw; r++) {
-		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
 		mdelay(1);
 	}
 #endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad6ceb731713..0874a6dd411f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 		}
 
 		tmp |= tile_flags;
-		p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
 	} else
-		p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
 	return 0;
 }
 
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 	volatile uint32_t *ib;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	c = radeon_get_ib_value(p, idx++) & 0x1F;
 	if (c > 16) {
@@ -1275,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
 	unsigned i;
 	unsigned idx;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx;
 	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1354,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the wait until */
 	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1533,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	u32 tile_flags = 0;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 
 	idx_value = radeon_get_ib_value(p, idx);
@@ -1889,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 	volatile uint32_t *ib;
 	int r;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch (pkt->opcode) {
@@ -3684,7 +3684,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	struct radeon_ib *ib;
+	struct radeon_ib ib;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -3700,22 +3700,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	if (r) {
 		return r;
 	}
-	ib->ptr[0] = PACKET0(scratch, 0);
-	ib->ptr[1] = 0xDEADBEEF;
-	ib->ptr[2] = PACKET2(0);
-	ib->ptr[3] = PACKET2(0);
-	ib->ptr[4] = PACKET2(0);
-	ib->ptr[5] = PACKET2(0);
-	ib->ptr[6] = PACKET2(0);
-	ib->ptr[7] = PACKET2(0);
-	ib->length_dw = 8;
-	r = radeon_ib_schedule(rdev, ib);
+	ib.ptr[0] = PACKET0(scratch, 0);
+	ib.ptr[1] = 0xDEADBEEF;
+	ib.ptr[2] = PACKET2(0);
+	ib.ptr[3] = PACKET2(0);
+	ib.ptr[4] = PACKET2(0);
+	ib.ptr[5] = PACKET2(0);
+	ib.ptr[6] = PACKET2(0);
+	ib.ptr[7] = PACKET2(0);
+	ib.length_dw = 8;
+	r = radeon_ib_schedule(rdev, &ib);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
 		return r;
 	}
-	r = radeon_fence_wait(ib->fence, false);
+	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		return r;
 	}
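
The r100_ib_test() rework above is the calling convention the rest of the patch follows for IB users: the IB lives on the caller's stack (or inside the parser) and its address is passed to radeon_ib_get(), radeon_ib_schedule() and radeon_ib_free(). A minimal, self-contained sketch of that convention is shown below; the types and helpers are toy stand-ins, not the real radeon API.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Toy stand-in; the real driver fills ptr from the sub-allocator and attaches a fence. */
struct radeon_ib {
	uint32_t	ptr[64];
	unsigned	length_dw;
};

static int toy_ib_get(struct radeon_ib *ib, unsigned size_dw)
{
	/* Nothing to allocate here: the caller already owns the storage. */
	(void)size_dw;
	memset(ib, 0, sizeof(*ib));
	return 0;
}

static int toy_ib_schedule(struct radeon_ib *ib)
{
	printf("scheduling %u dwords, first = 0x%08X\n", ib->length_dw, ib->ptr[0]);
	return 0;
}

static void toy_ib_free(struct radeon_ib *ib)
{
	/* Only the IB's resources are released; the struct itself belongs to the caller. */
	ib->length_dw = 0;
}

int main(void)
{
	struct radeon_ib ib;	/* stack object, as in the reworked r100_ib_test() */

	if (toy_ib_get(&ib, 8))
		return 1;
	ib.ptr[0] = 0xDEADBEEF;
	ib.length_dw = 1;
	toy_ib_schedule(&ib);
	toy_ib_free(&ib);
	return 0;
}
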
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a59cc474d537..a26144d01207 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	u32 tile_flags = 0;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	idx_value = radeon_get_ib_value(p, idx);
 	switch (reg) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 6419a5900e67..97722a33e513 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -604,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	int r;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -1146,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
 	unsigned idx;
 	int r;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch(pkt->opcode) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 00b22385e3f8..4c0d8c96a0ec 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2681,7 +2681,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	struct radeon_ib *ib;
+	struct radeon_ib ib;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -2699,18 +2699,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 		return r;
 	}
-	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
-	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-	ib->ptr[2] = 0xDEADBEEF;
-	ib->length_dw = 3;
-	r = radeon_ib_schedule(rdev, ib);
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+	r = radeon_ib_schedule(rdev, &ib);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		return r;
 	}
-	r = radeon_fence_wait(ib->fence, false);
+	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		return r;
@@ -2722,7 +2722,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
 	} else {
 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b8e12af304a9..0133f5f09bd6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -345,7 +345,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	u32 height, height_align, pitch, pitch_align, depth_align;
 	u64 base_offset, base_align;
 	struct array_mode_checker array_check;
-	volatile u32 *ib = p->ib->ptr;
+	volatile u32 *ib = p->ib.ptr;
 	unsigned array_mode;
 	u32 format;
 
@@ -471,7 +471,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 	u64 base_offset, base_align;
 	struct array_mode_checker array_check;
 	int array_mode;
-	volatile u32 *ib = p->ib->ptr;
+	volatile u32 *ib = p->ib.ptr;
 
 
 	if (track->db_bo == NULL) {
@@ -961,7 +961,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg, wait_reg_mem_info;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the WAIT_REG_MEM */
 	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1110,7 +1110,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	m = 1 << ((reg >> 2) & 31);
 	if (!(r600_reg_safe_bm[i] & m))
 		return 0;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	switch (reg) {
 	/* force following reg to 0 in an attempt to disable out buffer
 	 * which will need us to better understand how it works to perform
@@ -1714,7 +1714,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 	u32 idx_value;
 
 	track = (struct r600_cs_track *)p->track;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -2249,8 +2249,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 #if 0
-	for (r = 0; r < p->ib->length_dw; r++) {
-		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
 		mdelay(1);
 	}
 #endif
@@ -2298,7 +2298,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 {
 	struct radeon_cs_parser parser;
 	struct radeon_cs_chunk *ib_chunk;
-	struct radeon_ib fake_ib;
 	struct r600_cs_track *track;
 	int r;
 
@@ -2314,9 +2313,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	parser.dev = &dev->pdev->dev;
 	parser.rdev = NULL;
 	parser.family = family;
-	parser.ib = &fake_ib;
 	parser.track = track;
-	fake_ib.ptr = ib;
+	parser.ib.ptr = ib;
 	r = radeon_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
@@ -2333,8 +2331,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	 * input memory (cached) and write to the IB (which can be
 	 * uncached). */
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
-	parser.ib->length_dw = ib_chunk->length_dw;
-	*l = parser.ib->length_dw;
+	parser.ib.length_dw = ib_chunk->length_dw;
+	*l = parser.ib.length_dw;
 	r = r600_cs_parse(&parser);
 	if (r) {
 		DRM_ERROR("Invalid command stream !\n");
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 659855a05053..60233d7a6f7d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -769,8 +769,8 @@ struct si_rlc {
 };
 
 int radeon_ib_get(struct radeon_device *rdev, int ring,
-		  struct radeon_ib **ib, unsigned size);
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+		  struct radeon_ib *ib, unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
@@ -838,8 +838,8 @@ struct radeon_cs_parser {
 	int			chunk_relocs_idx;
 	int			chunk_flags_idx;
 	int			chunk_const_ib_idx;
-	struct radeon_ib	*ib;
-	struct radeon_ib	*const_ib;
+	struct radeon_ib	ib;
+	struct radeon_ib	const_ib;
 	void			*track;
 	unsigned		family;
 	int			parser_error;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index dcfe2a0bcdc0..c7d64a739033 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -138,12 +138,12 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 		return 0;
 	}
 
-	r = radeon_semaphore_create(p->rdev, &p->ib->semaphore);
+	r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
 	if (r) {
 		return r;
 	}
 
-	return radeon_semaphore_sync_rings(p->rdev, p->ib->semaphore,
+	return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
 					   sync_to_ring, p->ring);
 }
 
@@ -161,8 +161,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
-	p->ib = NULL;
-	p->const_ib = NULL;
+	p->ib.sa_bo = NULL;
+	p->ib.semaphore = NULL;
+	p->const_ib.sa_bo = NULL;
+	p->const_ib.semaphore = NULL;
 	p->chunk_ib_idx = -1;
 	p->chunk_relocs_idx = -1;
 	p->chunk_flags_idx = -1;
@@ -301,10 +303,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-
-	if (!error && parser->ib)
+	if (!error)
 		ttm_eu_fence_buffer_objects(&parser->validated,
-					    parser->ib->fence);
+					    parser->ib.fence);
 	else
 		ttm_eu_backoff_reservation(&parser->validated);
 
@@ -327,9 +328,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
-	if (parser->const_ib) {
-		radeon_ib_free(parser->rdev, &parser->const_ib);
-	}
+	radeon_ib_free(parser->rdev, &parser->const_ib);
 }
 
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
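
With ib and const_ib embedded in the parser, radeon_cs_parser_fini() above frees both unconditionally instead of testing the pointers first. That only works if an IB that was never obtained is still safe to hand to radeon_ib_free(), which is why radeon_cs_parser_init() now clears sa_bo and semaphore up front. The sketch below illustrates that pattern with toy helpers, under the assumption (not verified against this kernel tree) that the real free routines tolerate NULL resources.

#include <stdlib.h>

/* Toy model of the cleanup pattern; names mirror the patch but the types and
 * helpers here are stand-ins, not the real radeon implementations. */
struct toy_ib {
	void	*sa_bo;		/* NULL means "never obtained" */
	void	*semaphore;
	void	*fence;
};

static void toy_semaphore_free(void *sem)	{ free(sem); }
static void toy_sa_bo_free(void **sa_bo)	{ free(*sa_bo); *sa_bo = NULL; }
static void toy_fence_unref(void **fence)	{ free(*fence); *fence = NULL; }

static void toy_parser_init(struct toy_ib *ib, struct toy_ib *const_ib)
{
	/* Mirrors the new radeon_cs_parser_init(): clear the resource handles so
	 * a later unconditional free is harmless even on early error paths. */
	ib->sa_bo = ib->semaphore = ib->fence = NULL;
	const_ib->sa_bo = const_ib->semaphore = const_ib->fence = NULL;
}

/* Safe whether or not the IB was ever set up, because every member starts out
 * NULL and these toy helpers accept NULL (free(NULL) is a no-op). */
static void toy_ib_free(struct toy_ib *ib)
{
	toy_semaphore_free(ib->semaphore);
	toy_sa_bo_free(&ib->sa_bo);
	toy_fence_unref(&ib->fence);
}

static void toy_parser_fini(struct toy_ib *ib, struct toy_ib *const_ib)
{
	/* No "if (const_ib)" test needed any more: both frees are unconditional. */
	toy_ib_free(ib);
	toy_ib_free(const_ib);
}
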
@@ -355,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(rdev, parser->ring, parser);
 	if (r || parser->parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
@@ -370,8 +369,8 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	if (r) {
 		DRM_ERROR("Failed to synchronize rings !\n");
 	}
-	parser->ib->vm_id = 0;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.vm_id = 0;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
@@ -422,14 +421,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 			DRM_ERROR("Failed to get const ib !\n");
 			return r;
 		}
-		parser->const_ib->is_const_ib = true;
-		parser->const_ib->length_dw = ib_chunk->length_dw;
+		parser->const_ib.is_const_ib = true;
+		parser->const_ib.length_dw = ib_chunk->length_dw;
 		/* Copy the packet into the IB */
-		if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
 				       ib_chunk->length_dw * 4)) {
 			return -EFAULT;
 		}
-		r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
 		if (r) {
 			return r;
 		}
@@ -446,13 +445,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
 	}
-	parser->ib->length_dw = ib_chunk->length_dw;
+	parser->ib.length_dw = ib_chunk->length_dw;
 	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
 			       ib_chunk->length_dw * 4)) {
 		return -EFAULT;
 	}
-	r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
 	if (r) {
 		return r;
 	}
@@ -473,29 +472,29 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
-		parser->const_ib->vm_id = vm->id;
+		parser->const_ib.vm_id = vm->id;
 		/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 		 * offset inside the pool bo
 		 */
-		parser->const_ib->gpu_addr = parser->const_ib->sa_bo->soffset;
-		r = radeon_ib_schedule(rdev, parser->const_ib);
+		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+		r = radeon_ib_schedule(rdev, &parser->const_ib);
 		if (r)
 			goto out;
 	}
 
-	parser->ib->vm_id = vm->id;
+	parser->ib.vm_id = vm->id;
 	/* ib pool is bind at 0 in virtual address space to gpu_addr is the
 	 * offset inside the pool bo
 	 */
-	parser->ib->gpu_addr = parser->ib->sa_bo->soffset;
-	parser->ib->is_const_ib = false;
-	r = radeon_ib_schedule(rdev, parser->ib);
+	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+	parser->ib.is_const_ib = false;
+	r = radeon_ib_schedule(rdev, &parser->ib);
 out:
 	if (!r) {
 		if (vm->fence) {
 			radeon_fence_unref(&vm->fence);
 		}
-		vm->fence = radeon_fence_ref(parser->ib->fence);
+		vm->fence = radeon_fence_ref(parser->ib.fence);
 	}
 	mutex_unlock(&fpriv->vm.mutex);
 	return r;
@@ -573,7 +572,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
 			size = PAGE_SIZE;
 		}
 
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       size))
 			return -EFAULT;
@@ -590,7 +589,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 	bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
 
 	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
-		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
 				       ibc->user_ptr + (i * PAGE_SIZE),
 				       PAGE_SIZE)) {
 			p->parser_error = -EFAULT;
@@ -606,7 +605,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 
 	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
 	if (copy1)
-		ibc->kpage[new_page] = p->ib->ptr + (pg_idx * (PAGE_SIZE / 4));
+		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
 
 	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
 			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
@@ -617,7 +616,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 
 	/* copy to IB for non single case */
 	if (!copy1)
-		memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
 
 	ibc->last_copied_page = pg_idx;
 	ibc->kpage_idx[new_page] = pg_idx;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index af8e1ee1dc01..a5dee76f4ebb 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -65,51 +65,36 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 }
 
 int radeon_ib_get(struct radeon_device *rdev, int ring,
-		  struct radeon_ib **ib, unsigned size)
+		  struct radeon_ib *ib, unsigned size)
 {
 	int r;
 
-	*ib = kmalloc(sizeof(struct radeon_ib), GFP_KERNEL);
-	if (*ib == NULL) {
-		return -ENOMEM;
-	}
-	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*ib)->sa_bo, size, 256, true);
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
 	if (r) {
 		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
-		kfree(*ib);
-		*ib = NULL;
 		return r;
 	}
-	r = radeon_fence_create(rdev, &(*ib)->fence, ring);
+	r = radeon_fence_create(rdev, &ib->fence, ring);
 	if (r) {
 		dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
-		radeon_sa_bo_free(rdev, &(*ib)->sa_bo, NULL);
-		kfree(*ib);
-		*ib = NULL;
+		radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
 		return r;
 	}
 
-	(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
-	(*ib)->gpu_addr = radeon_sa_bo_gpu_addr((*ib)->sa_bo);
-	(*ib)->vm_id = 0;
-	(*ib)->is_const_ib = false;
-	(*ib)->semaphore = NULL;
+	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+	ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+	ib->vm_id = 0;
+	ib->is_const_ib = false;
+	ib->semaphore = NULL;
 
 	return 0;
 }
 
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_ib *tmp = *ib;
-
-	*ib = NULL;
-	if (tmp == NULL) {
-		return;
-	}
-	radeon_semaphore_free(rdev, tmp->semaphore, tmp->fence);
-	radeon_sa_bo_free(rdev, &tmp->sa_bo, tmp->fence);
-	radeon_fence_unref(&tmp->fence);
-	kfree(tmp);
+	radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+	radeon_fence_unref(&ib->fence);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
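
The radeon_ring.c hunk is where the ownership model actually changes: radeon_ib_get() no longer kmalloc's the IB and radeon_ib_free() no longer kfree's it; the caller supplies the storage, and only the sub-allocated buffer, fence and semaphore are acquired and released. The self-contained sketch below contrasts the two conventions using toy types and a toy allocator, not the real kernel API.

#include <stdio.h>
#include <stdlib.h>

struct toy_ib {
	void	*sa_bo;		/* stand-in for the sub-allocated GPU buffer */
};

/* Old convention (sketch): the callee allocates the struct and hands it back
 * through a double pointer, so failure paths must free and NULL it as well. */
static int ib_get_old(struct toy_ib **ib)
{
	*ib = malloc(sizeof(**ib));
	if (*ib == NULL)
		return -1;
	(*ib)->sa_bo = malloc(64);
	if ((*ib)->sa_bo == NULL) {
		free(*ib);
		*ib = NULL;
		return -1;
	}
	return 0;
}

/* New convention (sketch): the caller owns the struct (on the stack or embedded
 * in a larger object); only the IB's resources are acquired here. */
static int ib_get_new(struct toy_ib *ib)
{
	ib->sa_bo = malloc(64);
	return ib->sa_bo ? 0 : -1;
}

static void ib_free_new(struct toy_ib *ib)
{
	free(ib->sa_bo);	/* no free of the struct itself */
	ib->sa_bo = NULL;
}

int main(void)
{
	struct toy_ib *old_ib = NULL;
	struct toy_ib ib;	/* caller-owned, as after the patch */

	if (ib_get_old(&old_ib) == 0) {
		free(old_ib->sa_bo);
		free(old_ib);	/* the old convention frees the struct too */
	}
	if (ib_get_new(&ib) == 0) {
		puts("got IB resources");
		ib_free_new(&ib);
	}
	return 0;
}
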