-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c  86
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c       38
2 files changed, 62 insertions, 62 deletions
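
Both parsers make the same mechanical change: the offset dwords that are only read for validation (the source/destination offsets checked against radeon_bo_size()) are now fetched with radeon_get_ib_value(p, idx+N) instead of being loaded from the ib[] mapping directly, while the relocation fixups that must be written back still use ib[idx+N] += .... The offsets themselves are 40-bit addresses assembled from a low dword plus 8 bits of a second dword (either its low byte shifted into bits 39:32, or, for the constant-fill packets, bits 23:16 shifted up by 16). The following standalone sketch of that address assembly is illustration only, not part of the patch, and the helper names are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Sketch: assemble a 40-bit DMA address the way the CS parsers do. */
static uint64_t dma_addr_lo_hi(uint32_t lo, uint32_t hi)
{
	uint64_t offset = lo;                      /* bits 31:0 */
	offset |= ((uint64_t)(hi & 0xff)) << 32;   /* bits 39:32 from hi[7:0] */
	return offset;
}

/* Constant-fill variant: the upper bits sit in hi[23:16]. */
static uint64_t dma_addr_fill(uint32_t lo, uint32_t hi)
{
	uint64_t offset = lo;
	offset |= ((uint64_t)(hi & 0x00ff0000)) << 16;  /* bits 39:32 from hi[23:16] */
	return offset;
}

int main(void)
{
	printf("copy dst = 0x%010llx\n",
	       (unsigned long long)dma_addr_lo_hi(0x12345678, 0x000000ab));
	printf("fill dst = 0x%010llx\n",
	       (unsigned long long)dma_addr_fill(0x12345678, 0x00ab0000));
	return 0;
}
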
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7a445666e71f..ee4cff534f10 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				return -EINVAL;
 			}
 			if (tiled) {
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 				p->idx += count + 7;
 			} else {
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
 							return -EINVAL;
 						}
-						dst_offset = ib[idx+1];
+						dst_offset = radeon_get_ib_value(p, idx+1);
 						dst_offset <<= 8;
-						dst2_offset = ib[idx+2];
+						dst2_offset = radeon_get_ib_value(p, idx+2);
 						dst2_offset <<= 8;
-						src_offset = ib[idx+8];
-						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+						src_offset = radeon_get_ib_value(p, idx+8);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 							dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
 								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 							return -EINVAL;
 						}
-						dst_offset = ib[idx+1];
+						dst_offset = radeon_get_ib_value(p, idx+1);
 						dst_offset <<= 8;
-						dst2_offset = ib[idx+2];
+						dst2_offset = radeon_get_ib_value(p, idx+2);
 						dst2_offset <<= 8;
-						src_offset = ib[idx+8];
-						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+						src_offset = radeon_get_ib_value(p, idx+8);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 						/* detile bit */
 						if (idx_value & (1 << 31)) {
 							/* tiled src, linear dst */
-							src_offset = ib[idx+1];
+							src_offset = radeon_get_ib_value(p, idx+1);
 							src_offset <<= 8;
 							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-							dst_offset = ib[idx+7];
-							dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+							dst_offset = radeon_get_ib_value(p, idx+7);
+							dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 						} else {
 							/* linear src, tiled dst */
-							src_offset = ib[idx+7];
-							src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+							src_offset = radeon_get_ib_value(p, idx+7);
+							src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-							dst_offset = ib[idx+1];
+							dst_offset = radeon_get_ib_value(p, idx+1);
 							dst_offset <<= 8;
 							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 						}
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 							return -EINVAL;
 						}
-						dst_offset = ib[idx+1];
+						dst_offset = radeon_get_ib_value(p, idx+1);
 						dst_offset <<= 8;
-						dst2_offset = ib[idx+2];
+						dst2_offset = radeon_get_ib_value(p, idx+2);
 						dst2_offset <<= 8;
-						src_offset = ib[idx+8];
-						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+						src_offset = radeon_get_ib_value(p, idx+8);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 					/* detile bit */
 					if (idx_value & (1 << 31)) {
 						/* tiled src, linear dst */
-						src_offset = ib[idx+1];
+						src_offset = radeon_get_ib_value(p, idx+1);
 						src_offset <<= 8;
 						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-						dst_offset = ib[idx+7];
-						dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+						dst_offset = radeon_get_ib_value(p, idx+7);
+						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 						ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 						ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 					} else {
 						/* linear src, tiled dst */
-						src_offset = ib[idx+7];
-						src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+						src_offset = radeon_get_ib_value(p, idx+7);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 						ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 						ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-						dst_offset = ib[idx+1];
+						dst_offset = radeon_get_ib_value(p, idx+1);
 						dst_offset <<= 8;
 						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 					}
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 					switch (misc) {
 					case 0:
 						/* L2L, byte */
-						src_offset = ib[idx+2];
-						src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-						dst_offset = ib[idx+1];
-						dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+						src_offset = radeon_get_ib_value(p, idx+2);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+						dst_offset = radeon_get_ib_value(p, idx+1);
+						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 						if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
 							dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
 								 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 							DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
 							return -EINVAL;
 						}
-						dst_offset = ib[idx+1];
-						dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-						dst2_offset = ib[idx+2];
-						dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
-						src_offset = ib[idx+3];
-						src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+						dst_offset = radeon_get_ib_value(p, idx+1);
+						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+						dst2_offset = radeon_get_ib_value(p, idx+2);
+						dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+						src_offset = radeon_get_ib_value(p, idx+3);
+						src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 							dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
 								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 					}
 				} else {
 					/* L2L, dw */
-					src_offset = ib[idx+2];
-					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-					dst_offset = ib[idx+1];
-					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 					if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 						dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
 							 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
-			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 					 dst_offset, radeon_bo_size(dst_reloc->robj));
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 69ec24ab8d63..9b2512bf1a46 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				return -EINVAL;
 			}
 			if (tiled) {
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 				p->idx += count + 5;
 			} else {
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				/* detile bit */
 				if (idx_value & (1 << 31)) {
 					/* tiled src, linear dst */
-					src_offset = ib[idx+1];
+					src_offset = radeon_get_ib_value(p, idx+1);
 					src_offset <<= 8;
 					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-					dst_offset = ib[idx+5];
-					dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+5);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 				} else {
 					/* linear src, tiled dst */
-					src_offset = ib[idx+5];
-					src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+					src_offset = radeon_get_ib_value(p, idx+5);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-					dst_offset = ib[idx+1];
+					dst_offset = radeon_get_ib_value(p, idx+1);
 					dst_offset <<= 8;
 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 				}
 				p->idx += 7;
 			} else {
 				if (p->family >= CHIP_RV770) {
-					src_offset = ib[idx+2];
-					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-					dst_offset = ib[idx+1];
-					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 
 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 					p->idx += 5;
 				} else {
-					src_offset = ib[idx+2];
-					src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-					dst_offset = ib[idx+1];
-					dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 
 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
-			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));