aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/vc4/vc4_validate_shaders.c
diff options
context:
space:
mode:
authorEric Anholt <eric@anholt.net>2017-07-25 12:27:32 -0400
committerEric Anholt <eric@anholt.net>2017-08-08 16:20:11 -0400
commitfb95992af1d779806da9a380b14f76ad13764c2f (patch)
treefa288996ace299687cbe9b7392e0d8694c5e5448 /drivers/gpu/drm/vc4/vc4_validate_shaders.c
parent1d5494e965be7b512cc03d48f1bfc6a93a876121 (diff)
drm/vc4: Demote user-accessible DRM_ERROR paths to DRM_DEBUG.
Userspace shouldn't be able to spam dmesg by passing bad arguments. This has particularly become an issue since we started using a bad argument to set_tiling to detect if set_tiling was supported. Signed-off-by: Eric Anholt <eric@anholt.net> Fixes: 83753117f1de ("drm/vc4: Add get/set tiling ioctls.") Link: https://patchwork.freedesktop.org/patch/msgid/20170725162733.28007-1-eric@anholt.net Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Diffstat (limited to 'drivers/gpu/drm/vc4/vc4_validate_shaders.c')
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c72
1 file changed, 36 insertions, 36 deletions
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 0b2df5c6efb4..d3f15bf60900 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
200 uint32_t clamp_reg, clamp_offset; 200 uint32_t clamp_reg, clamp_offset;
201 201
202 if (sig == QPU_SIG_SMALL_IMM) { 202 if (sig == QPU_SIG_SMALL_IMM) {
203 DRM_ERROR("direct TMU read used small immediate\n"); 203 DRM_DEBUG("direct TMU read used small immediate\n");
204 return false; 204 return false;
205 } 205 }
206 206
@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
209 */ 209 */
210 if (is_mul || 210 if (is_mul ||
211 QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { 211 QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
212 DRM_ERROR("direct TMU load wasn't an add\n"); 212 DRM_DEBUG("direct TMU load wasn't an add\n");
213 return false; 213 return false;
214 } 214 }
215 215
@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
220 */ 220 */
221 clamp_reg = raddr_add_a_to_live_reg_index(inst); 221 clamp_reg = raddr_add_a_to_live_reg_index(inst);
222 if (clamp_reg == ~0) { 222 if (clamp_reg == ~0) {
223 DRM_ERROR("direct TMU load wasn't clamped\n"); 223 DRM_DEBUG("direct TMU load wasn't clamped\n");
224 return false; 224 return false;
225 } 225 }
226 226
227 clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg]; 227 clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
228 if (clamp_offset == ~0) { 228 if (clamp_offset == ~0) {
229 DRM_ERROR("direct TMU load wasn't clamped\n"); 229 DRM_DEBUG("direct TMU load wasn't clamped\n");
230 return false; 230 return false;
231 } 231 }
232 232
@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
238 238
239 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && 239 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
240 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { 240 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
241 DRM_ERROR("direct TMU load didn't add to a uniform\n"); 241 DRM_DEBUG("direct TMU load didn't add to a uniform\n");
242 return false; 242 return false;
243 } 243 }
244 244
@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
246 } else { 246 } else {
247 if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM && 247 if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
248 raddr_b == QPU_R_UNIF)) { 248 raddr_b == QPU_R_UNIF)) {
249 DRM_ERROR("uniform read in the same instruction as " 249 DRM_DEBUG("uniform read in the same instruction as "
250 "texture setup.\n"); 250 "texture setup.\n");
251 return false; 251 return false;
252 } 252 }
253 } 253 }
254 254
255 if (validation_state->tmu_write_count[tmu] >= 4) { 255 if (validation_state->tmu_write_count[tmu] >= 4) {
256 DRM_ERROR("TMU%d got too many parameters before dispatch\n", 256 DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
257 tmu); 257 tmu);
258 return false; 258 return false;
259 } 259 }
@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
265 */ 265 */
266 if (!is_direct) { 266 if (!is_direct) {
267 if (validation_state->needs_uniform_address_update) { 267 if (validation_state->needs_uniform_address_update) {
268 DRM_ERROR("Texturing with undefined uniform address\n"); 268 DRM_DEBUG("Texturing with undefined uniform address\n");
269 return false; 269 return false;
270 } 270 }
271 271
@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
336 case QPU_SIG_LOAD_TMU1: 336 case QPU_SIG_LOAD_TMU1:
337 break; 337 break;
338 default: 338 default:
339 DRM_ERROR("uniforms address change must be " 339 DRM_DEBUG("uniforms address change must be "
340 "normal math\n"); 340 "normal math\n");
341 return false; 341 return false;
342 } 342 }
343 343
344 if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) { 344 if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
345 DRM_ERROR("Uniform address reset must be an ADD.\n"); 345 DRM_DEBUG("Uniform address reset must be an ADD.\n");
346 return false; 346 return false;
347 } 347 }
348 348
349 if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) { 349 if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
350 DRM_ERROR("Uniform address reset must be unconditional.\n"); 350 DRM_DEBUG("Uniform address reset must be unconditional.\n");
351 return false; 351 return false;
352 } 352 }
353 353
354 if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP && 354 if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
355 !(inst & QPU_PM)) { 355 !(inst & QPU_PM)) {
356 DRM_ERROR("No packing allowed on uniforms reset\n"); 356 DRM_DEBUG("No packing allowed on uniforms reset\n");
357 return false; 357 return false;
358 } 358 }
359 359
360 if (add_lri == -1) { 360 if (add_lri == -1) {
361 DRM_ERROR("First argument of uniform address write must be " 361 DRM_DEBUG("First argument of uniform address write must be "
362 "an immediate value.\n"); 362 "an immediate value.\n");
363 return false; 363 return false;
364 } 364 }
365 365
366 if (validation_state->live_immediates[add_lri] != expected_offset) { 366 if (validation_state->live_immediates[add_lri] != expected_offset) {
367 DRM_ERROR("Resetting uniforms with offset %db instead of %db\n", 367 DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
368 validation_state->live_immediates[add_lri], 368 validation_state->live_immediates[add_lri],
369 expected_offset); 369 expected_offset);
370 return false; 370 return false;
@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
372 372
373 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && 373 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
374 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) { 374 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
375 DRM_ERROR("Second argument of uniform address write must be " 375 DRM_DEBUG("Second argument of uniform address write must be "
376 "a uniform.\n"); 376 "a uniform.\n");
377 return false; 377 return false;
378 } 378 }
@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
417 switch (waddr) { 417 switch (waddr) {
418 case QPU_W_UNIFORMS_ADDRESS: 418 case QPU_W_UNIFORMS_ADDRESS:
419 if (is_b) { 419 if (is_b) {
420 DRM_ERROR("relative uniforms address change " 420 DRM_DEBUG("relative uniforms address change "
421 "unsupported\n"); 421 "unsupported\n");
422 return false; 422 return false;
423 } 423 }
@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
452 /* XXX: I haven't thought about these, so don't support them 452 /* XXX: I haven't thought about these, so don't support them
453 * for now. 453 * for now.
454 */ 454 */
455 DRM_ERROR("Unsupported waddr %d\n", waddr); 455 DRM_DEBUG("Unsupported waddr %d\n", waddr);
456 return false; 456 return false;
457 457
458 case QPU_W_VPM_ADDR: 458 case QPU_W_VPM_ADDR:
459 DRM_ERROR("General VPM DMA unsupported\n"); 459 DRM_DEBUG("General VPM DMA unsupported\n");
460 return false; 460 return false;
461 461
462 case QPU_W_VPM: 462 case QPU_W_VPM:
@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
559 bool ok; 559 bool ok;
560 560
561 if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) { 561 if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
562 DRM_ERROR("ADD and MUL both set up textures\n"); 562 DRM_DEBUG("ADD and MUL both set up textures\n");
563 return false; 563 return false;
564 } 564 }
565 565
@@ -588,7 +588,7 @@ check_branch(uint64_t inst,
588 * there's no need for it. 588 * there's no need for it.
589 */ 589 */
590 if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) { 590 if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
591 DRM_ERROR("branch instruction at %d wrote a register.\n", 591 DRM_DEBUG("branch instruction at %d wrote a register.\n",
592 validation_state->ip); 592 validation_state->ip);
593 return false; 593 return false;
594 } 594 }
@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
614 validated_shader->uniforms_size += 4; 614 validated_shader->uniforms_size += 4;
615 615
616 if (validation_state->needs_uniform_address_update) { 616 if (validation_state->needs_uniform_address_update) {
617 DRM_ERROR("Uniform read with undefined uniform " 617 DRM_DEBUG("Uniform read with undefined uniform "
618 "address\n"); 618 "address\n");
619 return false; 619 return false;
620 } 620 }
@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
660 continue; 660 continue;
661 661
662 if (ip - last_branch < 4) { 662 if (ip - last_branch < 4) {
663 DRM_ERROR("Branch at %d during delay slots\n", ip); 663 DRM_DEBUG("Branch at %d during delay slots\n", ip);
664 return false; 664 return false;
665 } 665 }
666 last_branch = ip; 666 last_branch = ip;
667 667
668 if (inst & QPU_BRANCH_REG) { 668 if (inst & QPU_BRANCH_REG) {
669 DRM_ERROR("branching from register relative " 669 DRM_DEBUG("branching from register relative "
670 "not supported\n"); 670 "not supported\n");
671 return false; 671 return false;
672 } 672 }
673 673
674 if (!(inst & QPU_BRANCH_REL)) { 674 if (!(inst & QPU_BRANCH_REL)) {
675 DRM_ERROR("relative branching required\n"); 675 DRM_DEBUG("relative branching required\n");
676 return false; 676 return false;
677 } 677 }
678 678
@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
682 * end of the shader object. 682 * end of the shader object.
683 */ 683 */
684 if (branch_imm % sizeof(inst) != 0) { 684 if (branch_imm % sizeof(inst) != 0) {
685 DRM_ERROR("branch target not aligned\n"); 685 DRM_DEBUG("branch target not aligned\n");
686 return false; 686 return false;
687 } 687 }
688 688
689 branch_target_ip = after_delay_ip + (branch_imm >> 3); 689 branch_target_ip = after_delay_ip + (branch_imm >> 3);
690 if (branch_target_ip >= validation_state->max_ip) { 690 if (branch_target_ip >= validation_state->max_ip) {
691 DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n", 691 DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
692 ip, branch_target_ip, 692 ip, branch_target_ip,
693 validation_state->max_ip); 693 validation_state->max_ip);
694 return false; 694 return false;
@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
699 * the shader. 699 * the shader.
700 */ 700 */
701 if (after_delay_ip >= validation_state->max_ip) { 701 if (after_delay_ip >= validation_state->max_ip) {
702 DRM_ERROR("Branch at %d continues past shader end " 702 DRM_DEBUG("Branch at %d continues past shader end "
703 "(%d/%d)\n", 703 "(%d/%d)\n",
704 ip, after_delay_ip, validation_state->max_ip); 704 ip, after_delay_ip, validation_state->max_ip);
705 return false; 705 return false;
@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
709 } 709 }
710 710
711 if (max_branch_target > validation_state->max_ip - 3) { 711 if (max_branch_target > validation_state->max_ip - 3) {
712 DRM_ERROR("Branch landed after QPU_SIG_PROG_END"); 712 DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
713 return false; 713 return false;
714 } 714 }
715 715
@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
750 return true; 750 return true;
751 751
752 if (texturing_in_progress(validation_state)) { 752 if (texturing_in_progress(validation_state)) {
753 DRM_ERROR("Branch target landed during TMU setup\n"); 753 DRM_DEBUG("Branch target landed during TMU setup\n");
754 return false; 754 return false;
755 } 755 }
756 756
@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
837 case QPU_SIG_LAST_THREAD_SWITCH: 837 case QPU_SIG_LAST_THREAD_SWITCH:
838 if (!check_instruction_writes(validated_shader, 838 if (!check_instruction_writes(validated_shader,
839 &validation_state)) { 839 &validation_state)) {
840 DRM_ERROR("Bad write at ip %d\n", ip); 840 DRM_DEBUG("Bad write at ip %d\n", ip);
841 goto fail; 841 goto fail;
842 } 842 }
843 843
@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
855 validated_shader->is_threaded = true; 855 validated_shader->is_threaded = true;
856 856
857 if (ip < last_thread_switch_ip + 3) { 857 if (ip < last_thread_switch_ip + 3) {
858 DRM_ERROR("Thread switch too soon after " 858 DRM_DEBUG("Thread switch too soon after "
859 "last switch at ip %d\n", ip); 859 "last switch at ip %d\n", ip);
860 goto fail; 860 goto fail;
861 } 861 }
@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
867 case QPU_SIG_LOAD_IMM: 867 case QPU_SIG_LOAD_IMM:
868 if (!check_instruction_writes(validated_shader, 868 if (!check_instruction_writes(validated_shader,
869 &validation_state)) { 869 &validation_state)) {
870 DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); 870 DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
871 goto fail; 871 goto fail;
872 } 872 }
873 break; 873 break;
@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
878 goto fail; 878 goto fail;
879 879
880 if (ip < last_thread_switch_ip + 3) { 880 if (ip < last_thread_switch_ip + 3) {
881 DRM_ERROR("Branch in thread switch at ip %d", 881 DRM_DEBUG("Branch in thread switch at ip %d",
882 ip); 882 ip);
883 goto fail; 883 goto fail;
884 } 884 }
885 885
886 break; 886 break;
887 default: 887 default:
888 DRM_ERROR("Unsupported QPU signal %d at " 888 DRM_DEBUG("Unsupported QPU signal %d at "
889 "instruction %d\n", sig, ip); 889 "instruction %d\n", sig, ip);
890 goto fail; 890 goto fail;
891 } 891 }
@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
898 } 898 }
899 899
900 if (ip == validation_state.max_ip) { 900 if (ip == validation_state.max_ip) {
901 DRM_ERROR("shader failed to terminate before " 901 DRM_DEBUG("shader failed to terminate before "
902 "shader BO end at %zd\n", 902 "shader BO end at %zd\n",
903 shader_obj->base.size); 903 shader_obj->base.size);
904 goto fail; 904 goto fail;
@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
907 /* Might corrupt other thread */ 907 /* Might corrupt other thread */
908 if (validated_shader->is_threaded && 908 if (validated_shader->is_threaded &&
909 validation_state.all_registers_used) { 909 validation_state.all_registers_used) {
910 DRM_ERROR("Shader uses threading, but uses the upper " 910 DRM_DEBUG("Shader uses threading, but uses the upper "
911 "half of the registers, too\n"); 911 "half of the registers, too\n");
912 goto fail; 912 goto fail;
913 } 913 }