diff options
author | Christian König <christian.koenig@amd.com> | 2015-11-04 09:44:39 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-11-16 11:05:52 -0500 |
commit | 7e52a81c2f0326a85d3ebc005829bcd604731c6d (patch) | |
tree | 6e651ae3f7cac9705cb2f752f705871d9db37900 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |
parent | e4a58a28b50f30e72292b6659d94410cbf7355ad (diff) |
drm/amdgpu: cleanup amdgpu_cs_parser handling
No need any more to allocate that structure dynamically; just put it on the
stack. This is a start to cleaning up some of the scheduler fallout.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 88 |
1 file changed, 33 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ecc82dfe83f8..bf32096b8eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, | ||
131 | struct drm_file *filp, | ||
132 | struct amdgpu_ctx *ctx, | ||
133 | struct amdgpu_ib *ibs, | ||
134 | uint32_t num_ibs) | ||
135 | { | ||
136 | struct amdgpu_cs_parser *parser; | ||
137 | int i; | ||
138 | |||
139 | parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); | ||
140 | if (!parser) | ||
141 | return NULL; | ||
142 | |||
143 | parser->adev = adev; | ||
144 | parser->filp = filp; | ||
145 | parser->ctx = ctx; | ||
146 | parser->ibs = ibs; | ||
147 | parser->num_ibs = num_ibs; | ||
148 | for (i = 0; i < num_ibs; i++) | ||
149 | ibs[i].ctx = ctx; | ||
150 | |||
151 | return parser; | ||
152 | } | ||
153 | |||
154 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | 130 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) |
155 | { | 131 | { |
156 | union drm_amdgpu_cs *cs = data; | 132 | union drm_amdgpu_cs *cs = data; |
@@ -490,6 +466,7 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err | |||
490 | static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) | 466 | static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) |
491 | { | 467 | { |
492 | unsigned i; | 468 | unsigned i; |
469 | |||
493 | if (parser->ctx) | 470 | if (parser->ctx) |
494 | amdgpu_ctx_put(parser->ctx); | 471 | amdgpu_ctx_put(parser->ctx); |
495 | if (parser->bo_list) | 472 | if (parser->bo_list) |
@@ -505,7 +482,6 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) | |||
505 | kfree(parser->ibs); | 482 | kfree(parser->ibs); |
506 | if (parser->uf.bo) | 483 | if (parser->uf.bo) |
507 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); | 484 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); |
508 | kfree(parser); | ||
509 | } | 485 | } |
510 | 486 | ||
511 | /** | 487 | /** |
@@ -824,36 +800,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
824 | union drm_amdgpu_cs *cs = data; | 800 | union drm_amdgpu_cs *cs = data; |
825 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 801 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
826 | struct amdgpu_vm *vm = &fpriv->vm; | 802 | struct amdgpu_vm *vm = &fpriv->vm; |
827 | struct amdgpu_cs_parser *parser; | 803 | struct amdgpu_cs_parser parser = {}; |
828 | bool reserved_buffers = false; | 804 | bool reserved_buffers = false; |
829 | int i, r; | 805 | int i, r; |
830 | 806 | ||
831 | if (!adev->accel_working) | 807 | if (!adev->accel_working) |
832 | return -EBUSY; | 808 | return -EBUSY; |
833 | 809 | ||
834 | parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); | 810 | parser.adev = adev; |
835 | if (!parser) | 811 | parser.filp = filp; |
836 | return -ENOMEM; | 812 | |
837 | r = amdgpu_cs_parser_init(parser, data); | 813 | r = amdgpu_cs_parser_init(&parser, data); |
838 | if (r) { | 814 | if (r) { |
839 | DRM_ERROR("Failed to initialize parser !\n"); | 815 | DRM_ERROR("Failed to initialize parser !\n"); |
840 | amdgpu_cs_parser_fini(parser, r, false); | 816 | amdgpu_cs_parser_fini(&parser, r, false); |
841 | r = amdgpu_cs_handle_lockup(adev, r); | 817 | r = amdgpu_cs_handle_lockup(adev, r); |
842 | return r; | 818 | return r; |
843 | } | 819 | } |
844 | mutex_lock(&vm->mutex); | 820 | mutex_lock(&vm->mutex); |
845 | r = amdgpu_cs_parser_relocs(parser); | 821 | r = amdgpu_cs_parser_relocs(&parser); |
846 | if (r == -ENOMEM) | 822 | if (r == -ENOMEM) |
847 | DRM_ERROR("Not enough memory for command submission!\n"); | 823 | DRM_ERROR("Not enough memory for command submission!\n"); |
848 | else if (r && r != -ERESTARTSYS) | 824 | else if (r && r != -ERESTARTSYS) |
849 | DRM_ERROR("Failed to process the buffer list %d!\n", r); | 825 | DRM_ERROR("Failed to process the buffer list %d!\n", r); |
850 | else if (!r) { | 826 | else if (!r) { |
851 | reserved_buffers = true; | 827 | reserved_buffers = true; |
852 | r = amdgpu_cs_ib_fill(adev, parser); | 828 | r = amdgpu_cs_ib_fill(adev, &parser); |
853 | } | 829 | } |
854 | 830 | ||
855 | if (!r) { | 831 | if (!r) { |
856 | r = amdgpu_cs_dependencies(adev, parser); | 832 | r = amdgpu_cs_dependencies(adev, &parser); |
857 | if (r) | 833 | if (r) |
858 | DRM_ERROR("Failed in the dependencies handling %d!\n", r); | 834 | DRM_ERROR("Failed in the dependencies handling %d!\n", r); |
859 | } | 835 | } |
@@ -861,36 +837,38 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
861 | if (r) | 837 | if (r) |
862 | goto out; | 838 | goto out; |
863 | 839 | ||
864 | for (i = 0; i < parser->num_ibs; i++) | 840 | for (i = 0; i < parser.num_ibs; i++) |
865 | trace_amdgpu_cs(parser, i); | 841 | trace_amdgpu_cs(&parser, i); |
866 | 842 | ||
867 | r = amdgpu_cs_ib_vm_chunk(adev, parser); | 843 | r = amdgpu_cs_ib_vm_chunk(adev, &parser); |
868 | if (r) | 844 | if (r) |
869 | goto out; | 845 | goto out; |
870 | 846 | ||
871 | if (amdgpu_enable_scheduler && parser->num_ibs) { | 847 | if (amdgpu_enable_scheduler && parser.num_ibs) { |
872 | struct amdgpu_job *job; | 848 | struct amdgpu_job *job; |
873 | struct amdgpu_ring * ring = parser->ibs->ring; | 849 | struct amdgpu_ring * ring = parser.ibs->ring; |
850 | |||
874 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); | 851 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); |
875 | if (!job) { | 852 | if (!job) { |
876 | r = -ENOMEM; | 853 | r = -ENOMEM; |
877 | goto out; | 854 | goto out; |
878 | } | 855 | } |
856 | |||
879 | job->base.sched = &ring->sched; | 857 | job->base.sched = &ring->sched; |
880 | job->base.s_entity = &parser->ctx->rings[ring->idx].entity; | 858 | job->base.s_entity = &parser.ctx->rings[ring->idx].entity; |
881 | job->adev = parser->adev; | 859 | job->adev = parser.adev; |
882 | job->ibs = parser->ibs; | 860 | job->ibs = parser.ibs; |
883 | job->num_ibs = parser->num_ibs; | 861 | job->num_ibs = parser.num_ibs; |
884 | job->base.owner = parser->filp; | 862 | job->base.owner = parser.filp; |
885 | mutex_init(&job->job_lock); | 863 | mutex_init(&job->job_lock); |
886 | if (job->ibs[job->num_ibs - 1].user) { | 864 | if (job->ibs[job->num_ibs - 1].user) { |
887 | job->uf = parser->uf; | 865 | job->uf = parser.uf; |
888 | job->ibs[job->num_ibs - 1].user = &job->uf; | 866 | job->ibs[job->num_ibs - 1].user = &job->uf; |
889 | parser->uf.bo = NULL; | 867 | parser.uf.bo = NULL; |
890 | } | 868 | } |
891 | 869 | ||
892 | parser->ibs = NULL; | 870 | parser.ibs = NULL; |
893 | parser->num_ibs = 0; | 871 | parser.num_ibs = 0; |
894 | 872 | ||
895 | job->free_job = amdgpu_cs_free_job; | 873 | job->free_job = amdgpu_cs_free_job; |
896 | mutex_lock(&job->job_lock); | 874 | mutex_lock(&job->job_lock); |
@@ -902,24 +880,24 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
902 | goto out; | 880 | goto out; |
903 | } | 881 | } |
904 | cs->out.handle = | 882 | cs->out.handle = |
905 | amdgpu_ctx_add_fence(parser->ctx, ring, | 883 | amdgpu_ctx_add_fence(parser.ctx, ring, |
906 | &job->base.s_fence->base); | 884 | &job->base.s_fence->base); |
907 | job->ibs[job->num_ibs - 1].sequence = cs->out.handle; | 885 | job->ibs[job->num_ibs - 1].sequence = cs->out.handle; |
908 | 886 | ||
909 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | 887 | list_sort(NULL, &parser.validated, cmp_size_smaller_first); |
910 | ttm_eu_fence_buffer_objects(&parser->ticket, | 888 | ttm_eu_fence_buffer_objects(&parser.ticket, |
911 | &parser->validated, | 889 | &parser.validated, |
912 | &job->base.s_fence->base); | 890 | &job->base.s_fence->base); |
913 | 891 | ||
914 | mutex_unlock(&job->job_lock); | 892 | mutex_unlock(&job->job_lock); |
915 | amdgpu_cs_parser_fini_late(parser); | 893 | amdgpu_cs_parser_fini_late(&parser); |
916 | mutex_unlock(&vm->mutex); | 894 | mutex_unlock(&vm->mutex); |
917 | return 0; | 895 | return 0; |
918 | } | 896 | } |
919 | 897 | ||
920 | cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; | 898 | cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; |
921 | out: | 899 | out: |
922 | amdgpu_cs_parser_fini(parser, r, reserved_buffers); | 900 | amdgpu_cs_parser_fini(&parser, r, reserved_buffers); |
923 | mutex_unlock(&vm->mutex); | 901 | mutex_unlock(&vm->mutex); |
924 | r = amdgpu_cs_handle_lockup(adev, r); | 902 | r = amdgpu_cs_handle_lockup(adev, r); |
925 | return r; | 903 | return r; |