summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu
diff options
context:
space:
mode:
authorKonsta Holtta <kholtta@nvidia.com>2014-10-29 07:55:32 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:12:06 -0400
commit6e22f39e8747a8ab9c720ef2e5236e5c94767f88 (patch)
treece34fca593c0dc1d4da4c7e0a0cf88962da84c3e /drivers/gpu/nvgpu
parenteb74267b833be530568625f5a1dcb248f0fcda1f (diff)
gpu: nvgpu: cde: fix timeout mgmt, use two lists
If a channel timeout occurs, reload only the particular context/channel where the timeout occurred, instead of destroying whole cde. Reloading happens by allocating a replacement context and marking the offending channel as soon-to-be-deleted. Clean up the code by using two separate lists for free and used contexts. Rename channel deallocation/allocation functions to better describe what they do, and annotate the functions that need locking. Also do not wait for channel idle before submitting, since the acquired context has a ready channel already. Bug 200046882 Change-Id: I4155a85ea0ed79e284309eb2ad0042df3938f1e2 Signed-off-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-on: http://git-master/r/591235 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--drivers/gpu/nvgpu/gk20a/cde_gk20a.c195
-rw-r--r--drivers/gpu/nvgpu/gk20a/cde_gk20a.h9
2 files changed, 128 insertions, 76 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 053dc9d4..472cc81c 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -71,6 +71,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
71} 71}
72 72
73static void gk20a_cde_remove_ctx(struct gk20a_cde_ctx *cde_ctx) 73static void gk20a_cde_remove_ctx(struct gk20a_cde_ctx *cde_ctx)
74__must_hold(&cde_app->mutex)
74{ 75{
75 struct gk20a *g = cde_ctx->g; 76 struct gk20a *g = cde_ctx->g;
76 struct channel_gk20a *ch = cde_ctx->ch; 77 struct channel_gk20a *ch = cde_ctx->ch;
@@ -86,11 +87,13 @@ static void gk20a_cde_remove_ctx(struct gk20a_cde_ctx *cde_ctx)
86 87
87 /* housekeeping on app */ 88 /* housekeeping on app */
88 list_del(&cde_ctx->list); 89 list_del(&cde_ctx->list);
89 cde_ctx->g->cde_app.lru_len--; 90 cde_ctx->g->cde_app.ctx_count--;
90 kfree(cde_ctx); 91 kfree(cde_ctx);
91} 92}
92 93
93static void gk20a_cde_prepare_ctx_remove(struct gk20a_cde_ctx *cde_ctx) 94static void gk20a_cde_prepare_ctx_remove(struct gk20a_cde_ctx *cde_ctx)
95__releases(&cde_app->mutex)
96__acquires(&cde_app->mutex)
94{ 97{
95 struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app; 98 struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
96 99
@@ -110,29 +113,39 @@ static void gk20a_cde_prepare_ctx_remove(struct gk20a_cde_ctx *cde_ctx)
110 mutex_lock(&cde_app->mutex); 113 mutex_lock(&cde_app->mutex);
111} 114}
112 115
113static void gk20a_cde_deallocate_contexts(struct gk20a *g) 116static void gk20a_cde_remove_contexts(struct gk20a *g)
117__must_hold(&cde_app->mutex)
114{ 118{
115 struct gk20a_cde_app *cde_app = &g->cde_app; 119 struct gk20a_cde_app *cde_app = &g->cde_app;
116 struct gk20a_cde_ctx *cde_ctx, *cde_ctx_save; 120 struct gk20a_cde_ctx *cde_ctx, *cde_ctx_save;
117 121
118 list_for_each_entry_safe(cde_ctx, cde_ctx_save, 122 list_for_each_entry_safe(cde_ctx, cde_ctx_save,
119 &cde_app->cde_ctx_lru, list) { 123 &cde_app->free_contexts, list) {
124 gk20a_cde_prepare_ctx_remove(cde_ctx);
125 gk20a_cde_remove_ctx(cde_ctx);
126 }
127
128 list_for_each_entry_safe(cde_ctx, cde_ctx_save,
129 &cde_app->used_contexts, list) {
120 gk20a_cde_prepare_ctx_remove(cde_ctx); 130 gk20a_cde_prepare_ctx_remove(cde_ctx);
121 gk20a_cde_remove_ctx(cde_ctx); 131 gk20a_cde_remove_ctx(cde_ctx);
122 } 132 }
123} 133}
124 134
125static void gk20a_cde_stop(struct gk20a *g) 135static void gk20a_cde_stop(struct gk20a *g)
136__must_hold(&cde_app->mutex)
126{ 137{
127 struct gk20a_cde_app *cde_app = &g->cde_app; 138 struct gk20a_cde_app *cde_app = &g->cde_app;
128 139
129 /* prevent further conversions and delayed works from working */ 140 /* prevent further conversions and delayed works from working */
130 cde_app->initialised = false; 141 cde_app->initialised = false;
131 /* free all data, empty the list */ 142 /* free all data, empty the list */
132 gk20a_cde_deallocate_contexts(g); 143 gk20a_cde_remove_contexts(g);
133} 144}
134 145
135void gk20a_cde_destroy(struct gk20a *g) 146void gk20a_cde_destroy(struct gk20a *g)
147__acquires(&cde_app->mutex)
148__releases(&cde_app->mutex)
136{ 149{
137 struct gk20a_cde_app *cde_app = &g->cde_app; 150 struct gk20a_cde_app *cde_app = &g->cde_app;
138 151
@@ -145,48 +158,66 @@ void gk20a_cde_destroy(struct gk20a *g)
145} 158}
146 159
147void gk20a_cde_suspend(struct gk20a *g) 160void gk20a_cde_suspend(struct gk20a *g)
161__acquires(&cde_app->mutex)
162__releases(&cde_app->mutex)
148{ 163{
149
150 struct gk20a_cde_app *cde_app = &g->cde_app; 164 struct gk20a_cde_app *cde_app = &g->cde_app;
151 struct gk20a_cde_ctx *cde_ctx, *cde_ctx_save; 165 struct gk20a_cde_ctx *cde_ctx, *cde_ctx_save;
152 166
153 if (!cde_app->initialised) 167 if (!cde_app->initialised)
154 return; 168 return;
155 169
170 mutex_lock(&cde_app->mutex);
171
156 list_for_each_entry_safe(cde_ctx, cde_ctx_save, 172 list_for_each_entry_safe(cde_ctx, cde_ctx_save,
157 &cde_app->cde_ctx_lru, list) { 173 &cde_app->free_contexts, list) {
158 if (cde_ctx->is_temporary) { 174 if (cde_ctx->is_temporary)
159 mutex_lock(&cde_app->mutex);
160 cancel_delayed_work(&cde_ctx->ctx_deleter_work); 175 cancel_delayed_work(&cde_ctx->ctx_deleter_work);
161 mutex_unlock(&cde_app->mutex);
162 }
163 } 176 }
164 177
178 list_for_each_entry_safe(cde_ctx, cde_ctx_save,
179 &cde_app->used_contexts, list) {
180 if (cde_ctx->is_temporary)
181 cancel_delayed_work(&cde_ctx->ctx_deleter_work);
182 }
183
184 mutex_unlock(&cde_app->mutex);
185
165} 186}
166 187
167static int gk20a_cde_allocate_contexts(struct gk20a *g) 188static int gk20a_cde_create_context(struct gk20a *g)
189__must_hold(&cde_app->mutex)
168{ 190{
169 struct gk20a_cde_app *cde_app = &g->cde_app; 191 struct gk20a_cde_app *cde_app = &g->cde_app;
170 struct gk20a_cde_ctx *cde_ctx; 192 struct gk20a_cde_ctx *cde_ctx;
171 int err = 0; 193
194 cde_ctx = gk20a_cde_allocate_context(g);
195 if (IS_ERR(cde_ctx))
196 return PTR_ERR(cde_ctx);
197
198 list_add(&cde_ctx->list, &cde_app->free_contexts);
199 cde_app->ctx_count++;
200 if (cde_app->ctx_count > cde_app->ctx_count_top)
201 cde_app->ctx_count_top = cde_app->ctx_count;
202
203 return 0;
204}
205
206static int gk20a_cde_create_contexts(struct gk20a *g)
207__must_hold(&g->cde_app->mutex)
208{
209 int err;
172 int i; 210 int i;
173 211
174 for (i = 0; i < NUM_CDE_CONTEXTS; i++) { 212 for (i = 0; i < NUM_CDE_CONTEXTS; i++) {
175 cde_ctx = gk20a_cde_allocate_context(g); 213 err = gk20a_cde_create_context(g);
176 if (IS_ERR(cde_ctx)) { 214 if (err)
177 err = PTR_ERR(cde_ctx);
178 goto out; 215 goto out;
179 }
180
181 list_add(&cde_ctx->list, &cde_app->cde_ctx_lru);
182 cde_app->lru_len++;
183 if (cde_app->lru_len > cde_app->lru_max_len)
184 cde_app->lru_max_len = cde_app->lru_len;
185 } 216 }
186 217
187 return 0; 218 return 0;
188out: 219out:
189 gk20a_cde_deallocate_contexts(g); 220 gk20a_cde_remove_contexts(g);
190 return err; 221 return err;
191} 222}
192 223
@@ -682,6 +713,8 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
682} 713}
683 714
684static void gk20a_ctx_release(struct gk20a_cde_ctx *cde_ctx) 715static void gk20a_ctx_release(struct gk20a_cde_ctx *cde_ctx)
716__acquires(&cde_app->mutex)
717__releases(&cde_app->mutex)
685{ 718{
686 struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app; 719 struct gk20a_cde_app *cde_app = &cde_ctx->g->cde_app;
687 720
@@ -690,13 +723,15 @@ static void gk20a_ctx_release(struct gk20a_cde_ctx *cde_ctx)
690 mutex_lock(&cde_app->mutex); 723 mutex_lock(&cde_app->mutex);
691 724
692 cde_ctx->in_use = false; 725 cde_ctx->in_use = false;
693 list_move(&cde_ctx->list, &cde_app->cde_ctx_lru); 726 list_move(&cde_ctx->list, &cde_app->free_contexts);
694 cde_app->lru_used--; 727 cde_app->ctx_usecount--;
695 728
696 mutex_unlock(&cde_app->mutex); 729 mutex_unlock(&cde_app->mutex);
697} 730}
698 731
699static void gk20a_cde_ctx_deleter_fn(struct work_struct *work) 732static void gk20a_cde_ctx_deleter_fn(struct work_struct *work)
733__acquires(&cde_app->mutex)
734__releases(&cde_app->mutex)
700{ 735{
701 struct delayed_work *delay_work = to_delayed_work(work); 736 struct delayed_work *delay_work = to_delayed_work(work);
702 struct gk20a_cde_ctx *cde_ctx = container_of(delay_work, 737 struct gk20a_cde_ctx *cde_ctx = container_of(delay_work,
@@ -733,9 +768,9 @@ static void gk20a_cde_ctx_deleter_fn(struct work_struct *work)
733 768
734 gk20a_cde_remove_ctx(cde_ctx); 769 gk20a_cde_remove_ctx(cde_ctx);
735 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 770 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
736 "cde: destroyed %p len=%d use=%d max=%d", 771 "cde: destroyed %p count=%d use=%d max=%d",
737 cde_ctx, cde_app->lru_len, cde_app->lru_used, 772 cde_ctx, cde_app->ctx_count, cde_app->ctx_usecount,
738 cde_app->lru_max_len); 773 cde_app->ctx_count_top);
739 774
740out: 775out:
741 mutex_unlock(&cde_app->mutex); 776 mutex_unlock(&cde_app->mutex);
@@ -743,32 +778,33 @@ out:
743} 778}
744 779
745static struct gk20a_cde_ctx *gk20a_cde_get_context(struct gk20a *g) 780static struct gk20a_cde_ctx *gk20a_cde_get_context(struct gk20a *g)
781__must_hold(&cde_app->mutex)
746{ 782{
747 struct gk20a_cde_app *cde_app = &g->cde_app; 783 struct gk20a_cde_app *cde_app = &g->cde_app;
748 struct gk20a_cde_ctx *cde_ctx; 784 struct gk20a_cde_ctx *cde_ctx;
749 785
750 /* try to get a jobless context. list is in lru order */ 786 /* idle context available? */
751
752 cde_ctx = list_first_entry(&cde_app->cde_ctx_lru,
753 struct gk20a_cde_ctx, list);
754 787
755 if (!cde_ctx->in_use) { 788 if (!list_empty(&cde_app->free_contexts)) {
789 cde_ctx = list_first_entry(&cde_app->free_contexts,
790 struct gk20a_cde_ctx, list);
756 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 791 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
757 "cde: got free %p len=%d use=%d max=%d", 792 "cde: got free %p count=%d use=%d max=%d",
758 cde_ctx, cde_app->lru_len, cde_app->lru_used, 793 cde_ctx, cde_app->ctx_count,
759 cde_app->lru_max_len); 794 cde_app->ctx_usecount,
795 cde_app->ctx_count_top);
760 /* deleter work may be scheduled, but in_use prevents it */ 796 /* deleter work may be scheduled, but in_use prevents it */
761 cde_ctx->in_use = true; 797 cde_ctx->in_use = true;
762 list_move_tail(&cde_ctx->list, &cde_app->cde_ctx_lru); 798 list_move(&cde_ctx->list, &cde_app->used_contexts);
763 cde_app->lru_used++; 799 cde_app->ctx_usecount++;
764 return cde_ctx; 800 return cde_ctx;
765 } 801 }
766 802
767 /* no free contexts, get a temporary one */ 803 /* no free contexts, get a temporary one */
768 804
769 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, 805 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx,
770 "cde: no free contexts, list len=%d", 806 "cde: no free contexts, count=%d",
771 cde_app->lru_len); 807 cde_app->ctx_count);
772 808
773 cde_ctx = gk20a_cde_allocate_context(g); 809 cde_ctx = gk20a_cde_allocate_context(g);
774 if (IS_ERR(cde_ctx)) { 810 if (IS_ERR(cde_ctx)) {
@@ -779,11 +815,11 @@ static struct gk20a_cde_ctx *gk20a_cde_get_context(struct gk20a *g)
779 815
780 cde_ctx->in_use = true; 816 cde_ctx->in_use = true;
781 cde_ctx->is_temporary = true; 817 cde_ctx->is_temporary = true;
782 list_add_tail(&cde_ctx->list, &cde_app->cde_ctx_lru); 818 cde_app->ctx_usecount++;
783 cde_app->lru_used++; 819 cde_app->ctx_count++;
784 cde_app->lru_len++; 820 if (cde_app->ctx_count > cde_app->ctx_count_top)
785 if (cde_app->lru_len > cde_app->lru_max_len) 821 cde_app->ctx_count_top = cde_app->ctx_count;
786 cde_app->lru_max_len = cde_app->lru_len; 822 list_add(&cde_ctx->list, &cde_app->used_contexts);
787 823
788 return cde_ctx; 824 return cde_ctx;
789} 825}
@@ -822,6 +858,8 @@ int gk20a_cde_convert(struct gk20a *g,
822 u32 dst_size, struct nvgpu_fence *fence, 858 u32 dst_size, struct nvgpu_fence *fence,
823 u32 __flags, struct gk20a_cde_param *params, 859 u32 __flags, struct gk20a_cde_param *params,
824 int num_params, struct gk20a_fence **fence_out) 860 int num_params, struct gk20a_fence **fence_out)
861__acquires(&cde_app->mutex)
862__releases(&cde_app->mutex)
825{ 863{
826 struct gk20a_cde_app *cde_app = &g->cde_app; 864 struct gk20a_cde_app *cde_app = &g->cde_app;
827 struct gk20a_comptags comptags; 865 struct gk20a_comptags comptags;
@@ -866,23 +904,6 @@ int gk20a_cde_convert(struct gk20a *g,
866 if (!dst_size) 904 if (!dst_size)
867 dst_size = dst->size - dst_byte_offset; 905 dst_size = dst->size - dst_byte_offset;
868 906
869 /* reload buffer converter if it has failed */
870 if (cde_ctx->ch->has_timedout) {
871 mutex_unlock(&cde_app->mutex);
872 gk20a_warn(&cde_ctx->pdev->dev, "cde: had timed out, reloading");
873 err = gk20a_cde_reload(g);
874 if (err)
875 return err;
876 mutex_lock(&cde_app->mutex);
877 }
878
879 /* wait for channel idle */
880 err = gk20a_channel_finish(cde_ctx->ch, 2000);
881 if (err) {
882 gk20a_warn(&cde_ctx->pdev->dev, "cde: old work could not be finished");
883 goto exit_unlock;
884 }
885
886 /* store source buffer compression tags */ 907 /* store source buffer compression tags */
887 gk20a_get_comptags(&g->dev->dev, dst, &comptags); 908 gk20a_get_comptags(&g->dev->dev, dst, &comptags);
888 cde_ctx->src_vaddr = dst_vaddr; 909 cde_ctx->src_vaddr = dst_vaddr;
@@ -952,25 +973,50 @@ exit_unlock:
952} 973}
953 974
954static void gk20a_cde_finished_ctx_cb(struct channel_gk20a *ch, void *data) 975static void gk20a_cde_finished_ctx_cb(struct channel_gk20a *ch, void *data)
976__acquires(&cde_app->mutex)
977__releases(&cde_app->mutex)
955{ 978{
956 struct gk20a_cde_ctx *cde_ctx = data; 979 struct gk20a_cde_ctx *cde_ctx = data;
957 bool empty; 980 struct gk20a *g = cde_ctx->g;
981 struct gk20a_cde_app *cde_app = &g->cde_app;
982 bool channel_idle;
958 983
959 mutex_lock(&ch->jobs_lock); 984 mutex_lock(&ch->jobs_lock);
960 empty = list_empty(&ch->jobs); 985 channel_idle = list_empty(&ch->jobs);
961 mutex_unlock(&ch->jobs_lock); 986 mutex_unlock(&ch->jobs_lock);
962 987
963 if (!empty) 988 if (!channel_idle)
964 return; 989 return;
965 990
966 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx); 991 gk20a_dbg(gpu_dbg_fn | gpu_dbg_cde_ctx, "cde: finished %p", cde_ctx);
967 992
993 if (ch->has_timedout) {
994 if (cde_ctx->is_temporary) {
995 gk20a_warn(&cde_ctx->pdev->dev,
996 "cde: channel had timed out"
997 " (temporary channel)");
998 /* going to be deleted anyway */
999 } else {
1000 gk20a_warn(&cde_ctx->pdev->dev,
1001 "cde: channel had timed out"
1002 ", reloading");
1003 /* mark it to be deleted, replace with a new one */
1004 mutex_lock(&cde_app->mutex);
1005 cde_ctx->is_temporary = true;
1006 if (gk20a_cde_create_context(g)) {
1007 gk20a_err(&cde_ctx->pdev->dev,
1008 "cde: can't replace context");
1009 }
1010 mutex_unlock(&cde_app->mutex);
1011 }
1012 } else {
1013 gk20a_ctx_release(cde_ctx);
1014 }
1015
968 /* delete temporary contexts later */ 1016 /* delete temporary contexts later */
969 if (cde_ctx->is_temporary) 1017 if (cde_ctx->is_temporary)
970 schedule_delayed_work(&cde_ctx->ctx_deleter_work, 1018 schedule_delayed_work(&cde_ctx->ctx_deleter_work,
971 msecs_to_jiffies(CTX_DELETE_TIME)); 1019 msecs_to_jiffies(CTX_DELETE_TIME));
972
973 gk20a_ctx_release(cde_ctx);
974} 1020}
975 1021
976static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) 1022static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
@@ -1056,6 +1102,8 @@ err_get_gk20a_channel:
1056} 1102}
1057 1103
1058int gk20a_cde_reload(struct gk20a *g) 1104int gk20a_cde_reload(struct gk20a *g)
1105__acquires(&cde_app->mutex)
1106__releases(&cde_app->mutex)
1059{ 1107{
1060 struct gk20a_cde_app *cde_app = &g->cde_app; 1108 struct gk20a_cde_app *cde_app = &g->cde_app;
1061 int err; 1109 int err;
@@ -1071,7 +1119,7 @@ int gk20a_cde_reload(struct gk20a *g)
1071 1119
1072 gk20a_cde_stop(g); 1120 gk20a_cde_stop(g);
1073 1121
1074 err = gk20a_cde_allocate_contexts(g); 1122 err = gk20a_cde_create_contexts(g);
1075 if (!err) 1123 if (!err)
1076 cde_app->initialised = true; 1124 cde_app->initialised = true;
1077 1125
@@ -1082,6 +1130,8 @@ int gk20a_cde_reload(struct gk20a *g)
1082} 1130}
1083 1131
1084int gk20a_init_cde_support(struct gk20a *g) 1132int gk20a_init_cde_support(struct gk20a *g)
1133__acquires(&cde_app->mutex)
1134__releases(&cde_app->mutex)
1085{ 1135{
1086 struct gk20a_cde_app *cde_app = &g->cde_app; 1136 struct gk20a_cde_app *cde_app = &g->cde_app;
1087 int err; 1137 int err;
@@ -1094,12 +1144,13 @@ int gk20a_init_cde_support(struct gk20a *g)
1094 mutex_init(&cde_app->mutex); 1144 mutex_init(&cde_app->mutex);
1095 mutex_lock(&cde_app->mutex); 1145 mutex_lock(&cde_app->mutex);
1096 1146
1097 INIT_LIST_HEAD(&cde_app->cde_ctx_lru); 1147 INIT_LIST_HEAD(&cde_app->free_contexts);
1098 cde_app->lru_len = 0; 1148 INIT_LIST_HEAD(&cde_app->used_contexts);
1099 cde_app->lru_max_len = 0; 1149 cde_app->ctx_count = 0;
1100 cde_app->lru_used = 0; 1150 cde_app->ctx_count_top = 0;
1151 cde_app->ctx_usecount = 0;
1101 1152
1102 err = gk20a_cde_allocate_contexts(g); 1153 err = gk20a_cde_create_contexts(g);
1103 if (!err) 1154 if (!err)
1104 cde_app->initialised = true; 1155 cde_app->initialised = true;
1105 1156
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
index 9d7dbba6..3347490c 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
@@ -253,10 +253,11 @@ struct gk20a_cde_app {
253 bool initialised; 253 bool initialised;
254 struct mutex mutex; 254 struct mutex mutex;
255 255
256 struct list_head cde_ctx_lru; 256 struct list_head free_contexts;
257 int lru_len; 257 struct list_head used_contexts;
258 int lru_max_len; 258 int ctx_count;
259 int lru_used; 259 int ctx_usecount;
260 int ctx_count_top;
260 261
261 u32 shader_parameter; 262 u32 shader_parameter;
262}; 263};