author | Sami Kiminki <skiminki@nvidia.com> | 2014-10-23 13:23:46 -0400 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-03-18 15:11:51 -0400 |
commit | be48f4a4519cc285885f8cf886d73ef7675daca8 (patch) | |
tree | 6bc0959d922f6cd4e6f038b179c73bd2b8a55f10 /drivers/gpu/nvgpu | |
parent | 13ca1676ef9c43b137b0ca55e8143da9fa4b5032 (diff) | |
gpu: nvgpu: Sanitize gk20a_gr_handle_notify_pending
Sanitize the cyclestats portion of gk20a_gr_handle_notify_pending() a bit
and fix infinite-loop and buffer-overrun bugs in the case of malformed
cyclestate element headers. Also, convert the WARN_ONs to gk20a_errs for
malformed headers, since these are userspace problems and not worth
kernel stack traces.
Bug 1566834
Change-Id: I69fbd85efdb042c5f0e745fac55eeff3aee0faa8
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
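The core of the change is a bounds-validation pattern for walking variable-size elements in a shared buffer: check that an element header fits inside the buffer before dereferencing it, check that the header's declared size is at least the minimum for its operation and still lands inside the buffer, and only then advance the read offset by that validated size. Below is a minimal, standalone userspace sketch of that pattern; the struct layout, the names `elem_head` and `walk_elements`, and the `operation == 0` end-of-buffer marker are simplified placeholders invented for this illustration, not the driver's definitions.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for an element header such as share_buffer_head. */
struct elem_head {
	uint32_t operation;	/* 0 means end-of-buffer in this sketch */
	uint32_t size;		/* total element size, including this header */
};

static bool walk_elements(const void *buf, uint32_t buffer_size)
{
	uint32_t offset = 0;

	for (;;) {
		const struct elem_head *hdr;

		/* validate offset: the header must fit, with no wrap-around */
		if (offset + sizeof(*hdr) > buffer_size ||
		    offset + sizeof(*hdr) < offset) {
			fprintf(stderr, "element overruns buffer at 0x%x\n",
				offset);
			return false;
		}

		hdr = (const struct elem_head *)((const char *)buf + offset);

		/* validate size: at least a header, in range, no wrap-around */
		if (hdr->size < sizeof(*hdr) ||
		    offset + hdr->size > buffer_size ||
		    offset + hdr->size < offset) {
			fprintf(stderr, "bad element size at 0x%x\n", offset);
			return false;
		}

		if (hdr->operation == 0)	/* end marker */
			return true;

		/* size was validated above, so this always makes progress */
		offset += hdr->size;
	}
}

int main(void)
{
	/* a zero-size element would previously have spun forever */
	struct elem_head bad = { .operation = 1, .size = 0 };

	return walk_elements(&bad, sizeof(bad)) ? 0 : 1;
}
```

Because the declared size is checked to be at least the header size before the offset advances, every iteration makes forward progress, which is what closes the infinite-loop case; the range and wrap-around checks close the overrun case.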
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 50 |
1 file changed, 32 insertions, 18 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 30ea49a7..d568b1bc 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -5164,13 +5164,8 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 	void *virtual_address;
 	u32 buffer_size;
 	u32 offset;
-	u32 new_offset;
 	bool exit;
-	struct share_buffer_head *sh_hdr;
-	u32 raw_reg;
-	u64 mask_orig;
-	u64 v = 0;
-	struct gk20a_cyclestate_buffer_elem *op_elem;
+
 	/* GL will never use payload 0 for cycle state */
 	if ((ch->cyclestate.cyclestate_buffer == NULL) || (isr_data->data_lo == 0))
 		return 0;
@@ -5182,19 +5177,36 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 	offset = isr_data->data_lo;
 	exit = false;
 	while (!exit) {
-		if (offset >= buffer_size) {
-			WARN_ON(1);
+		struct share_buffer_head *sh_hdr;
+		u32 min_element_size;
+
+		/* validate offset */
+		if (offset + sizeof(struct share_buffer_head) > buffer_size ||
+		    offset + sizeof(struct share_buffer_head) < offset) {
+			gk20a_err(dev_from_gk20a(g),
+				  "cyclestats buffer overrun at offset 0x%x\n",
+				  offset);
 			break;
 		}
 
 		sh_hdr = (struct share_buffer_head *)
 			((char *)virtual_address + offset);
 
-		if (sh_hdr->size < sizeof(struct share_buffer_head)) {
-			WARN_ON(1);
+		min_element_size =
+			(sh_hdr->operation == OP_END ?
+			 sizeof(struct share_buffer_head) :
+			 sizeof(struct gk20a_cyclestate_buffer_elem));
+
+		/* validate sh_hdr->size */
+		if (sh_hdr->size < min_element_size ||
+		    offset + sh_hdr->size > buffer_size ||
+		    offset + sh_hdr->size < offset) {
+			gk20a_err(dev_from_gk20a(g),
+				  "bad cyclestate buffer header size at offset 0x%x\n",
+				  offset);
+			sh_hdr->failed = true;
 			break;
 		}
-		new_offset = offset + sh_hdr->size;
 
 		switch (sh_hdr->operation) {
 		case OP_END:
@@ -5204,12 +5216,14 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 		case BAR0_READ32:
 		case BAR0_WRITE32:
 		{
-			bool valid;
-			op_elem =
-				(struct gk20a_cyclestate_buffer_elem *)
-					sh_hdr;
-			valid = is_valid_cyclestats_bar0_offset_gk20a(g,
-					op_elem->offset_bar0);
+			struct gk20a_cyclestate_buffer_elem *op_elem =
+				(struct gk20a_cyclestate_buffer_elem *)sh_hdr;
+			bool valid = is_valid_cyclestats_bar0_offset_gk20a(
+				g, op_elem->offset_bar0);
+			u32 raw_reg;
+			u64 mask_orig;
+			u64 v;
+
 			if (!valid) {
 				gk20a_err(dev_from_gk20a(g),
 					"invalid cycletstats op offset: 0x%x\n",
@@ -5266,7 +5280,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 			break;
 		}
 		sh_hdr->completed = true;
-		offset = new_offset;
+		offset += sh_hdr->size;
 	}
 	mutex_unlock(&ch->cyclestate.cyclestate_buffer_mutex);
 #endif