diff options
author | Tomas Winkler <tomas.winkler@intel.com> | 2015-02-10 03:39:36 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2015-03-01 22:36:59 -0500 |
commit | 3d33ff2457355a9dd3c3178b04ab6669882b306c (patch) | |
tree | 692168d2c6df72783349246c879995212d3f7dd3 /drivers/misc/mei/interrupt.c | |
parent | 3908be6f9aa5517bc717f8ffdaaafd89a1b78471 (diff) |
mei: fix device reset on mei_cl_irq_read_msg allocation failure
On memory allocation failure, mei_cl_irq_read_msg will
return with an error that will cause a device reset.
Instead, we should propagate the error to the caller and
just clean the read queues.
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/misc/mei/interrupt.c')
-rw-r--r-- | drivers/misc/mei/interrupt.c | 117 |
1 file changed, 59 insertions, 58 deletions
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 711cddfa9c99..587cb04a3cf5 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
@@ -69,85 +69,91 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, | |||
69 | cl->me_client_id == mei_hdr->me_addr; | 69 | cl->me_client_id == mei_hdr->me_addr; |
70 | } | 70 | } |
71 | /** | 71 | /** |
72 | * mei_cl_is_reading - checks if the client | 72 | * mei_cl_is_reading - checks if the client is in reading state |
73 | * is the one to read this message | ||
74 | * | 73 | * |
75 | * @cl: mei client | 74 | * @cl: mei client |
76 | * @mei_hdr: header of mei message | ||
77 | * | 75 | * |
78 | * Return: true on match and false otherwise | 76 | * Return: true if the client is reading |
79 | */ | 77 | */ |
80 | static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr) | 78 | static bool mei_cl_is_reading(struct mei_cl *cl) |
81 | { | 79 | { |
82 | return mei_cl_hbm_equal(cl, mei_hdr) && | 80 | return cl->state == MEI_FILE_CONNECTED && |
83 | cl->state == MEI_FILE_CONNECTED && | ||
84 | cl->reading_state != MEI_READ_COMPLETE; | 81 | cl->reading_state != MEI_READ_COMPLETE; |
85 | } | 82 | } |
86 | 83 | ||
87 | /** | 84 | /** |
88 | * mei_cl_irq_read_msg - process client message | 85 | * mei_cl_irq_read_msg - process client message |
89 | * | 86 | * |
90 | * @dev: the device structure | 87 | * @cl: reading client |
91 | * @mei_hdr: header of mei client message | 88 | * @mei_hdr: header of mei client message |
92 | * @complete_list: An instance of our list structure | 89 | * @complete_list: completion list |
93 | * | 90 | * |
94 | * Return: 0 on success, <0 on failure. | 91 | * Return: always 0 |
95 | */ | 92 | */ |
96 | static int mei_cl_irq_read_msg(struct mei_device *dev, | 93 | static int mei_cl_irq_read_msg(struct mei_cl *cl, |
97 | struct mei_msg_hdr *mei_hdr, | 94 | struct mei_msg_hdr *mei_hdr, |
98 | struct mei_cl_cb *complete_list) | 95 | struct mei_cl_cb *complete_list) |
99 | { | 96 | { |
100 | struct mei_cl *cl; | 97 | struct mei_device *dev = cl->dev; |
101 | struct mei_cl_cb *cb, *next; | 98 | struct mei_cl_cb *cb; |
102 | unsigned char *buffer = NULL; | 99 | unsigned char *buffer = NULL; |
103 | 100 | ||
104 | list_for_each_entry_safe(cb, next, &dev->read_list.list, list) { | 101 | list_for_each_entry(cb, &dev->read_list.list, list) { |
105 | cl = cb->cl; | 102 | if (cl == cb->cl) |
106 | if (!mei_cl_is_reading(cl, mei_hdr)) | 103 | break; |
107 | continue; | 104 | } |
108 | 105 | ||
109 | cl->reading_state = MEI_READING; | 106 | if (&cb->list == &dev->read_list.list) { |
107 | dev_err(dev->dev, "no reader found\n"); | ||
108 | goto out; | ||
109 | } | ||
110 | 110 | ||
111 | if (cb->response_buffer.size == 0 || | 111 | if (!mei_cl_is_reading(cl)) { |
112 | cb->response_buffer.data == NULL) { | 112 | cl_err(dev, cl, "cl is not reading state=%d reading state=%d\n", |
113 | cl_err(dev, cl, "response buffer is not allocated.\n"); | 113 | cl->state, cl->reading_state); |
114 | list_del(&cb->list); | 114 | goto out; |
115 | return -ENOMEM; | 115 | } |
116 | } | ||
117 | 116 | ||
118 | if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) { | 117 | cl->reading_state = MEI_READING; |
119 | cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n", | ||
120 | cb->response_buffer.size, | ||
121 | mei_hdr->length, cb->buf_idx); | ||
122 | buffer = krealloc(cb->response_buffer.data, | ||
123 | mei_hdr->length + cb->buf_idx, | ||
124 | GFP_KERNEL); | ||
125 | |||
126 | if (!buffer) { | ||
127 | list_del(&cb->list); | ||
128 | return -ENOMEM; | ||
129 | } | ||
130 | cb->response_buffer.data = buffer; | ||
131 | cb->response_buffer.size = | ||
132 | mei_hdr->length + cb->buf_idx; | ||
133 | } | ||
134 | 118 | ||
135 | buffer = cb->response_buffer.data + cb->buf_idx; | 119 | if (cb->response_buffer.size == 0 || |
136 | mei_read_slots(dev, buffer, mei_hdr->length); | 120 | cb->response_buffer.data == NULL) { |
121 | cl_err(dev, cl, "response buffer is not allocated.\n"); | ||
122 | list_move_tail(&cb->list, &complete_list->list); | ||
123 | cb->status = -ENOMEM; | ||
124 | goto out; | ||
125 | } | ||
137 | 126 | ||
138 | cb->buf_idx += mei_hdr->length; | 127 | if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) { |
139 | if (mei_hdr->msg_complete) { | 128 | cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n", |
140 | cl->status = 0; | 129 | cb->response_buffer.size, mei_hdr->length, cb->buf_idx); |
141 | list_del(&cb->list); | 130 | buffer = krealloc(cb->response_buffer.data, |
142 | cl_dbg(dev, cl, "completed read length = %lu\n", | 131 | mei_hdr->length + cb->buf_idx, |
143 | cb->buf_idx); | 132 | GFP_KERNEL); |
144 | list_add_tail(&cb->list, &complete_list->list); | 133 | |
134 | if (!buffer) { | ||
135 | cb->status = -ENOMEM; | ||
136 | list_move_tail(&cb->list, &complete_list->list); | ||
137 | goto out; | ||
145 | } | 138 | } |
146 | break; | 139 | cb->response_buffer.data = buffer; |
140 | cb->response_buffer.size = mei_hdr->length + cb->buf_idx; | ||
147 | } | 141 | } |
148 | 142 | ||
149 | dev_dbg(dev->dev, "message read\n"); | 143 | buffer = cb->response_buffer.data + cb->buf_idx; |
144 | mei_read_slots(dev, buffer, mei_hdr->length); | ||
145 | |||
146 | cb->buf_idx += mei_hdr->length; | ||
147 | if (mei_hdr->msg_complete) { | ||
148 | cl_dbg(dev, cl, "completed read length = %lu\n", | ||
149 | cb->buf_idx); | ||
150 | list_move_tail(&cb->list, &complete_list->list); | ||
151 | } | ||
152 | |||
153 | out: | ||
150 | if (!buffer) { | 154 | if (!buffer) { |
155 | /* assume that mei_hdr->length <= MEI_RD_MSG_BUF_SIZE */ | ||
156 | BUG_ON(mei_hdr->length > MEI_RD_MSG_BUF_SIZE); | ||
151 | mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); | 157 | mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); |
152 | dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n", | 158 | dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n", |
153 | MEI_HDR_PRM(mei_hdr)); | 159 | MEI_HDR_PRM(mei_hdr)); |
@@ -389,14 +395,10 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
389 | goto end; | 395 | goto end; |
390 | } | 396 | } |
391 | } else { | 397 | } else { |
392 | ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); | 398 | ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list); |
393 | if (ret) { | ||
394 | dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n", | ||
395 | ret); | ||
396 | goto end; | ||
397 | } | ||
398 | } | 399 | } |
399 | 400 | ||
401 | |||
400 | reset_slots: | 402 | reset_slots: |
401 | /* reset the number of slots and header */ | 403 | /* reset the number of slots and header */ |
402 | *slots = mei_count_full_read_slots(dev); | 404 | *slots = mei_count_full_read_slots(dev); |
@@ -636,4 +638,3 @@ out: | |||
636 | schedule_delayed_work(&dev->timer_work, 2 * HZ); | 638 | schedule_delayed_work(&dev->timer_work, 2 * HZ); |
637 | mutex_unlock(&dev->device_lock); | 639 | mutex_unlock(&dev->device_lock); |
638 | } | 640 | } |
639 | |||