path: root/drivers/misc/mei/iorw.c
Diffstat (limited to 'drivers/misc/mei/iorw.c')
-rw-r--r--    drivers/misc/mei/iorw.c    455
1 file changed, 115 insertions(+), 340 deletions(-)
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
index fcba98eb892..eb93a1b53b9 100644
--- a/drivers/misc/mei/iorw.c
+++ b/drivers/misc/mei/iorw.c
@@ -39,6 +39,95 @@
 #include "interface.h"
 
 /**
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+        if (cb == NULL)
+                return;
+
+        kfree(cb->request_buffer.data);
+        kfree(cb->response_buffer.data);
+        kfree(cb);
+}
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl - mei client
+ * @file: pointer to file structure
+ *
+ * returns mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+{
+        struct mei_cl_cb *cb;
+
+        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+        if (!cb)
+                return NULL;
+
+        mei_io_list_init(cb);
+
+        cb->file_object = fp;
+        cb->cl = cl;
+        cb->buf_idx = 0;
+        return cb;
+}
+
+
+/**
+ * mei_io_cb_alloc_req_buf - allocate request buffer
+ *
+ * @cb - io callback structure
+ * @size: size of the buffer
+ *
+ * returns 0 on success
+ *         -EINVAL if cb is NULL
+ *         -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+{
+        if (!cb)
+                return -EINVAL;
+
+        if (length == 0)
+                return 0;
+
+        cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
+        if (!cb->request_buffer.data)
+                return -ENOMEM;
+        cb->request_buffer.size = length;
+        return 0;
+}
+/**
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
+ *
+ * @cb - io callback structure
+ * @size: size of the buffer
+ *
+ * returns 0 on success
+ *         -EINVAL if cb is NULL
+ *         -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+{
+        if (!cb)
+                return -EINVAL;
+
+        if (length == 0)
+                return 0;
+
+        cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
+        if (!cb->response_buffer.data)
+                return -ENOMEM;
+        cb->response_buffer.size = length;
+        return 0;
+}
+
+
+/**
  * mei_me_cl_by_id return index to me_clients for client_id
  *
  * @dev: the device structure
@@ -82,9 +171,7 @@ int mei_ioctl_connect_client(struct file *file,
         struct mei_cl_cb *cb;
         struct mei_client *client;
         struct mei_cl *cl;
-        struct mei_cl *cl_pos = NULL;
-        struct mei_cl *cl_next = NULL;
-        long timeout = CONNECT_TIMEOUT;
+        long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
         int i;
         int err;
         int rets;
@@ -97,16 +184,14 @@ int mei_ioctl_connect_client(struct file *file,
 
         dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
 
-
         /* buffered ioctl cb */
-        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+        cb = mei_io_cb_init(cl, file);
         if (!cb) {
                 rets = -ENOMEM;
                 goto end;
         }
-        INIT_LIST_HEAD(&cb->cb_list);
 
-        cb->major_file_operations = MEI_IOCTL;
+        cb->fop_type = MEI_FOP_IOCTL;
 
         if (dev->dev_state != MEI_DEV_ENABLED) {
                 rets = -ENODEV;
@@ -142,21 +227,9 @@ int mei_ioctl_connect_client(struct file *file,
                 goto end;
         }
         clear_bit(cl->host_client_id, dev->host_clients_map);
-        list_for_each_entry_safe(cl_pos, cl_next,
-                        &dev->file_list, link) {
-                if (mei_cl_cmp_id(cl, cl_pos)) {
-                        dev_dbg(&dev->pdev->dev,
-                                "remove file private data node host"
-                                " client = %d, ME client = %d.\n",
-                                cl_pos->host_client_id,
-                                cl_pos->me_client_id);
-                        list_del(&cl_pos->link);
-                }
+        mei_me_cl_unlink(dev, cl);
 
-        }
-        dev_dbg(&dev->pdev->dev, "free file private data memory.\n");
         kfree(cl);
-
         cl = NULL;
         file->private_data = &dev->iamthif_cl;
 
@@ -192,25 +265,19 @@ int mei_ioctl_connect_client(struct file *file,
                 } else {
                         dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
                         cl->timer_count = MEI_CONNECT_TIMEOUT;
-                        cb->file_private = cl;
-                        list_add_tail(&cb->cb_list,
-                                &dev->ctrl_rd_list.mei_cb.
-                                cb_list);
+                        list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
                 }
 
 
         } else {
                 dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
-                cb->file_private = cl;
                 dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
-                list_add_tail(&cb->cb_list,
-                                &dev->ctrl_wr_list.mei_cb.cb_list);
+                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
         }
         mutex_unlock(&dev->device_lock);
         err = wait_event_timeout(dev->wait_recvd_msg,
                         (MEI_FILE_CONNECTED == cl->state ||
-                         MEI_FILE_DISCONNECTED == cl->state),
-                        timeout * HZ);
+                         MEI_FILE_DISCONNECTED == cl->state), timeout);
 
         mutex_lock(&dev->device_lock);
         if (MEI_FILE_CONNECTED == cl->state) {
@@ -234,153 +301,7 @@ int mei_ioctl_connect_client(struct file *file,
         rets = 0;
 end:
         dev_dbg(&dev->pdev->dev, "free connect cb memory.");
-        kfree(cb);
-        return rets;
-}
-
-/**
- * find_amthi_read_list_entry - finds a amthilist entry for current file
- *
- * @dev: the device structure
- * @file: pointer to file object
- *
- * returns returned a list entry on success, NULL on failure.
- */
-struct mei_cl_cb *find_amthi_read_list_entry(
-                struct mei_device *dev,
-                struct file *file)
-{
-        struct mei_cl *cl_temp;
-        struct mei_cl_cb *pos = NULL;
-        struct mei_cl_cb *next = NULL;
-
-        list_for_each_entry_safe(pos, next,
-                &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
-                cl_temp = (struct mei_cl *)pos->file_private;
-                if (cl_temp && cl_temp == &dev->iamthif_cl &&
-                    pos->file_object == file)
-                        return pos;
-        }
-        return NULL;
-}
-
-/**
- * amthi_read - read data from AMTHI client
- *
- * @dev: the device structure
- * @if_num: minor number
- * @file: pointer to file object
- * @*ubuf: pointer to user data in user space
- * @length: data length to read
- * @offset: data read offset
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns
- *  returned data length on success,
- *  zero if no data to read,
- *  negative on failure.
- */
-int amthi_read(struct mei_device *dev, struct file *file,
-               char __user *ubuf, size_t length, loff_t *offset)
-{
-        int rets;
-        int wait_ret;
-        struct mei_cl_cb *cb = NULL;
-        struct mei_cl *cl = file->private_data;
-        unsigned long timeout;
-        int i;
-
-        /* Only Posible if we are in timeout */
-        if (!cl || cl != &dev->iamthif_cl) {
-                dev_dbg(&dev->pdev->dev, "bad file ext.\n");
-                return -ETIMEDOUT;
-        }
-
-        i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
-
-        if (i < 0) {
-                dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
-                return -ENODEV;
-        }
-        dev_dbg(&dev->pdev->dev, "checking amthi data\n");
-        cb = find_amthi_read_list_entry(dev, file);
-
-        /* Check for if we can block or not*/
-        if (cb == NULL && file->f_flags & O_NONBLOCK)
-                return -EAGAIN;
-
-
-        dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
-        while (cb == NULL) {
-                /* unlock the Mutex */
-                mutex_unlock(&dev->device_lock);
-
-                wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
-                        (cb = find_amthi_read_list_entry(dev, file)));
-
-                if (wait_ret)
-                        return -ERESTARTSYS;
-
-                dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
-
-                /* Locking again the Mutex */
-                mutex_lock(&dev->device_lock);
-        }
-
-
-        dev_dbg(&dev->pdev->dev, "Got amthi data\n");
-        dev->iamthif_timer = 0;
-
-        if (cb) {
-                timeout = cb->read_time + msecs_to_jiffies(IAMTHIF_READ_TIMER);
-                dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
-                                timeout);
-
-                if (time_after(jiffies, timeout)) {
-                        dev_dbg(&dev->pdev->dev, "amthi Time out\n");
-                        /* 15 sec for the message has expired */
-                        list_del(&cb->cb_list);
-                        rets = -ETIMEDOUT;
-                        goto free;
-                }
-        }
-        /* if the whole message will fit remove it from the list */
-        if (cb->information >= *offset && length >= (cb->information - *offset))
-                list_del(&cb->cb_list);
-        else if (cb->information > 0 && cb->information <= *offset) {
-                /* end of the message has been reached */
-                list_del(&cb->cb_list);
-                rets = 0;
-                goto free;
-        }
-        /* else means that not full buffer will be read and do not
-         * remove message from deletion list
-         */
-
-        dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
-                cb->response_buffer.size);
-        dev_dbg(&dev->pdev->dev, "amthi cb->information - %lu\n",
-                cb->information);
-
-        /* length is being turncated to PAGE_SIZE, however,
-         * the information may be longer */
-        length = min_t(size_t, length, (cb->information - *offset));
-
-        if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
-                rets = -EFAULT;
-        else {
-                rets = length;
-                if ((*offset + length) < cb->information) {
-                        *offset += length;
-                        goto out;
-                }
-        }
-free:
-        dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
-        *offset = 0;
-        mei_free_cb_private(cb);
-out:
+        mei_io_cb_free(cb);
         return rets;
 }
 
@@ -396,7 +317,7 @@ out:
 int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
 {
         struct mei_cl_cb *cb;
-        int rets = 0;
+        int rets;
         int i;
 
         if (cl->state != MEI_FILE_CONNECTED)
@@ -405,187 +326,41 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
         if (dev->dev_state != MEI_DEV_ENABLED)
                 return -ENODEV;
 
-        dev_dbg(&dev->pdev->dev, "check if read is pending.\n");
         if (cl->read_pending || cl->read_cb) {
                 dev_dbg(&dev->pdev->dev, "read is pending.\n");
                 return -EBUSY;
         }
+        i = mei_me_cl_by_id(dev, cl->me_client_id);
+        if (i < 0) {
+                dev_err(&dev->pdev->dev, "no such me client %d\n",
+                        cl->me_client_id);
+                return -ENODEV;
+        }
 
-        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+        cb = mei_io_cb_init(cl, NULL);
         if (!cb)
                 return -ENOMEM;
 
-        dev_dbg(&dev->pdev->dev, "allocation call back successful. host client = %d, ME client = %d\n",
-                cl->host_client_id, cl->me_client_id);
-        i = mei_me_cl_by_id(dev, cl->me_client_id);
-        if (i < 0) {
-                rets = -ENODEV;
-                goto unlock;
-        }
+        rets = mei_io_cb_alloc_resp_buf(cb,
+                        dev->me_clients[i].props.max_msg_length);
+        if (rets)
+                goto err;
 
-        cb->response_buffer.size = dev->me_clients[i].props.max_msg_length;
-        cb->response_buffer.data =
-                kmalloc(cb->response_buffer.size, GFP_KERNEL);
-        if (!cb->response_buffer.data) {
-                rets = -ENOMEM;
-                goto unlock;
-        }
-        dev_dbg(&dev->pdev->dev, "allocation call back data success.\n");
-        cb->major_file_operations = MEI_READ;
-        /* make sure information is zero before we start */
-        cb->information = 0;
-        cb->file_private = (void *) cl;
+        cb->fop_type = MEI_FOP_READ;
         cl->read_cb = cb;
         if (dev->mei_host_buffer_is_empty) {
                 dev->mei_host_buffer_is_empty = false;
                 if (mei_send_flow_control(dev, cl)) {
                         rets = -ENODEV;
-                        goto unlock;
+                        goto err;
                 }
-                list_add_tail(&cb->cb_list, &dev->read_list.mei_cb.cb_list);
+                list_add_tail(&cb->list, &dev->read_list.list);
         } else {
-                list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
+                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
         }
         return rets;
-unlock:
-        mei_free_cb_private(cb);
+err:
+        mei_io_cb_free(cb);
         return rets;
 }
 
-/**
- * amthi_write - write iamthif data to amthi client
- *
- * @dev: the device structure
- * @cb: mei call back struct
- *
- * returns 0 on success, <0 on failure.
- */
-int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
-{
-        struct mei_msg_hdr mei_hdr;
-        int ret;
-
-        if (!dev || !cb)
-                return -ENODEV;
-
-        dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
-
-        dev->iamthif_state = MEI_IAMTHIF_WRITING;
-        dev->iamthif_current_cb = cb;
-        dev->iamthif_file_object = cb->file_object;
-        dev->iamthif_canceled = false;
-        dev->iamthif_ioctl = true;
-        dev->iamthif_msg_buf_size = cb->request_buffer.size;
-        memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
-               cb->request_buffer.size);
-
-        ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
-        if (ret < 0)
-                return ret;
-
-        if (ret && dev->mei_host_buffer_is_empty) {
-                ret = 0;
-                dev->mei_host_buffer_is_empty = false;
-                if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
-                        mei_hdr.length = mei_hbuf_max_data(dev);
-                        mei_hdr.msg_complete = 0;
-                } else {
-                        mei_hdr.length = cb->request_buffer.size;
-                        mei_hdr.msg_complete = 1;
-                }
-
-                mei_hdr.host_addr = dev->iamthif_cl.host_client_id;
-                mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
-                mei_hdr.reserved = 0;
-                dev->iamthif_msg_buf_index += mei_hdr.length;
-                if (mei_write_message(dev, &mei_hdr,
-                                        (unsigned char *)(dev->iamthif_msg_buf),
-                                        mei_hdr.length))
-                        return -ENODEV;
-
-                if (mei_hdr.msg_complete) {
-                        if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
-                                return -ENODEV;
-                        dev->iamthif_flow_control_pending = true;
-                        dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
-                        dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
-                        dev->iamthif_current_cb = cb;
-                        dev->iamthif_file_object = cb->file_object;
-                        list_add_tail(&cb->cb_list,
-                                      &dev->write_waiting_list.mei_cb.cb_list);
-                } else {
-                        dev_dbg(&dev->pdev->dev, "message does not complete, "
-                                        "so add amthi cb to write list.\n");
-                        list_add_tail(&cb->cb_list,
-                                      &dev->write_list.mei_cb.cb_list);
-                }
-        } else {
-                if (!(dev->mei_host_buffer_is_empty))
-                        dev_dbg(&dev->pdev->dev, "host buffer is not empty");
-
-                dev_dbg(&dev->pdev->dev, "No flow control credentials, "
-                                "so add iamthif cb to write list.\n");
-                list_add_tail(&cb->cb_list, &dev->write_list.mei_cb.cb_list);
-        }
-        return 0;
-}
-
-/**
- * iamthif_ioctl_send_msg - send cmd data to amthi client
- *
- * @dev: the device structure
- *
- * returns 0 on success, <0 on failure.
- */
-void mei_run_next_iamthif_cmd(struct mei_device *dev)
-{
-        struct mei_cl *cl_tmp;
-        struct mei_cl_cb *pos = NULL;
-        struct mei_cl_cb *next = NULL;
-        int status;
-
-        if (!dev)
-                return;
-
-        dev->iamthif_msg_buf_size = 0;
-        dev->iamthif_msg_buf_index = 0;
-        dev->iamthif_canceled = false;
-        dev->iamthif_ioctl = true;
-        dev->iamthif_state = MEI_IAMTHIF_IDLE;
-        dev->iamthif_timer = 0;
-        dev->iamthif_file_object = NULL;
-
-        dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
-
-        list_for_each_entry_safe(pos, next,
-                        &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
-                list_del(&pos->cb_list);
-                cl_tmp = (struct mei_cl *)pos->file_private;
-
-                if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
-                        status = amthi_write(dev, pos);
-                        if (status) {
-                                dev_dbg(&dev->pdev->dev,
-                                        "amthi write failed status = %d\n",
-                                        status);
-                                return;
-                        }
-                        break;
-                }
-        }
-}
-
-/**
- * mei_free_cb_private - free mei_cb_private related memory
- *
- * @cb: mei callback struct
- */
-void mei_free_cb_private(struct mei_cl_cb *cb)
-{
-        if (cb == NULL)
-                return;
-
-        kfree(cb->request_buffer.data);
-        kfree(cb->response_buffer.data);
-        kfree(cb);
-}
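
The snippet below is a minimal sketch of the io-callback lifecycle this patch introduces, not code from the patch itself: mei_io_cb_init() allocates and initializes the cb, mei_io_cb_alloc_resp_buf() (or the _req_ variant) sizes its buffer, and mei_io_cb_free() releases the buffers and the cb on any exit path. It assumes the mei driver's internal declarations are in scope; example_submit_read() and the choice of list are illustrative, and the real mei_start_read() above additionally handles flow-control credits.

/*
 * Illustrative only; mirrors the init -> alloc buffer -> set fop_type ->
 * queue -> free-on-error pattern used by mei_start_read() in the diff.
 */
static int example_submit_read(struct mei_device *dev, struct mei_cl *cl,
                               size_t resp_len)
{
        struct mei_cl_cb *cb;
        int rets;

        cb = mei_io_cb_init(cl, NULL);  /* kzalloc + list init, no file object */
        if (!cb)
                return -ENOMEM;

        rets = mei_io_cb_alloc_resp_buf(cb, resp_len);  /* 0, -EINVAL or -ENOMEM */
        if (rets)
                goto err;

        cb->fop_type = MEI_FOP_READ;    /* replaces cb->major_file_operations */
        cl->read_cb = cb;
        list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        return 0;

err:
        mei_io_cb_free(cb);     /* frees request/response buffers and the cb */
        return rets;
}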