author	Tomas Winkler <tomas.winkler@intel.com>	2013-02-06 07:06:42 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-02-06 14:24:33 -0500
commit	06ecd6459800962155c485e27d9dd30268b579bf
tree	76ddf71e529f45b7256d6b7bdf3efe49ec2a52db /drivers/misc/mei/interrupt.c
parent	827eef51f8dd9a4ab62b4ad270c15472f46938f2
mei: move interrupt handlers to be me hw specific
The interrupt handlers are platform specific, so we move
them to hw-me.c. To do that, we need to export the
write, read, and complete handlers from interrupt.c.
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/misc/mei/interrupt.c')
-rw-r--r--	drivers/misc/mei/interrupt.c	126
1 file changed, 8 insertions, 118 deletions
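For context, "exporting" the handlers means dropping their static qualifier (visible in the hunks below) and adding prototypes to the driver's internal header so the hardware-specific code can call them. A minimal sketch of those declarations follows, with the signatures taken from the diff below; the header placement (e.g. mei_dev.h) is an assumption, since the header change itself is not part of this diff:

/* Sketch: prototypes for the handlers exported from interrupt.c.
 * Signatures come from the diff below; the header they land in is assumed.
 */
void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
int mei_irq_read_handler(struct mei_device *dev,
			 struct mei_cl_cb *cmpl_list, s32 *slots);
int mei_irq_write_handler(struct mei_device *dev,
			  struct mei_cl_cb *cmpl_list);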
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 431aa91fd002..3535b2676c97 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -30,12 +30,12 @@
 
 
 /**
- * _mei_cmpl - processes completed operation.
+ * mei_complete_handler - processes completed operation.
  *
  * @cl: private data of the file object.
  * @cb_pos: callback block.
  */
-static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
 {
 	if (cb_pos->fop_type == MEI_FOP_WRITE) {
 		mei_io_cb_free(cb_pos);
@@ -313,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
  * mei_irq_thread_read_handler - bottom half read routine after ISR to
  * handle the read processing.
  *
- * @cmpl_list: An instance of our list structure
  * @dev: the device structure
+ * @cmpl_list: An instance of our list structure
  * @slots: slots to read.
  *
  * returns 0 on success, <0 on failure.
  */
-static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
-		struct mei_device *dev,
-		s32 *slots)
+int mei_irq_read_handler(struct mei_device *dev,
+		struct mei_cl_cb *cmpl_list, s32 *slots)
 {
 	struct mei_msg_hdr *mei_hdr;
 	struct mei_cl *cl_pos = NULL;
@@ -412,15 +411,15 @@ end:
 
 
 /**
- * mei_irq_thread_write_handler - bottom half write routine after
- * ISR to handle the write processing.
+ * mei_irq_write_handler - dispatch write requests
+ * after irq received
  *
  * @dev: the device structure
  * @cmpl_list: An instance of our list structure
  *
  * returns 0 on success, <0 on failure.
  */
-static int mei_irq_thread_write_handler(struct mei_device *dev,
+int mei_irq_write_handler(struct mei_device *dev,
 		struct mei_cl_cb *cmpl_list)
 {
 
@@ -666,112 +665,3 @@ out:
 	mutex_unlock(&dev->device_lock);
 }
 
-/**
- * mei_interrupt_thread_handler - function called after ISR to handle the interrupt
- * processing.
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- *
- */
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
-{
-	struct mei_device *dev = (struct mei_device *) dev_id;
-	struct mei_cl_cb complete_list;
-	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
-	struct mei_cl *cl;
-	s32 slots;
-	int rets;
-	bool bus_message_received;
-
-
-	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
-	/* initialize our complete list */
-	mutex_lock(&dev->device_lock);
-	mei_io_list_init(&complete_list);
-
-	/* Ack the interrupt here
-	 * In case of MSI we don't go through the quick handler */
-	if (pci_dev_msi_enabled(dev->pdev))
-		mei_clear_interrupts(dev);
-
-	/* check if ME wants a reset */
-	if (!mei_hw_is_ready(dev) &&
-	    dev->dev_state != MEI_DEV_RESETING &&
-	    dev->dev_state != MEI_DEV_INITIALIZING) {
-		dev_dbg(&dev->pdev->dev, "FW not ready.\n");
-		mei_reset(dev, 1);
-		mutex_unlock(&dev->device_lock);
-		return IRQ_HANDLED;
-	}
-
-	/* check if we need to start the dev */
-	if (!mei_host_is_ready(dev)) {
-		if (mei_hw_is_ready(dev)) {
-			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
-
-			mei_host_set_ready(dev);
-
-			dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
-			/* link is established * start sending messages. */
-
-			dev->dev_state = MEI_DEV_INIT_CLIENTS;
-
-			mei_hbm_start_req(dev);
-			mutex_unlock(&dev->device_lock);
-			return IRQ_HANDLED;
-		} else {
-			dev_dbg(&dev->pdev->dev, "FW not ready.\n");
-			mutex_unlock(&dev->device_lock);
-			return IRQ_HANDLED;
-		}
-	}
-	/* check slots available for reading */
-	slots = mei_count_full_read_slots(dev);
-	while (slots > 0) {
-		/* we have urgent data to send so break the read */
-		if (dev->wr_ext_msg.hdr.length)
-			break;
-		dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
-		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
-		rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
-		if (rets)
-			goto end;
-	}
-	rets = mei_irq_thread_write_handler(dev, &complete_list);
-end:
-	dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
-	dev->mei_host_buffer_is_empty = mei_hbuf_is_ready(dev);
-
-	bus_message_received = false;
-	if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
-		dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
-		bus_message_received = true;
-	}
-	mutex_unlock(&dev->device_lock);
-	if (bus_message_received) {
-		dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
-		wake_up_interruptible(&dev->wait_recvd_msg);
-		bus_message_received = false;
-	}
-	if (list_empty(&complete_list.list))
-		return IRQ_HANDLED;
-
-
-	list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
-		cl = cb_pos->cl;
-		list_del(&cb_pos->list);
-		if (cl) {
-			if (cl != &dev->iamthif_cl) {
-				dev_dbg(&dev->pdev->dev, "completing call back.\n");
-				_mei_cmpl(cl, cb_pos);
-				cb_pos = NULL;
-			} else if (cl == &dev->iamthif_cl) {
-				mei_amthif_complete(dev, cb_pos);
-			}
-		}
-	}
-	return IRQ_HANDLED;
-}