Diffstat (limited to 'drivers')
-rw-r--r--	drivers/misc/hpilo.c	138
-rw-r--r--	drivers/misc/hpilo.h	8
2 files changed, 101 insertions, 45 deletions
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 35ed12379cd0..9a370d5acf87 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/pci.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/file.h>
@@ -21,6 +22,7 @@
 #include <linux/delay.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/wait.h>
 #include "hpilo.h"
 
 static struct class *ilo_class;
@@ -61,9 +63,10 @@ static inline int desc_mem_sz(int nr_entry)
 static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
 {
 	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
+	unsigned long flags;
 	int ret = 0;
 
-	spin_lock(&hw->fifo_lock);
+	spin_lock_irqsave(&hw->fifo_lock, flags);
 	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
 	      & ENTRY_MASK_O)) {
 		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
@@ -71,7 +74,7 @@ static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
 		fifo_q->tail += 1;
 		ret = 1;
 	}
-	spin_unlock(&hw->fifo_lock);
+	spin_unlock_irqrestore(&hw->fifo_lock, flags);
 
 	return ret;
 }
@@ -79,10 +82,11 @@ static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
 static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
 {
 	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
+	unsigned long flags;
 	int ret = 0;
 	u64 c;
 
-	spin_lock(&hw->fifo_lock);
+	spin_lock_irqsave(&hw->fifo_lock, flags);
 	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
 	if (c & ENTRY_MASK_C) {
 		if (entry)
@@ -93,7 +97,7 @@ static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
 		fifo_q->head += 1;
 		ret = 1;
 	}
-	spin_unlock(&hw->fifo_lock);
+	spin_unlock_irqrestore(&hw->fifo_lock, flags);
 
 	return ret;
 }
@@ -374,7 +378,18 @@ static inline void clear_device(struct ilo_hwinfo *hw)
 	clear_pending_db(hw, -1);
 }
 
-static void ilo_locked_reset(struct ilo_hwinfo *hw)
+static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
+{
+	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
+}
+
+static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
+{
+	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
+		 &hw->mmio_vaddr[DB_IRQ]);
+}
+
+static void ilo_set_reset(struct ilo_hwinfo *hw)
 {
 	int slot;
 
@@ -387,19 +402,6 @@ static void ilo_locked_reset(struct ilo_hwinfo *hw)
 			continue;
 		set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
 	}
-
-	clear_device(hw);
-}
-
-static void ilo_reset(struct ilo_hwinfo *hw)
-{
-	spin_lock(&hw->alloc_lock);
-
-	/* reset might have been handled after lock was taken */
-	if (is_device_reset(hw))
-		ilo_locked_reset(hw);
-
-	spin_unlock(&hw->alloc_lock);
 }
 
 static ssize_t ilo_read(struct file *fp, char __user *buf,
@@ -411,12 +413,11 @@ static ssize_t ilo_read(struct file *fp, char __user *buf,
 	struct ilo_hwinfo *hw = data->ilo_hw;
 	void *pkt;
 
-	if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
+	if (is_channel_reset(driver_ccb)) {
 		/*
 		 * If the device has been reset, applications
 		 * need to close and reopen all ccbs.
 		 */
-		ilo_reset(hw);
 		return -ENODEV;
 	}
 
@@ -462,14 +463,8 @@ static ssize_t ilo_write(struct file *fp, const char __user *buf,
 	struct ilo_hwinfo *hw = data->ilo_hw;
 	void *pkt;
 
-	if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
-		/*
-		 * If the device has been reset, applications
-		 * need to close and reopen all ccbs.
-		 */
-		ilo_reset(hw);
+	if (is_channel_reset(driver_ccb))
 		return -ENODEV;
-	}
 
 	/* get a packet to send the user command */
 	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
@@ -496,27 +491,28 @@ static int ilo_close(struct inode *ip, struct file *fp)
 	int slot;
 	struct ccb_data *data;
 	struct ilo_hwinfo *hw;
+	unsigned long flags;
 
 	slot = iminor(ip) % MAX_CCB;
 	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
 
-	spin_lock(&hw->alloc_lock);
-
-	if (is_device_reset(hw))
-		ilo_locked_reset(hw);
+	spin_lock(&hw->open_lock);
 
 	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {
 
 		data = fp->private_data;
 
+		spin_lock_irqsave(&hw->alloc_lock, flags);
+		hw->ccb_alloc[slot] = NULL;
+		spin_unlock_irqrestore(&hw->alloc_lock, flags);
+
 		ilo_ccb_close(hw->ilo_dev, data);
 
 		kfree(data);
-		hw->ccb_alloc[slot] = NULL;
 	} else
 		hw->ccb_alloc[slot]->ccb_cnt--;
 
-	spin_unlock(&hw->alloc_lock);
+	spin_unlock(&hw->open_lock);
 
 	return 0;
 }
@@ -526,6 +522,7 @@ static int ilo_open(struct inode *ip, struct file *fp)
 	int slot, error;
 	struct ccb_data *data;
 	struct ilo_hwinfo *hw;
+	unsigned long flags;
 
 	slot = iminor(ip) % MAX_CCB;
 	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
@@ -535,10 +532,7 @@ static int ilo_open(struct inode *ip, struct file *fp)
 	if (!data)
 		return -ENOMEM;
 
-	spin_lock(&hw->alloc_lock);
-
-	if (is_device_reset(hw))
-		ilo_locked_reset(hw);
+	spin_lock(&hw->open_lock);
 
 	/* each fd private_data holds sw/hw view of ccb */
 	if (hw->ccb_alloc[slot] == NULL) {
@@ -549,22 +543,31 @@ static int ilo_open(struct inode *ip, struct file *fp)
 			goto out;
 		}
 
+		data->ccb_cnt = 1;
+		data->ccb_excl = fp->f_flags & O_EXCL;
+		data->ilo_hw = hw;
+		init_waitqueue_head(&data->ccb_waitq);
+
 		/* write the ccb to hw */
+		spin_lock_irqsave(&hw->alloc_lock, flags);
 		ilo_ccb_open(hw, data, slot);
+		hw->ccb_alloc[slot] = data;
+		spin_unlock_irqrestore(&hw->alloc_lock, flags);
 
 		/* make sure the channel is functional */
 		error = ilo_ccb_verify(hw, data);
 		if (error) {
+
+			spin_lock_irqsave(&hw->alloc_lock, flags);
+			hw->ccb_alloc[slot] = NULL;
+			spin_unlock_irqrestore(&hw->alloc_lock, flags);
+
 			ilo_ccb_close(hw->ilo_dev, data);
+
 			kfree(data);
 			goto out;
 		}
 
-		data->ccb_cnt = 1;
-		data->ccb_excl = fp->f_flags & O_EXCL;
-		data->ilo_hw = hw;
-		hw->ccb_alloc[slot] = data;
-
 	} else {
 		kfree(data);
 		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
@@ -580,7 +583,7 @@ static int ilo_open(struct inode *ip, struct file *fp)
 		}
 	}
 out:
-	spin_unlock(&hw->alloc_lock);
+	spin_unlock(&hw->open_lock);
 
 	if (!error)
 		fp->private_data = hw->ccb_alloc[slot];
@@ -596,6 +599,41 @@ static const struct file_operations ilo_fops = {
 	.release = ilo_close,
 };
 
+static irqreturn_t ilo_isr(int irq, void *data)
+{
+	struct ilo_hwinfo *hw = data;
+	int pending, i;
+
+	spin_lock(&hw->alloc_lock);
+
+	/* check for ccbs which have data */
+	pending = get_device_outbound(hw);
+	if (!pending) {
+		spin_unlock(&hw->alloc_lock);
+		return IRQ_NONE;
+	}
+
+	if (is_db_reset(pending)) {
+		/* wake up all ccbs if the device was reset */
+		pending = -1;
+		ilo_set_reset(hw);
+	}
+
+	for (i = 0; i < MAX_CCB; i++) {
+		if (!hw->ccb_alloc[i])
+			continue;
+		if (pending & (1 << i))
+			wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
+	}
+
+	/* clear the device of the channels that have been handled */
+	clear_pending_db(hw, pending);
+
+	spin_unlock(&hw->alloc_lock);
+
+	return IRQ_HANDLED;
+}
+
 static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
 {
 	pci_iounmap(pdev, hw->db_vaddr);
@@ -649,6 +687,8 @@ static void ilo_remove(struct pci_dev *pdev)
 		device_destroy(ilo_class, MKDEV(ilo_major, i));
 
 	cdev_del(&ilo_hw->cdev);
+	ilo_disable_interrupts(ilo_hw);
+	free_irq(pdev->irq, ilo_hw);
 	ilo_unmap_device(pdev, ilo_hw);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
@@ -684,6 +724,7 @@ static int __devinit ilo_probe(struct pci_dev *pdev,
 	ilo_hw->ilo_dev = pdev;
 	spin_lock_init(&ilo_hw->alloc_lock);
 	spin_lock_init(&ilo_hw->fifo_lock);
+	spin_lock_init(&ilo_hw->open_lock);
 
 	error = pci_enable_device(pdev);
 	if (error)
@@ -702,13 +743,19 @@ static int __devinit ilo_probe(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, ilo_hw);
 	clear_device(ilo_hw);
 
+	error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
+	if (error)
+		goto unmap;
+
+	ilo_enable_interrupts(ilo_hw);
+
 	cdev_init(&ilo_hw->cdev, &ilo_fops);
 	ilo_hw->cdev.owner = THIS_MODULE;
 	start = devnum * MAX_CCB;
 	error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
 	if (error) {
 		dev_err(&pdev->dev, "Could not add cdev\n");
-		goto unmap;
+		goto remove_isr;
 	}
 
 	for (minor = 0 ; minor < MAX_CCB; minor++) {
@@ -721,6 +768,9 @@ static int __devinit ilo_probe(struct pci_dev *pdev,
 	}
 
 	return 0;
+remove_isr:
+	ilo_disable_interrupts(ilo_hw);
+	free_irq(pdev->irq, ilo_hw);
 unmap:
 	ilo_unmap_device(pdev, ilo_hw);
 free_regions:
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 03a14c82aad9..38576050776a 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -46,11 +46,14 @@ struct ilo_hwinfo {
 
 	spinlock_t alloc_lock;
 	spinlock_t fifo_lock;
+	spinlock_t open_lock;
 
 	struct cdev cdev;
 };
 
-/* offset from mmio_vaddr */
+/* offset from mmio_vaddr for enabling doorbell interrupts */
+#define DB_IRQ		0xB2
+/* offset from mmio_vaddr for outbound communications */
 #define DB_OUT		0xD4
 /* DB_OUT reset bit */
 #define DB_RESET	26
@@ -131,6 +134,9 @@ struct ccb_data {
 	/* pointer to hardware device info */
 	struct ilo_hwinfo *ilo_hw;
 
+	/* queue for this ccb to wait for recv data */
+	wait_queue_head_t ccb_waitq;
+
 	/* usage count, to allow for shared ccb's */
 	int ccb_cnt;
 