author    Hoang-Nam Nguyen <hnguyen@linux.vnet.ibm.com>  2006-11-20 17:54:12 -0500
committer Roland Dreier <rolandd@cisco.com>              2006-11-29 18:33:08 -0500
commit    2771e9ed4702e46c3f4c305eb2e047c251c2ad2b (patch)
tree      da6ffee90c3725fb84949cf44ea78f8e8c8965c1 /drivers/infiniband/hw
parent    9ab1ffa8775d9c677b1301cccce8a7d91e5163d0 (diff)
IB/ehca: Use WQE offset instead of WQE addr for pending work reqs
This is a patch for ehca that fixes a bug in prepare_sqe_rts(), which
used the WQE address to iterate over pending work requests. This could
cause an access violation, since the queue pages cannot be assumed to
lie consecutively in memory. The patch therefore introduces a few queue
functions that determine a WQE's offset from its address and uses that
offset to iterate over the pending work requests.
Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
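
For context on the approach, here is a minimal user-space sketch of offset-based
iteration over a paged queue whose pages are not contiguous. All names below
(toy_queue, toy_abs_to_offset, toy_advance, toy_calc) and the page/entry sizes
are hypothetical stand-ins invented for this example, not driver code; the real
patch implements the same idea on struct ipz_queue via ipz_queue_abs_to_offset(),
ipz_queue_advance_offset() and ipz_qeit_calc(), as shown in the diff below.

/*
 * Minimal user-space sketch (not driver code): iterate queue entries by
 * logical offset instead of raw pointer, so that non-contiguous pages
 * are handled correctly.  All sizes and names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define TOY_PAGESIZE 4096
#define TOY_QE_SIZE  128

struct toy_queue {
	void     **pages;        /* pages may be scattered in memory */
	uint64_t   nr_pages;
	uint64_t   queue_length; /* nr_pages * TOY_PAGESIZE */
};

/* Map an address inside one of the queue pages to a logical offset. */
static int toy_abs_to_offset(struct toy_queue *q, void *addr, uint64_t *ofs)
{
	uintptr_t a = (uintptr_t)addr;

	for (uint64_t i = 0; i < q->nr_pages; i++) {
		uintptr_t page = (uintptr_t)q->pages[i];

		if (a >= page && a < page + TOY_PAGESIZE) {
			*ofs = a - page + i * TOY_PAGESIZE;
			return 0;
		}
	}
	return -1; /* address does not belong to this queue */
}

/* Advance by one queue entry, wrapping at the end of the queue. */
static uint64_t toy_advance(struct toy_queue *q, uint64_t ofs)
{
	ofs += TOY_QE_SIZE;
	return ofs >= q->queue_length ? 0 : ofs;
}

/* Translate a logical offset back to an address in the proper page. */
static void *toy_calc(struct toy_queue *q, uint64_t ofs)
{
	return (char *)q->pages[ofs / TOY_PAGESIZE] + ofs % TOY_PAGESIZE;
}

int main(void)
{
	void *pages[2] = { malloc(TOY_PAGESIZE), malloc(TOY_PAGESIZE) };
	struct toy_queue q = {
		.pages = pages,
		.nr_pages = 2,
		.queue_length = 2 * TOY_PAGESIZE,
	};
	/* Pretend the "bad" entry sits in the middle of the second page. */
	void *bad = (char *)pages[1] + 5 * TOY_QE_SIZE;
	uint64_t ofs;

	if (toy_abs_to_offset(&q, bad, &ofs))
		return 1;

	/* Walk a few entries by offset; raw pointer arithmetic would go
	 * wrong as soon as it crossed a page boundary. */
	for (int n = 0; n < 4; n++) {
		printf("entry %d: offset=%llu addr=%p\n",
		       n, (unsigned long long)ofs, toy_calc(&q, ofs));
		ofs = toy_advance(&q, ofs);
	}

	free(pages[0]);
	free(pages[1]);
	return 0;
}

Advancing by offset makes the page-boundary wrap a pure index operation, and the
per-page lookup in toy_calc() resolves the correct page even when the pages are
scattered in memory, which raw pointer arithmetic cannot do.
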
Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/ehca/ehca_main.c |  4
 drivers/infiniband/hw/ehca/ehca_qp.c   | 22
 drivers/infiniband/hw/ehca/ipz_pt_fn.c | 13
 drivers/infiniband/hw/ehca/ipz_pt_fn.h | 15
 4 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 01f5aa9cb56d..3d1c1c535038 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0018");
+MODULE_VERSION("SVNEHCA_0019");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0018)\n");
+	       "(Rel.: SVNEHCA_0019)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index cf3e50ee2d06..8682aa50c707 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -732,8 +732,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 	u64 h_ret;
 	struct ipz_queue *squeue;
 	void *bad_send_wqe_p, *bad_send_wqe_v;
-	void *squeue_start_p, *squeue_end_p;
-	void *squeue_start_v, *squeue_end_v;
+	u64 q_ofs;
 	struct ehca_wqe *wqe;
 	int qp_num = my_qp->ib_qp.qp_num;
 
@@ -755,26 +754,23 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 	if (ehca_debug_level)
 		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
 	squeue = &my_qp->ipz_squeue;
-	squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
-	squeue_end_p = squeue_start_p+squeue->queue_length;
-	squeue_start_v = abs_to_virt((u64)squeue_start_p);
-	squeue_end_v = abs_to_virt((u64)squeue_end_p);
-	ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
-		 qp_num, squeue_start_v, squeue_end_v);
+	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
+		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
+			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
+		return -EFAULT;
+	}
 
 	/* loop sets wqe's purge bit */
-	wqe = (struct ehca_wqe*)bad_send_wqe_v;
+	wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
 	*bad_wqe_cnt = 0;
 	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
 		if (ehca_debug_level)
 			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
 		wqe->nr_of_data_seg = 0; /* suppress data access */
 		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
-		wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
+		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
+		wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
 		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
-		if ((void*)wqe >= squeue_end_v) {
-			wqe = squeue_start_v;
-		}
 	}
 	/*
 	 * bad wqe will be reprocessed and ignored when pol_cq() is called,
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index e028ff1588cc..bf7a40088f61 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -70,6 +70,19 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
 	return ret;
 }
 
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
+{
+	int i;
+	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
+		u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
+		if (addr >= page && addr < page + queue->pagesize) {
+			*q_offset = addr - page + i * queue->pagesize;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
 int ipz_queue_ctor(struct ipz_queue *queue,
 		   const u32 nr_of_pages,
 		   const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 2f13509d5257..dc3bda2634b7 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -150,6 +150,21 @@ static inline void *ipz_qeit_reset(struct ipz_queue *queue)
 	return ipz_qeit_get(queue);
 }
 
+/*
+ * return the q_offset corresponding to an absolute address
+ */
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
+
+/*
+ * return the next queue offset. don't modify the queue.
+ */
+static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
+{
+	offset += queue->qe_size;
+	if (offset >= queue->queue_length) offset = 0;
+	return offset;
+}
+
 /* struct generic page table */
 struct ipz_pt {
 	u64 entries[EHCA_PT_ENTRIES];