Diffstat (limited to 'drivers'): 56 files changed, 1951 insertions, 1307 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7916f4b86d23..ae01d86070bb 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -84,4 +84,5 @@ source "drivers/auxdisplay/Kconfig"
 
 source "drivers/kvm/Kconfig"
 
+source "drivers/uio/Kconfig"
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 6d9d7fab77f5..c34c8efff609 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_ATA) += ata/
 obj-$(CONFIG_FUSION) += message/
 obj-$(CONFIG_FIREWIRE) += firewire/
 obj-$(CONFIG_IEEE1394) += ieee1394/
+obj-$(CONFIG_UIO) += uio/
 obj-y += cdrom/
 obj-y += auxdisplay/
 obj-$(CONFIG_MTD) += mtd/
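These first two hunks wire the new Userspace I/O (UIO) core into Kconfig and the build. For orientation, a driver on top of it fills in a struct uio_info and registers it against a parent device, after which userspace can mmap() the exposed region. This is only a sketch following the drivers/uio API merged here; the device name and address are made up:

#include <linux/uio_driver.h>

static struct uio_info my_uio_info = {
        .name    = "my_uio",            /* hypothetical device name */
        .version = "0.1",
        .irq     = UIO_IRQ_NONE,        /* this sketch handles no interrupts */
};

static int my_probe(struct device *dev)
{
        /* expose one physical memory region to userspace via mmap() */
        my_uio_info.mem[0].addr    = 0xfe000000;        /* assumed bus address */
        my_uio_info.mem[0].size    = 4096;
        my_uio_info.mem[0].memtype = UIO_MEM_PHYS;
        return uio_register_device(dev, &my_uio_info);
}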
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 41476abc0693..db703758db98 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -224,6 +224,7 @@ ohci_update_phy_reg(struct fw_card *card, int addr,
 	u32 val, old;
 
 	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
+	flush_writes(ohci);
 	msleep(2);
 	val = reg_read(ohci, OHCI1394_PhyControl);
 	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
@@ -586,7 +587,7 @@ static void context_stop(struct context *ctx)
 			break;
 
 		fw_notify("context_stop: still active (0x%08x)\n", reg);
-		msleep(1);
+		mdelay(1);
 	}
 }
 
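Two themes in the fw-ohci.c hunks: the PHY register read starts with a posted MMIO write, which must be flushed before the 2 ms settle delay means anything, and context_stop() can run in atomic context, where msleep() may not be used and the busy-waiting mdelay() is required. A sketch of the flush idiom, assuming flush_writes() is a plain register read-back (the usual way to force out posted PCI writes; the register chosen here is an assumption):

/* Reading any device register back over the bus forces earlier posted
 * writes to complete before the read returns. */
static inline void flush_writes(const struct fw_ohci *ohci)
{
        reg_read(ohci, OHCI1394_Version);       /* assumed harmless register */
}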
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 7c53be0387fb..fc984474162c 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -840,7 +840,6 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 		container_of(base_orb, struct sbp2_command_orb, base);
 	struct fw_unit *unit = orb->unit;
 	struct fw_device *device = fw_device(unit->device.parent);
-	struct scatterlist *sg;
 	int result;
 
 	if (status != NULL) {
@@ -876,11 +875,10 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 	dma_unmap_single(device->card->device, orb->base.request_bus,
 			 sizeof(orb->request), DMA_TO_DEVICE);
 
-	if (orb->cmd->use_sg > 0) {
-		sg = (struct scatterlist *)orb->cmd->request_buffer;
-		dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
-			     orb->cmd->sc_data_direction);
-	}
+	if (scsi_sg_count(orb->cmd) > 0)
+		dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+			     scsi_sg_count(orb->cmd),
+			     orb->cmd->sc_data_direction);
 
 	if (orb->page_table_bus != 0)
 		dma_unmap_single(device->card->device, orb->page_table_bus,
@@ -901,8 +899,8 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 	int sg_len, l, i, j, count;
 	dma_addr_t sg_addr;
 
-	sg = (struct scatterlist *)orb->cmd->request_buffer;
-	count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
+	sg = scsi_sglist(orb->cmd);
+	count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
 			   orb->cmd->sc_data_direction);
 	if (count == 0)
 		goto fail;
@@ -971,7 +969,7 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 	return 0;
 
  fail_page_table:
-	dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+	dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
 		     orb->cmd->sc_data_direction);
  fail:
 	return -ENOMEM;
@@ -1031,7 +1029,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 		orb->request.misc |=
 			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
 
-	if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
+	if (scsi_sg_count(cmd) && sbp2_command_orb_map_scatterlist(orb) < 0)
 		goto fail_mapping;
 
 	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
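All five fw-sbp2.c hunks are one conversion: instead of reaching into cmd->use_sg and cmd->request_buffer, the driver now goes through the scsi_sg_count()/scsi_sglist() accessors, which keep working as the SCSI midlayer changes how it stores the data buffer. A minimal sketch of the map/unmap pairing the accessors support (error handling reduced to the essentials):

struct scatterlist *sg = scsi_sglist(cmd);
int count;

count = dma_map_sg(dev, sg, scsi_sg_count(cmd), cmd->sc_data_direction);
if (count == 0)
        return -ENOMEM;         /* nothing was mapped */
/* ... build the ORB page table from the 'count' mapped entries ... */
dma_unmap_sg(dev, sg, scsi_sg_count(cmd), cmd->sc_data_direction);

Note that dma_unmap_sg() takes the original entry count from scsi_sg_count(), not the value returned by dma_map_sg(), which is exactly what the hunks above do.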
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 80d0121463d0..3ce8e2fbe15f 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -605,8 +605,10 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
 	 * check is sufficient to ensure we don't send response to
 	 * broadcast packets or posted writes.
 	 */
-	if (request->ack != ACK_PENDING)
+	if (request->ack != ACK_PENDING) {
+		kfree(request);
 		return;
+	}
 
 	if (rcode == RCODE_COMPLETE)
 		fw_fill_response(&request->response, request->request_header,
@@ -628,11 +630,6 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
 	unsigned long flags;
 	int tcode, destination, source;
 
-	if (p->payload_length > 2048) {
-		/* FIXME: send error response. */
-		return;
-	}
-
 	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
 		return;
 
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 5abed193f4a6..5ceaccd10564 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -123,6 +123,10 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
 					  size_t length,
 					  void *callback_data);
 
+/*
+ * Important note: The callback must guarantee that either fw_send_response()
+ * or kfree() is called on the @request.
+ */
 typedef void (*fw_address_callback_t)(struct fw_card *card,
 				      struct fw_request *request,
 				      int tcode, int destination, int source,
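The new comment codifies ownership: once fw_core_handle_request() hands a request to an address handler, the handler must consume it exactly once, either via fw_send_response() (which, with the fw-transaction.c change above, now also frees requests that need no response) or via kfree(). A sketch of a handler honoring the contract; the handler body is illustrative, and the trailing parameters follow this era's fw_address_callback_t:

static void sketch_address_handler(struct fw_card *card,
                                   struct fw_request *request,
                                   int tcode, int destination, int source,
                                   int generation, int speed,
                                   unsigned long long offset,
                                   void *payload, size_t length,
                                   void *callback_data)
{
        if (tcode != TCODE_WRITE_BLOCK_REQUEST) {
                /* reject, but still consume the request */
                fw_send_response(card, request, RCODE_TYPE_ERROR);
                return;
        }
        /* ... process the write ... */
        fw_send_response(card, request, RCODE_COMPLETE);
}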
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 9820c67ba47d..4df269f5d9ac 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3374,7 +3374,7 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
 
-void cm_get_ack_delay(struct cm_device *cm_dev)
+static void cm_get_ack_delay(struct cm_device *cm_dev)
 {
 	struct ib_device_attr attr;
 
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 23af7a032a03..9ffb9987450a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -573,7 +573,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		if (!id_priv->cm_id.iw) {
-			qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+			qp_attr->qp_access_flags = 0;
 			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3b41dc0c39dd..5dc68cd5621b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1914,6 +1914,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
 fail3:
 	cxgb3_free_stid(ep->com.tdev, ep->stid);
 fail2:
+	cm_id->rem_ref(cm_id);
 	put_ep(&ep->com);
 fail1:
 out:
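The one-line cxgb3 fix balances a reference: iwch_create_listen() takes cm_id->add_ref(cm_id) when it binds the cm_id to the new listening endpoint, so the fail2 path, reached when stid allocation or the listen request fails, must drop that reference again or the cm_id leaks. In outline (an annotated excerpt; the surrounding code is elided):

        cm_id->add_ref(cm_id);          /* taken early, on the setup path */
        ep->com.cm_id = cm_id;
        /* ... stid allocation / listen request can still fail ... */
fail2:
        cm_id->rem_ref(cm_id);          /* the fix: undo add_ref() on error */
        put_ep(&ep->com);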
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 3cd6bf3402d1..e53a97af1260 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -79,7 +79,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 		av->av.ipd = (ah_mult > 0) ?
 			((ehca_mult - 1) / ah_mult) : 0;
 	} else
-	        av->av.ipd = ehca_static_rate;
+		av->av.ipd = ehca_static_rate;
 
 	av->av.lnh = ah_attr->ah_flags;
 	av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index daf823ea1ace..043e4fb23fb0 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -204,11 +204,11 @@ struct ehca_mr {
 	spinlock_t mrlock;
 
 	enum ehca_mr_flag flags;
-	u32 num_pages; /* number of MR pages */
-	u32 num_4k; /* number of 4k "page" portions to form MR */
+	u32 num_kpages; /* number of kernel pages */
+	u32 num_hwpages; /* number of hw pages to form MR */
 	int acl; /* ACL (stored here for usage in reregister) */
 	u64 *start; /* virtual start address (stored here for */
 	            /* usage in reregister) */
 	u64 size; /* size (stored here for usage in reregister) */
 	u32 fmr_page_size; /* page size for FMR */
 	u32 fmr_max_pages; /* max pages for FMR */
@@ -217,9 +217,6 @@ struct ehca_mr {
 	/* fw specific data */
 	struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
 	struct h_galpas galpas;
-	/* data for userspace bridge */
-	u32 nr_of_pages;
-	void *pagearray;
 };
 
 struct ehca_mw {
@@ -241,26 +238,29 @@ enum ehca_mr_pgi_type {
 
 struct ehca_mr_pginfo {
 	enum ehca_mr_pgi_type type;
-	u64 num_pages;
-	u64 page_cnt;
-	u64 num_4k; /* number of 4k "page" portions */
-	u64 page_4k_cnt; /* counter for 4k "page" portions */
-	u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */
+	u64 num_kpages;
+	u64 kpage_cnt;
+	u64 num_hwpages; /* number of hw pages */
+	u64 hwpage_cnt; /* counter for hw pages */
+	u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
 
-	/* type EHCA_MR_PGI_PHYS section */
-	int num_phys_buf;
-	struct ib_phys_buf *phys_buf_array;
-	u64 next_buf;
-
-	/* type EHCA_MR_PGI_USER section */
-	struct ib_umem *region;
-	struct ib_umem_chunk *next_chunk;
-	u64 next_nmap;
-
-	/* type EHCA_MR_PGI_FMR section */
-	u64 *page_list;
-	u64 next_listelem;
-	/* next_4k also used within EHCA_MR_PGI_FMR */
+	union {
+		struct { /* type EHCA_MR_PGI_PHYS section */
+			int num_phys_buf;
+			struct ib_phys_buf *phys_buf_array;
+			u64 next_buf;
+		} phy;
+		struct { /* type EHCA_MR_PGI_USER section */
+			struct ib_umem *region;
+			struct ib_umem_chunk *next_chunk;
+			u64 next_nmap;
+		} usr;
+		struct { /* type EHCA_MR_PGI_FMR section */
+			u64 fmr_pgsize;
+			u64 *page_list;
+			u64 next_listelem;
+		} fmr;
+	} u;
 };
 
 /* output parameters for MR/FMR hipz calls */
@@ -391,6 +391,6 @@ struct ehca_alloc_qp_parms {
 
 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
 int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
 
 #endif
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index fb3df5c271e7..1798e6466bd0 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -154,83 +154,83 @@ struct hcp_modify_qp_control_block {
 	u32 reserved_70_127[58]; /* 70 */
 };
 
-#define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0)
-#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2)
-#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3)
-#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4)
-#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6)
-#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8)
-#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9)
-#define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13)
-#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15)
-#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16)
-#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17)
-#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18)
-#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19)
-#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20)
-#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21)
-#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22)
-#define MQPCB_DLID EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23)
-#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24)
-#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25)
-#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26)
-#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27)
-#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28)
-#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31)
-#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32)
-#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33)
-#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34)
-#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35)
-#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36)
-#define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37)
-#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38)
-#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39)
-#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40)
-#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41)
-#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42)
-#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31)
-#define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48)
-#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49,49)
-#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50)
-#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51)
+#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
+#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
+#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
+#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
+#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
+#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
+#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
+#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
+#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
+#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
+#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
+#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
+#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
+#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
+#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
+#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
+#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
+#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
+#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
+#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
+#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
+#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
+#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
+#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
+#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
+#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
+#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
+#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
+#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
+#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
+#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
+#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
+#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
+#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
+#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
+#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
+#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
+#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
+#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
+#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
+#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
+#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
+#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
+#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
+#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
+#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
+#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
+#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
+#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
+#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
 
 #endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 01d4a148bd71..9e87883b561a 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -97,7 +97,7 @@ int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
 	return ret;
 }
 
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
 {
 	struct ehca_qp *ret = NULL;
 	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 4961eb88827c..4825975f88cf 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -96,7 +96,8 @@ int ehca_create_eq(struct ehca_shca *shca,
 	for (i = 0; i < nr_pages; i++) {
 		u64 rpage;
 
-		if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
+		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
+		if (!vpage) {
 			ret = H_RESOURCE;
 			goto create_eq_exit2;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index bbd3c6a5822f..fc19ef9fd963 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -127,6 +127,7 @@ int ehca_query_port(struct ib_device *ibdev,
 		    u8 port, struct ib_port_attr *props)
 {
 	int ret = 0;
+	u64 h_ret;
 	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
 					      ib_device);
 	struct hipz_query_port *rblock;
@@ -137,7 +138,8 @@ int ehca_query_port(struct ib_device *ibdev,
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_port1;
@@ -197,6 +199,7 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
 			u8 port, struct ehca_sma_attr *attr)
 {
 	int ret = 0;
+	u64 h_ret;
 	struct hipz_query_port *rblock;
 
 	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
@@ -205,7 +208,8 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_sma_attr1;
@@ -230,9 +234,11 @@ query_sma_attr1:
 int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {
 	int ret = 0;
-	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+	u64 h_ret;
+	struct ehca_shca *shca;
 	struct hipz_query_port *rblock;
 
+	shca = container_of(ibdev, struct ehca_shca, ib_device);
 	if (index > 16) {
 		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
 		return -EINVAL;
@@ -244,7 +250,8 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_pkey1;
@@ -262,6 +269,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
 		   int index, union ib_gid *gid)
 {
 	int ret = 0;
+	u64 h_ret;
 	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
 					      ib_device);
 	struct hipz_query_port *rblock;
@@ -277,7 +285,8 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
 		return -ENOMEM;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_gid1;
@@ -302,11 +311,12 @@ int ehca_modify_port(struct ib_device *ibdev,
 		     struct ib_port_modify *props)
 {
 	int ret = 0;
-	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+	struct ehca_shca *shca;
 	struct hipz_query_port *rblock;
 	u32 cap;
 	u64 hret;
 
+	shca = container_of(ibdev, struct ehca_shca, ib_device);
 	if ((props->set_port_cap_mask | props->clr_port_cap_mask)
 	    & ~allowed_port_caps) {
 		ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
@@ -325,7 +335,8 @@ int ehca_modify_port(struct ib_device *ibdev,
 		goto modify_port1;
 	}
 
-	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+	hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+	if (hret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto modify_port2;
@@ -337,7 +348,8 @@ int ehca_modify_port(struct ib_device *ibdev,
 	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
 				  cap, props->init_type, port_modify_mask);
 	if (hret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
+		ehca_err(&shca->ib_device, "Modify port failed hret=%lx",
+			 hret);
 		ret = -EINVAL;
 	}
 
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 96eba3830754..4fb01fcb63ae 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -49,26 +49,26 @@
 #include "hipz_fns.h"
 #include "ipz_pt_fn.h"
 
-#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
-#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7)
-#define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63)
-#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63)
+#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
+#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
+#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
+#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
 
-#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
-#define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7)
-#define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
-#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16,16)
+#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
+#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
+#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
+#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
+#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
 
-#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
 
 static void queue_comp_task(struct ehca_cq *__cq);
 
-static struct ehca_comp_pool* pool;
+static struct ehca_comp_pool *pool;
 #ifdef CONFIG_HOTPLUG_CPU
 static struct notifier_block comp_pool_callback_nb;
 #endif
@@ -85,8 +85,8 @@ static inline void comp_event_callback(struct ehca_cq *cq)
 	return;
 }
 
-static void print_error_data(struct ehca_shca * shca, void* data,
-			     u64* rblock, int length)
+static void print_error_data(struct ehca_shca *shca, void *data,
+			     u64 *rblock, int length)
 {
 	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
 	u64 resource = rblock[1];
@@ -94,7 +94,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
 	switch (type) {
 	case 0x1: /* Queue Pair */
 	{
-		struct ehca_qp *qp = (struct ehca_qp*)data;
+		struct ehca_qp *qp = (struct ehca_qp *)data;
 
 		/* only print error data if AER is set */
 		if (rblock[6] == 0)
@@ -107,7 +107,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
 	}
 	case 0x4: /* Completion Queue */
 	{
-		struct ehca_cq *cq = (struct ehca_cq*)data;
+		struct ehca_cq *cq = (struct ehca_cq *)data;
 
 		ehca_err(&shca->ib_device,
 			 "CQ 0x%x (resource=%lx) has errors.",
@@ -572,7 +572,7 @@ void ehca_tasklet_eq(unsigned long data)
 	ehca_process_eq((struct ehca_shca*)data, 1);
 }
 
-static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
+static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 {
 	int cpu;
 	unsigned long flags;
@@ -636,7 +636,7 @@ static void queue_comp_task(struct ehca_cq *__cq)
 		__queue_comp_task(__cq, cct);
 }
 
-static void run_comp_task(struct ehca_cpu_comp_task* cct)
+static void run_comp_task(struct ehca_cpu_comp_task *cct)
 {
 	struct ehca_cq *cq;
 	unsigned long flags;
@@ -666,12 +666,12 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
 
 static int comp_task(void *__cct)
 {
-	struct ehca_cpu_comp_task* cct = __cct;
+	struct ehca_cpu_comp_task *cct = __cct;
 	int cql_empty;
 	DECLARE_WAITQUEUE(wait, current);
 
 	set_current_state(TASK_INTERRUPTIBLE);
-	while(!kthread_should_stop()) {
+	while (!kthread_should_stop()) {
 		add_wait_queue(&cct->wait_queue, &wait);
 
 		spin_lock_irq(&cct->task_lock);
@@ -745,7 +745,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
 
 	list_splice_init(&cct->cq_list, &list);
 
-	while(!list_empty(&list)) {
+	while (!list_empty(&list)) {
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
 		list_del(&cq->entry);
@@ -768,7 +768,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
-		if(!create_comp_task(pool, cpu)) {
+		if (!create_comp_task(pool, cpu)) {
 			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
 			return NOTIFY_BAD;
 		}
@@ -838,7 +838,7 @@ int ehca_create_comp_pool(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 	comp_pool_callback_nb.notifier_call = comp_pool_callback;
-	comp_pool_callback_nb.priority =0;
+	comp_pool_callback_nb.priority = 0;
 	register_cpu_notifier(&comp_pool_callback_nb);
 #endif
 
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 77aeca6a2c2f..dce503bb7d6b 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -81,8 +81,9 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
 			       int num_phys_buf,
 			       int mr_access_flags, u64 *iova_start);
 
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
-			       int mr_access_flags, struct ib_udata *udata);
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+			       u64 virt, int mr_access_flags,
+			       struct ib_udata *udata);
 
 int ehca_rereg_phys_mr(struct ib_mr *mr,
 		       int mr_rereg_mask,
@@ -192,7 +193,7 @@ void ehca_poll_eqs(unsigned long data);
 void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
 #else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 28ba2dd24216..36377c6db3d4 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -107,7 +107,7 @@ static DEFINE_SPINLOCK(shca_list_lock);
 static struct timer_list poll_eqs_timer;
 
 #ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache = NULL;
+static struct kmem_cache *ctblk_cache;
 
 void *ehca_alloc_fw_ctrlblock(gfp_t flags)
 {
@@ -200,8 +200,8 @@ static void ehca_destroy_slab_caches(void)
 #endif
 }
 
-#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
-#define EHCA_REVID EHCA_BMASK_IBM(40,63)
+#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
+#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
 
 static struct cap_descr {
 	u64 mask;
@@ -263,22 +263,27 @@ int ehca_sense_attributes(struct ehca_shca *shca)
 
 		ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
 
-		if ((hcaaver == 1) && (revid == 0))
-			shca->hw_level = 0x11;
-		else if ((hcaaver == 1) && (revid == 1))
-			shca->hw_level = 0x12;
-		else if ((hcaaver == 1) && (revid == 2))
-			shca->hw_level = 0x13;
-		else if ((hcaaver == 2) && (revid == 0))
-			shca->hw_level = 0x21;
-		else if ((hcaaver == 2) && (revid == 0x10))
-			shca->hw_level = 0x22;
-		else {
+		if (hcaaver == 1) {
+			if (revid <= 3)
+				shca->hw_level = 0x10 | (revid + 1);
+			else
+				shca->hw_level = 0x14;
+		} else if (hcaaver == 2) {
+			if (revid == 0)
+				shca->hw_level = 0x21;
+			else if (revid == 0x10)
+				shca->hw_level = 0x22;
+			else if (revid == 0x20 || revid == 0x21)
+				shca->hw_level = 0x23;
+		}
+
+		if (!shca->hw_level) {
 			ehca_gen_warn("unknown hardware version"
 				      " - assuming default level");
 			shca->hw_level = 0x22;
 		}
-	}
+	} else
+		shca->hw_level = ehca_hw_level;
 	ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
 
 	shca->sport[0].rate = IB_RATE_30_GBPS;
@@ -290,7 +295,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
 		if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
 			ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
 
-	port = (struct hipz_query_port *) rblock;
+	port = (struct hipz_query_port *)rblock;
 	h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
 	if (h_ret != H_SUCCESS) {
 		ehca_gen_err("Cannot query port properties. h_ret=%lx",
@@ -439,7 +444,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
 		return -EPERM;
 	}
 
-	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
+	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0);
 	if (IS_ERR(ibcq)) {
 		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
 		return PTR_ERR(ibcq);
@@ -666,7 +671,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 	}
 
 	/* create internal protection domain */
-	ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
+	ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
 	if (IS_ERR(ibpd)) {
 		ehca_err(&shca->ib_device, "Cannot create internal PD.");
 		ret = PTR_ERR(ibpd);
@@ -863,18 +868,21 @@ int __init ehca_module_init(void)
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
 	       "(Rel.: SVNEHCA_0023)\n");
 
-	if ((ret = ehca_create_comp_pool())) {
+	ret = ehca_create_comp_pool();
+	if (ret) {
 		ehca_gen_err("Cannot create comp pool.");
 		return ret;
 	}
 
-	if ((ret = ehca_create_slab_caches())) {
+	ret = ehca_create_slab_caches();
+	if (ret) {
 		ehca_gen_err("Cannot create SLAB caches");
 		ret = -ENOMEM;
 		goto module_init1;
 	}
 
-	if ((ret = ibmebus_register_driver(&ehca_driver))) {
+	ret = ibmebus_register_driver(&ehca_driver);
+	if (ret) {
 		ehca_gen_err("Cannot register eHCA device driver");
 		ret = -EINVAL;
 		goto module_init2;
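The reworked autosensing collapses the old if/else chain: for generation-1 HCAs the level is computed as 0x10 | (revid + 1) for revid <= 3, generation 2 gains a 0x23 level for revids 0x20/0x21, anything unrecognized falls back to 0x22 with a warning, and the new outer else uses a nonzero ehca_hw_level module parameter to skip autosensing entirely. Worked examples of the mapping:

/* hcaaver == 1, revid == 0    ->  0x10 | 1 == 0x11  (as before)
 * hcaaver == 1, revid == 2    ->  0x10 | 3 == 0x13  (as before)
 * hcaaver == 1, revid == 3    ->  0x10 | 4 == 0x14  (newly recognized)
 * hcaaver == 2, revid == 0x20 ->  0x23              (newly recognized)
 * anything else               ->  warn, default to 0x22
 */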
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index add79bd44e39..6262c5462d50 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -48,6 +48,11 @@
 #include "hcp_if.h"
 #include "hipz_hw.h"
 
+#define NUM_CHUNKS(length, chunk_size) \
+	(((length) + (chunk_size - 1)) / (chunk_size))
+/* max number of rpages (per hcall register_rpages) */
+#define MAX_RPAGES 512
+
 static struct kmem_cache *mr_cache;
 static struct kmem_cache *mw_cache;
 
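NUM_CHUNKS() is plain ceiling division; the later hunks feed it the offset of the start address within its first page plus the total length, yielding the page count for both kernel-sized and hardware-sized pages. A worked example:

/* Register 10000 bytes starting 100 bytes into a 4096-byte page:
 *   NUM_CHUNKS(100 + 10000, 4096) == (10100 + 4095) / 4096 == 3 pages
 * MAX_RPAGES (512) caps how many such pages a single register_rpages
 * hcall may transfer.
 */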
@@ -56,9 +61,9 @@ static struct ehca_mr *ehca_mr_new(void) | |||
56 | struct ehca_mr *me; | 61 | struct ehca_mr *me; |
57 | 62 | ||
58 | me = kmem_cache_zalloc(mr_cache, GFP_KERNEL); | 63 | me = kmem_cache_zalloc(mr_cache, GFP_KERNEL); |
59 | if (me) { | 64 | if (me) |
60 | spin_lock_init(&me->mrlock); | 65 | spin_lock_init(&me->mrlock); |
61 | } else | 66 | else |
62 | ehca_gen_err("alloc failed"); | 67 | ehca_gen_err("alloc failed"); |
63 | 68 | ||
64 | return me; | 69 | return me; |
@@ -74,9 +79,9 @@ static struct ehca_mw *ehca_mw_new(void) | |||
74 | struct ehca_mw *me; | 79 | struct ehca_mw *me; |
75 | 80 | ||
76 | me = kmem_cache_zalloc(mw_cache, GFP_KERNEL); | 81 | me = kmem_cache_zalloc(mw_cache, GFP_KERNEL); |
77 | if (me) { | 82 | if (me) |
78 | spin_lock_init(&me->mwlock); | 83 | spin_lock_init(&me->mwlock); |
79 | } else | 84 | else |
80 | ehca_gen_err("alloc failed"); | 85 | ehca_gen_err("alloc failed"); |
81 | 86 | ||
82 | return me; | 87 | return me; |
@@ -106,11 +111,12 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) | |||
106 | goto get_dma_mr_exit0; | 111 | goto get_dma_mr_exit0; |
107 | } | 112 | } |
108 | 113 | ||
109 | ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE, | 114 | ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE, |
110 | mr_access_flags, e_pd, | 115 | mr_access_flags, e_pd, |
111 | &e_maxmr->ib.ib_mr.lkey, | 116 | &e_maxmr->ib.ib_mr.lkey, |
112 | &e_maxmr->ib.ib_mr.rkey); | 117 | &e_maxmr->ib.ib_mr.rkey); |
113 | if (ret) { | 118 | if (ret) { |
119 | ehca_mr_delete(e_maxmr); | ||
114 | ib_mr = ERR_PTR(ret); | 120 | ib_mr = ERR_PTR(ret); |
115 | goto get_dma_mr_exit0; | 121 | goto get_dma_mr_exit0; |
116 | } | 122 | } |
@@ -144,9 +150,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, | |||
144 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); | 150 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); |
145 | 151 | ||
146 | u64 size; | 152 | u64 size; |
147 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | ||
148 | u32 num_pages_mr; | ||
149 | u32 num_pages_4k; /* 4k portion "pages" */ | ||
150 | 153 | ||
151 | if ((num_phys_buf <= 0) || !phys_buf_array) { | 154 | if ((num_phys_buf <= 0) || !phys_buf_array) { |
152 | ehca_err(pd->device, "bad input values: num_phys_buf=%x " | 155 | ehca_err(pd->device, "bad input values: num_phys_buf=%x " |
@@ -190,12 +193,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, | |||
190 | goto reg_phys_mr_exit0; | 193 | goto reg_phys_mr_exit0; |
191 | } | 194 | } |
192 | 195 | ||
193 | /* determine number of MR pages */ | ||
194 | num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size + | ||
195 | PAGE_SIZE - 1) / PAGE_SIZE); | ||
196 | num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size + | ||
197 | EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); | ||
198 | |||
199 | /* register MR on HCA */ | 196 | /* register MR on HCA */ |
200 | if (ehca_mr_is_maxmr(size, iova_start)) { | 197 | if (ehca_mr_is_maxmr(size, iova_start)) { |
201 | e_mr->flags |= EHCA_MR_FLAG_MAXMR; | 198 | e_mr->flags |= EHCA_MR_FLAG_MAXMR; |
@@ -207,13 +204,22 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, | |||
207 | goto reg_phys_mr_exit1; | 204 | goto reg_phys_mr_exit1; |
208 | } | 205 | } |
209 | } else { | 206 | } else { |
210 | pginfo.type = EHCA_MR_PGI_PHYS; | 207 | struct ehca_mr_pginfo pginfo; |
211 | pginfo.num_pages = num_pages_mr; | 208 | u32 num_kpages; |
212 | pginfo.num_4k = num_pages_4k; | 209 | u32 num_hwpages; |
213 | pginfo.num_phys_buf = num_phys_buf; | 210 | |
214 | pginfo.phys_buf_array = phys_buf_array; | 211 | num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size, |
215 | pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / | 212 | PAGE_SIZE); |
216 | EHCA_PAGESIZE); | 213 | num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + |
214 | size, EHCA_PAGESIZE); | ||
215 | memset(&pginfo, 0, sizeof(pginfo)); | ||
216 | pginfo.type = EHCA_MR_PGI_PHYS; | ||
217 | pginfo.num_kpages = num_kpages; | ||
218 | pginfo.num_hwpages = num_hwpages; | ||
219 | pginfo.u.phy.num_phys_buf = num_phys_buf; | ||
220 | pginfo.u.phy.phys_buf_array = phys_buf_array; | ||
221 | pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) / | ||
222 | EHCA_PAGESIZE); | ||
217 | 223 | ||
218 | ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, | 224 | ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, |
219 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, | 225 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, |
@@ -240,18 +246,19 @@ reg_phys_mr_exit0: | |||
240 | 246 | ||
241 | /*----------------------------------------------------------------------*/ | 247 | /*----------------------------------------------------------------------*/ |
242 | 248 | ||
243 | struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, | 249 | struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
244 | int mr_access_flags, struct ib_udata *udata) | 250 | u64 virt, int mr_access_flags, |
251 | struct ib_udata *udata) | ||
245 | { | 252 | { |
246 | struct ib_mr *ib_mr; | 253 | struct ib_mr *ib_mr; |
247 | struct ehca_mr *e_mr; | 254 | struct ehca_mr *e_mr; |
248 | struct ehca_shca *shca = | 255 | struct ehca_shca *shca = |
249 | container_of(pd->device, struct ehca_shca, ib_device); | 256 | container_of(pd->device, struct ehca_shca, ib_device); |
250 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); | 257 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); |
251 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | 258 | struct ehca_mr_pginfo pginfo; |
252 | int ret; | 259 | int ret; |
253 | u32 num_pages_mr; | 260 | u32 num_kpages; |
254 | u32 num_pages_4k; /* 4k portion "pages" */ | 261 | u32 num_hwpages; |
255 | 262 | ||
256 | if (!pd) { | 263 | if (!pd) { |
257 | ehca_gen_err("bad pd=%p", pd); | 264 | ehca_gen_err("bad pd=%p", pd); |
@@ -289,7 +296,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt | |||
289 | e_mr->umem = ib_umem_get(pd->uobject->context, start, length, | 296 | e_mr->umem = ib_umem_get(pd->uobject->context, start, length, |
290 | mr_access_flags); | 297 | mr_access_flags); |
291 | if (IS_ERR(e_mr->umem)) { | 298 | if (IS_ERR(e_mr->umem)) { |
292 | ib_mr = (void *) e_mr->umem; | 299 | ib_mr = (void *)e_mr->umem; |
293 | goto reg_user_mr_exit1; | 300 | goto reg_user_mr_exit1; |
294 | } | 301 | } |
295 | 302 | ||
@@ -301,23 +308,24 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt | |||
301 | } | 308 | } |
302 | 309 | ||
303 | /* determine number of MR pages */ | 310 | /* determine number of MR pages */ |
304 | num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) / | 311 | num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE); |
305 | PAGE_SIZE); | 312 | num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length, |
306 | num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) / | 313 | EHCA_PAGESIZE); |
307 | EHCA_PAGESIZE); | ||
308 | 314 | ||
309 | /* register MR on HCA */ | 315 | /* register MR on HCA */ |
310 | pginfo.type = EHCA_MR_PGI_USER; | 316 | memset(&pginfo, 0, sizeof(pginfo)); |
311 | pginfo.num_pages = num_pages_mr; | 317 | pginfo.type = EHCA_MR_PGI_USER; |
312 | pginfo.num_4k = num_pages_4k; | 318 | pginfo.num_kpages = num_kpages; |
313 | pginfo.region = e_mr->umem; | 319 | pginfo.num_hwpages = num_hwpages; |
314 | pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE; | 320 | pginfo.u.usr.region = e_mr->umem; |
315 | pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk, | 321 | pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE; |
316 | (&e_mr->umem->chunk_list), | 322 | pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk, |
317 | list); | 323 | (&e_mr->umem->chunk_list), |
318 | 324 | list); | |
319 | ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd, | 325 | |
320 | &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); | 326 | ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, |
327 | e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, | ||
328 | &e_mr->ib.ib_mr.rkey); | ||
321 | if (ret) { | 329 | if (ret) { |
322 | ib_mr = ERR_PTR(ret); | 330 | ib_mr = ERR_PTR(ret); |
323 | goto reg_user_mr_exit2; | 331 | goto reg_user_mr_exit2; |
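
NUM_CHUNKS replaces the open-coded round-up divisions in this function and below; a plausible definition matching the arithmetic it substitutes for (assumed here, not quoted from the ehca headers):

	/* number of chunk_size-sized chunks needed to cover length bytes */
	#define NUM_CHUNKS(length, chunk_size) \
		(((length) + (chunk_size) - 1) / (chunk_size))

With the start offset folded into the length, NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE) is exactly the old ((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) / PAGE_SIZE. The rename from num_pages/num_4k to num_kpages/num_hwpages makes the two units explicit: kernel pages of PAGE_SIZE versus hardware pages of EHCA_PAGESIZE (4K), which differ on ppc64 kernels built with 64K pages, where one kpage spans 16 hwpages.
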
@@ -360,9 +368,9 @@ int ehca_rereg_phys_mr(struct ib_mr *mr, | |||
360 | struct ehca_pd *new_pd; | 368 | struct ehca_pd *new_pd; |
361 | u32 tmp_lkey, tmp_rkey; | 369 | u32 tmp_lkey, tmp_rkey; |
362 | unsigned long sl_flags; | 370 | unsigned long sl_flags; |
363 | u32 num_pages_mr = 0; | 371 | u32 num_kpages = 0; |
364 | u32 num_pages_4k = 0; /* 4k portion "pages" */ | 372 | u32 num_hwpages = 0; |
365 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | 373 | struct ehca_mr_pginfo pginfo; |
366 | u32 cur_pid = current->tgid; | 374 | u32 cur_pid = current->tgid; |
367 | 375 | ||
368 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | 376 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && |
@@ -414,7 +422,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr, | |||
414 | goto rereg_phys_mr_exit0; | 422 | goto rereg_phys_mr_exit0; |
415 | } | 423 | } |
416 | if (!phys_buf_array || num_phys_buf <= 0) { | 424 | if (!phys_buf_array || num_phys_buf <= 0) { |
417 | ehca_err(mr->device, "bad input values: mr_rereg_mask=%x" | 425 | ehca_err(mr->device, "bad input values mr_rereg_mask=%x" |
418 | " phys_buf_array=%p num_phys_buf=%x", | 426 | " phys_buf_array=%p num_phys_buf=%x", |
419 | mr_rereg_mask, phys_buf_array, num_phys_buf); | 427 | mr_rereg_mask, phys_buf_array, num_phys_buf); |
420 | ret = -EINVAL; | 428 | ret = -EINVAL; |
@@ -438,10 +446,10 @@ int ehca_rereg_phys_mr(struct ib_mr *mr, | |||
438 | 446 | ||
439 | /* set requested values dependent on rereg request */ | 447 | /* set requested values dependent on rereg request */ |
440 | spin_lock_irqsave(&e_mr->mrlock, sl_flags); | 448 | spin_lock_irqsave(&e_mr->mrlock, sl_flags); |
441 | new_start = e_mr->start; /* new == old address */ | 449 | new_start = e_mr->start; |
442 | new_size = e_mr->size; /* new == old length */ | 450 | new_size = e_mr->size; |
443 | new_acl = e_mr->acl; /* new == old access control */ | 451 | new_acl = e_mr->acl; |
444 | new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/ | 452 | new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); |
445 | 453 | ||
446 | if (mr_rereg_mask & IB_MR_REREG_TRANS) { | 454 | if (mr_rereg_mask & IB_MR_REREG_TRANS) { |
447 | new_start = iova_start; /* change address */ | 455 | new_start = iova_start; /* change address */ |
@@ -458,17 +466,18 @@ int ehca_rereg_phys_mr(struct ib_mr *mr, | |||
458 | ret = -EINVAL; | 466 | ret = -EINVAL; |
459 | goto rereg_phys_mr_exit1; | 467 | goto rereg_phys_mr_exit1; |
460 | } | 468 | } |
461 | num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size + | 469 | num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) + |
462 | PAGE_SIZE - 1) / PAGE_SIZE); | 470 | new_size, PAGE_SIZE); |
463 | num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size + | 471 | num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) + |
464 | EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); | 472 | new_size, EHCA_PAGESIZE); |
465 | pginfo.type = EHCA_MR_PGI_PHYS; | 473 | memset(&pginfo, 0, sizeof(pginfo)); |
466 | pginfo.num_pages = num_pages_mr; | 474 | pginfo.type = EHCA_MR_PGI_PHYS; |
467 | pginfo.num_4k = num_pages_4k; | 475 | pginfo.num_kpages = num_kpages; |
468 | pginfo.num_phys_buf = num_phys_buf; | 476 | pginfo.num_hwpages = num_hwpages; |
469 | pginfo.phys_buf_array = phys_buf_array; | 477 | pginfo.u.phy.num_phys_buf = num_phys_buf; |
470 | pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / | 478 | pginfo.u.phy.phys_buf_array = phys_buf_array; |
471 | EHCA_PAGESIZE); | 479 | pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) / |
480 | EHCA_PAGESIZE); | ||
472 | } | 481 | } |
473 | if (mr_rereg_mask & IB_MR_REREG_ACCESS) | 482 | if (mr_rereg_mask & IB_MR_REREG_ACCESS) |
474 | new_acl = mr_access_flags; | 483 | new_acl = mr_access_flags; |
@@ -510,7 +519,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) | |||
510 | struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd); | 519 | struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd); |
511 | u32 cur_pid = current->tgid; | 520 | u32 cur_pid = current->tgid; |
512 | unsigned long sl_flags; | 521 | unsigned long sl_flags; |
513 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | 522 | struct ehca_mr_hipzout_parms hipzout; |
514 | 523 | ||
515 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | 524 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && |
516 | (my_pd->ownpid != cur_pid)) { | 525 | (my_pd->ownpid != cur_pid)) { |
@@ -536,14 +545,14 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) | |||
536 | "hca_hndl=%lx mr_hndl=%lx lkey=%x", | 545 | "hca_hndl=%lx mr_hndl=%lx lkey=%x", |
537 | h_ret, mr, shca->ipz_hca_handle.handle, | 546 | h_ret, mr, shca->ipz_hca_handle.handle, |
538 | e_mr->ipz_mr_handle.handle, mr->lkey); | 547 | e_mr->ipz_mr_handle.handle, mr->lkey); |
539 | ret = ehca_mrmw_map_hrc_query_mr(h_ret); | 548 | ret = ehca2ib_return_code(h_ret); |
540 | goto query_mr_exit1; | 549 | goto query_mr_exit1; |
541 | } | 550 | } |
542 | mr_attr->pd = mr->pd; | 551 | mr_attr->pd = mr->pd; |
543 | mr_attr->device_virt_addr = hipzout.vaddr; | 552 | mr_attr->device_virt_addr = hipzout.vaddr; |
544 | mr_attr->size = hipzout.len; | 553 | mr_attr->size = hipzout.len; |
545 | mr_attr->lkey = hipzout.lkey; | 554 | mr_attr->lkey = hipzout.lkey; |
546 | mr_attr->rkey = hipzout.rkey; | 555 | mr_attr->rkey = hipzout.rkey; |
547 | ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags); | 556 | ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags); |
548 | 557 | ||
549 | query_mr_exit1: | 558 | query_mr_exit1: |
@@ -596,7 +605,7 @@ int ehca_dereg_mr(struct ib_mr *mr) | |||
596 | "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x", | 605 | "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x", |
597 | h_ret, shca, e_mr, shca->ipz_hca_handle.handle, | 606 | h_ret, shca, e_mr, shca->ipz_hca_handle.handle, |
598 | e_mr->ipz_mr_handle.handle, mr->lkey); | 607 | e_mr->ipz_mr_handle.handle, mr->lkey); |
599 | ret = ehca_mrmw_map_hrc_free_mr(h_ret); | 608 | ret = ehca2ib_return_code(h_ret); |
600 | goto dereg_mr_exit0; | 609 | goto dereg_mr_exit0; |
601 | } | 610 | } |
602 | 611 | ||
@@ -622,7 +631,7 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd) | |||
622 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); | 631 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); |
623 | struct ehca_shca *shca = | 632 | struct ehca_shca *shca = |
624 | container_of(pd->device, struct ehca_shca, ib_device); | 633 | container_of(pd->device, struct ehca_shca, ib_device); |
625 | struct ehca_mw_hipzout_parms hipzout = {{0},0}; | 634 | struct ehca_mw_hipzout_parms hipzout; |
626 | 635 | ||
627 | e_mw = ehca_mw_new(); | 636 | e_mw = ehca_mw_new(); |
628 | if (!e_mw) { | 637 | if (!e_mw) { |
@@ -636,7 +645,7 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd) | |||
636 | ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx " | 645 | ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx " |
637 | "shca=%p hca_hndl=%lx mw=%p", | 646 | "shca=%p hca_hndl=%lx mw=%p", |
638 | h_ret, shca, shca->ipz_hca_handle.handle, e_mw); | 647 | h_ret, shca, shca->ipz_hca_handle.handle, e_mw); |
639 | ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret)); | 648 | ib_mw = ERR_PTR(ehca2ib_return_code(h_ret)); |
640 | goto alloc_mw_exit1; | 649 | goto alloc_mw_exit1; |
641 | } | 650 | } |
642 | /* successful MW allocation */ | 651 | /* successful MW allocation */ |
@@ -679,7 +688,7 @@ int ehca_dealloc_mw(struct ib_mw *mw) | |||
679 | "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx", | 688 | "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx", |
680 | h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, | 689 | h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, |
681 | e_mw->ipz_mw_handle.handle); | 690 | e_mw->ipz_mw_handle.handle); |
682 | return ehca_mrmw_map_hrc_free_mw(h_ret); | 691 | return ehca2ib_return_code(h_ret); |
683 | } | 692 | } |
684 | /* successful deallocation */ | 693 | /* successful deallocation */ |
685 | ehca_mw_delete(e_mw); | 694 | ehca_mw_delete(e_mw); |
@@ -699,7 +708,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, | |||
699 | struct ehca_mr *e_fmr; | 708 | struct ehca_mr *e_fmr; |
700 | int ret; | 709 | int ret; |
701 | u32 tmp_lkey, tmp_rkey; | 710 | u32 tmp_lkey, tmp_rkey; |
702 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | 711 | struct ehca_mr_pginfo pginfo; |
703 | 712 | ||
704 | /* check other parameters */ | 713 | /* check other parameters */ |
705 | if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && | 714 | if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && |
@@ -745,6 +754,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, | |||
745 | e_fmr->flags |= EHCA_MR_FLAG_FMR; | 754 | e_fmr->flags |= EHCA_MR_FLAG_FMR; |
746 | 755 | ||
747 | /* register MR on HCA */ | 756 | /* register MR on HCA */ |
757 | memset(&pginfo, 0, sizeof(pginfo)); | ||
748 | ret = ehca_reg_mr(shca, e_fmr, NULL, | 758 | ret = ehca_reg_mr(shca, e_fmr, NULL, |
749 | fmr_attr->max_pages * (1 << fmr_attr->page_shift), | 759 | fmr_attr->max_pages * (1 << fmr_attr->page_shift), |
750 | mr_access_flags, e_pd, &pginfo, | 760 | mr_access_flags, e_pd, &pginfo, |
@@ -783,7 +793,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr, | |||
783 | container_of(fmr->device, struct ehca_shca, ib_device); | 793 | container_of(fmr->device, struct ehca_shca, ib_device); |
784 | struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); | 794 | struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); |
785 | struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); | 795 | struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); |
786 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | 796 | struct ehca_mr_pginfo pginfo; |
787 | u32 tmp_lkey, tmp_rkey; | 797 | u32 tmp_lkey, tmp_rkey; |
788 | 798 | ||
789 | if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { | 799 | if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { |
@@ -809,14 +819,16 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr, | |||
809 | fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); | 819 | fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); |
810 | } | 820 | } |
811 | 821 | ||
812 | pginfo.type = EHCA_MR_PGI_FMR; | 822 | memset(&pginfo, 0, sizeof(pginfo)); |
813 | pginfo.num_pages = list_len; | 823 | pginfo.type = EHCA_MR_PGI_FMR; |
814 | pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE); | 824 | pginfo.num_kpages = list_len; |
815 | pginfo.page_list = page_list; | 825 | pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE); |
816 | pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) / | 826 | pginfo.u.fmr.page_list = page_list; |
817 | EHCA_PAGESIZE); | 827 | pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) / |
828 | EHCA_PAGESIZE); | ||
829 | pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size; | ||
818 | 830 | ||
819 | ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova, | 831 | ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova, |
820 | list_len * e_fmr->fmr_page_size, | 832 | list_len * e_fmr->fmr_page_size, |
821 | e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey); | 833 | e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey); |
822 | if (ret) | 834 | if (ret) |
@@ -831,8 +843,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr, | |||
831 | map_phys_fmr_exit0: | 843 | map_phys_fmr_exit0: |
832 | if (ret) | 844 | if (ret) |
833 | ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x " | 845 | ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x " |
834 | "iova=%lx", | 846 | "iova=%lx", ret, fmr, page_list, list_len, iova); |
835 | ret, fmr, page_list, list_len, iova); | ||
836 | return ret; | 847 | return ret; |
837 | } /* end ehca_map_phys_fmr() */ | 848 | } /* end ehca_map_phys_fmr() */ |
838 | 849 | ||
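
A worked example of the FMR accounting above, with assumed values (fmr_page_size = 64 KiB, EHCA_PAGESIZE = 4 KiB, and an iova starting 8 KiB into its FMR page):

	pginfo.num_kpages  = list_len;				/* one count per list entry */
	pginfo.num_hwpages = list_len * (65536 / 4096);		/* 16 hw pages per FMR page */
	pginfo.next_hwpage = (8192 & (65536 - 1)) / 4096;	/* == 2 */

Stashing fmr_pgsize in pginfo.u.fmr is what later lets ehca_set_pagebuf_fmr() advance through the page list without needing the e_fmr back-pointer.
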
@@ -922,7 +933,7 @@ int ehca_dealloc_fmr(struct ib_fmr *fmr) | |||
922 | "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x", | 933 | "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x", |
923 | h_ret, e_fmr, shca->ipz_hca_handle.handle, | 934 | h_ret, e_fmr, shca->ipz_hca_handle.handle, |
924 | e_fmr->ipz_mr_handle.handle, fmr->lkey); | 935 | e_fmr->ipz_mr_handle.handle, fmr->lkey); |
925 | ret = ehca_mrmw_map_hrc_free_mr(h_ret); | 936 | ret = ehca2ib_return_code(h_ret); |
926 | goto free_fmr_exit0; | 937 | goto free_fmr_exit0; |
927 | } | 938 | } |
928 | /* successful deregistration */ | 939 | /* successful deregistration */ |
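
All of the per-call ehca_mrmw_map_hrc_*() mappers in this file collapse into a single ehca2ib_return_code(). Judging from the removed ehca_mrmw_map_hrc_alloc() near the end of this diff, the consolidated helper presumably maps hcall return codes to errnos along these lines (a sketch, not the verbatim helper):

	int ehca2ib_return_code(u64 hipz_rc)
	{
		switch (hipz_rc) {
		case H_SUCCESS:			/* successful completion */
			return 0;
		case H_NOT_ENOUGH_RESOURCES:	/* insufficient resources */
		case H_CONSTRAINED:		/* resource constraint */
		case H_NO_MEM:
			return -ENOMEM;
		case H_BUSY:			/* long busy */
			return -EBUSY;
		default:
			return -EINVAL;
		}
	}
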
@@ -950,12 +961,12 @@ int ehca_reg_mr(struct ehca_shca *shca, | |||
950 | int ret; | 961 | int ret; |
951 | u64 h_ret; | 962 | u64 h_ret; |
952 | u32 hipz_acl; | 963 | u32 hipz_acl; |
953 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | 964 | struct ehca_mr_hipzout_parms hipzout; |
954 | 965 | ||
955 | ehca_mrmw_map_acl(acl, &hipz_acl); | 966 | ehca_mrmw_map_acl(acl, &hipz_acl); |
956 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | 967 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); |
957 | if (ehca_use_hp_mr == 1) | 968 | if (ehca_use_hp_mr == 1) |
958 | hipz_acl |= 0x00000001; | 969 | hipz_acl |= 0x00000001; |
959 | 970 | ||
960 | h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr, | 971 | h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr, |
961 | (u64)iova_start, size, hipz_acl, | 972 | (u64)iova_start, size, hipz_acl, |
@@ -963,7 +974,7 @@ int ehca_reg_mr(struct ehca_shca *shca, | |||
963 | if (h_ret != H_SUCCESS) { | 974 | if (h_ret != H_SUCCESS) { |
964 | ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx " | 975 | ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx " |
965 | "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle); | 976 | "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle); |
966 | ret = ehca_mrmw_map_hrc_alloc(h_ret); | 977 | ret = ehca2ib_return_code(h_ret); |
967 | goto ehca_reg_mr_exit0; | 978 | goto ehca_reg_mr_exit0; |
968 | } | 979 | } |
969 | 980 | ||
@@ -974,11 +985,11 @@ int ehca_reg_mr(struct ehca_shca *shca, | |||
974 | goto ehca_reg_mr_exit1; | 985 | goto ehca_reg_mr_exit1; |
975 | 986 | ||
976 | /* successful registration */ | 987 | /* successful registration */ |
977 | e_mr->num_pages = pginfo->num_pages; | 988 | e_mr->num_kpages = pginfo->num_kpages; |
978 | e_mr->num_4k = pginfo->num_4k; | 989 | e_mr->num_hwpages = pginfo->num_hwpages; |
979 | e_mr->start = iova_start; | 990 | e_mr->start = iova_start; |
980 | e_mr->size = size; | 991 | e_mr->size = size; |
981 | e_mr->acl = acl; | 992 | e_mr->acl = acl; |
982 | *lkey = hipzout.lkey; | 993 | *lkey = hipzout.lkey; |
983 | *rkey = hipzout.rkey; | 994 | *rkey = hipzout.rkey; |
984 | return 0; | 995 | return 0; |
@@ -988,10 +999,10 @@ ehca_reg_mr_exit1: | |||
988 | if (h_ret != H_SUCCESS) { | 999 | if (h_ret != H_SUCCESS) { |
989 | ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p " | 1000 | ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p " |
990 | "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " | 1001 | "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " |
991 | "pginfo=%p num_pages=%lx num_4k=%lx ret=%x", | 1002 | "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x", |
992 | h_ret, shca, e_mr, iova_start, size, acl, e_pd, | 1003 | h_ret, shca, e_mr, iova_start, size, acl, e_pd, |
993 | hipzout.lkey, pginfo, pginfo->num_pages, | 1004 | hipzout.lkey, pginfo, pginfo->num_kpages, |
994 | pginfo->num_4k, ret); | 1005 | pginfo->num_hwpages, ret); |
995 | ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " | 1006 | ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " |
996 | "not recoverable"); | 1007 | "not recoverable"); |
997 | } | 1008 | } |
@@ -999,9 +1010,9 @@ ehca_reg_mr_exit0: | |||
999 | if (ret) | 1010 | if (ret) |
1000 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " | 1011 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " |
1001 | "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " | 1012 | "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " |
1002 | "num_pages=%lx num_4k=%lx", | 1013 | "num_kpages=%lx num_hwpages=%lx", |
1003 | ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, | 1014 | ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, |
1004 | pginfo->num_pages, pginfo->num_4k); | 1015 | pginfo->num_kpages, pginfo->num_hwpages); |
1005 | return ret; | 1016 | return ret; |
1006 | } /* end ehca_reg_mr() */ | 1017 | } /* end ehca_reg_mr() */ |
1007 | 1018 | ||
@@ -1026,24 +1037,24 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, | |||
1026 | } | 1037 | } |
1027 | 1038 | ||
1028 | /* max 512 pages per shot */ | 1039 | /* max 512 pages per shot */ |
1029 | for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) { | 1040 | for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) { |
1030 | 1041 | ||
1031 | if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) { | 1042 | if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { |
1032 | rnum = pginfo->num_4k % 512; /* last shot */ | 1043 | rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */ |
1033 | if (rnum == 0) | 1044 | if (rnum == 0) |
1034 | rnum = 512; /* last shot is full */ | 1045 | rnum = MAX_RPAGES; /* last shot is full */ |
1035 | } else | 1046 | } else |
1036 | rnum = 512; | 1047 | rnum = MAX_RPAGES; |
1037 | 1048 | ||
1038 | if (rnum > 1) { | 1049 | ret = ehca_set_pagebuf(pginfo, rnum, kpage); |
1039 | ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage); | 1050 | if (ret) { |
1040 | if (ret) { | 1051 | ehca_err(&shca->ib_device, "ehca_set_pagebuf " |
1041 | ehca_err(&shca->ib_device, "ehca_set_pagebuf " | ||
1042 | "bad rc, ret=%x rnum=%x kpage=%p", | 1052 | "bad rc, ret=%x rnum=%x kpage=%p", |
1043 | ret, rnum, kpage); | 1053 | ret, rnum, kpage); |
1044 | ret = -EFAULT; | 1054 | goto ehca_reg_mr_rpages_exit1; |
1045 | goto ehca_reg_mr_rpages_exit1; | 1055 | } |
1046 | } | 1056 | |
1057 | if (rnum > 1) { | ||
1047 | rpage = virt_to_abs(kpage); | 1058 | rpage = virt_to_abs(kpage); |
1048 | if (!rpage) { | 1059 | if (!rpage) { |
1049 | ehca_err(&shca->ib_device, "kpage=%p i=%x", | 1060 | ehca_err(&shca->ib_device, "kpage=%p i=%x", |
@@ -1051,21 +1062,14 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, | |||
1051 | ret = -EFAULT; | 1062 | ret = -EFAULT; |
1052 | goto ehca_reg_mr_rpages_exit1; | 1063 | goto ehca_reg_mr_rpages_exit1; |
1053 | } | 1064 | } |
1054 | } else { /* rnum==1 */ | 1065 | } else |
1055 | ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage); | 1066 | rpage = *kpage; |
1056 | if (ret) { | ||
1057 | ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 " | ||
1058 | "bad rc, ret=%x i=%x", ret, i); | ||
1059 | ret = -EFAULT; | ||
1060 | goto ehca_reg_mr_rpages_exit1; | ||
1061 | } | ||
1062 | } | ||
1063 | 1067 | ||
1064 | h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr, | 1068 | h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr, |
1065 | 0, /* pagesize 4k */ | 1069 | 0, /* pagesize 4k */ |
1066 | 0, rpage, rnum); | 1070 | 0, rpage, rnum); |
1067 | 1071 | ||
1068 | if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) { | 1072 | if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) { |
1069 | /* | 1073 | /* |
1070 | * check for 'registration complete'==H_SUCCESS | 1074 | * check for 'registration complete'==H_SUCCESS |
1071 | * and for 'page registered'==H_PAGE_REGISTERED | 1075 | * and for 'page registered'==H_PAGE_REGISTERED |
@@ -1078,7 +1082,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, | |||
1078 | shca->ipz_hca_handle.handle, | 1082 | shca->ipz_hca_handle.handle, |
1079 | e_mr->ipz_mr_handle.handle, | 1083 | e_mr->ipz_mr_handle.handle, |
1080 | e_mr->ib.ib_mr.lkey); | 1084 | e_mr->ib.ib_mr.lkey); |
1081 | ret = ehca_mrmw_map_hrc_rrpg_last(h_ret); | 1085 | ret = ehca2ib_return_code(h_ret); |
1082 | break; | 1086 | break; |
1083 | } else | 1087 | } else |
1084 | ret = 0; | 1088 | ret = 0; |
@@ -1089,7 +1093,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, | |||
1089 | e_mr->ib.ib_mr.lkey, | 1093 | e_mr->ib.ib_mr.lkey, |
1090 | shca->ipz_hca_handle.handle, | 1094 | shca->ipz_hca_handle.handle, |
1091 | e_mr->ipz_mr_handle.handle); | 1095 | e_mr->ipz_mr_handle.handle); |
1092 | ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret); | 1096 | ret = ehca2ib_return_code(h_ret); |
1093 | break; | 1097 | break; |
1094 | } else | 1098 | } else |
1095 | ret = 0; | 1099 | ret = 0; |
@@ -1101,8 +1105,8 @@ ehca_reg_mr_rpages_exit1: | |||
1101 | ehca_reg_mr_rpages_exit0: | 1105 | ehca_reg_mr_rpages_exit0: |
1102 | if (ret) | 1106 | if (ret) |
1103 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " | 1107 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " |
1104 | "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo, | 1108 | "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr, |
1105 | pginfo->num_pages, pginfo->num_4k); | 1109 | pginfo, pginfo->num_kpages, pginfo->num_hwpages); |
1106 | return ret; | 1110 | return ret; |
1107 | } /* end ehca_reg_mr_rpages() */ | 1111 | } /* end ehca_reg_mr_rpages() */ |
1108 | 1112 | ||
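
MAX_RPAGES (presumably defined as 512 in the ehca headers, replacing the bare literal) caps how many hardware pages one hipz_h_register_rpage_mr() call may take. The last-shot arithmetic above, with assumed numbers:

	/* num_hwpages = 1300: NUM_CHUNKS(1300, 512) = 3 shots		*/
	/* shots 0 and 1:  rnum = 512					*/
	/* shot 2 (last):  1300 % 512 = 276, so rnum = 276		*/
	/* num_hwpages = 1024:  1024 % 512 == 0, so rnum = 512		*/
	/*			("last shot is full")			*/

Folding the old ehca_set_pagebuf_1() path into ehca_set_pagebuf() also removes the rnum == 1 special case: a single page now comes back through *kpage like any other shot.
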
@@ -1124,7 +1128,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, | |||
1124 | u64 *kpage; | 1128 | u64 *kpage; |
1125 | u64 rpage; | 1129 | u64 rpage; |
1126 | struct ehca_mr_pginfo pginfo_save; | 1130 | struct ehca_mr_pginfo pginfo_save; |
1127 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | 1131 | struct ehca_mr_hipzout_parms hipzout; |
1128 | 1132 | ||
1129 | ehca_mrmw_map_acl(acl, &hipz_acl); | 1133 | ehca_mrmw_map_acl(acl, &hipz_acl); |
1130 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | 1134 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); |
@@ -1137,12 +1141,12 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, | |||
1137 | } | 1141 | } |
1138 | 1142 | ||
1139 | pginfo_save = *pginfo; | 1143 | pginfo_save = *pginfo; |
1140 | ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage); | 1144 | ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); |
1141 | if (ret) { | 1145 | if (ret) { |
1142 | ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " | 1146 | ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " |
1143 | "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p", | 1147 | "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx " |
1144 | e_mr, pginfo, pginfo->type, pginfo->num_pages, | 1148 | "kpage=%p", e_mr, pginfo, pginfo->type, |
1145 | pginfo->num_4k,kpage); | 1149 | pginfo->num_kpages, pginfo->num_hwpages, kpage); |
1146 | goto ehca_rereg_mr_rereg1_exit1; | 1150 | goto ehca_rereg_mr_rereg1_exit1; |
1147 | } | 1151 | } |
1148 | rpage = virt_to_abs(kpage); | 1152 | rpage = virt_to_abs(kpage); |
@@ -1164,7 +1168,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, | |||
1164 | "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr); | 1168 | "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr); |
1165 | *pginfo = pginfo_save; | 1169 | *pginfo = pginfo_save; |
1166 | ret = -EAGAIN; | 1170 | ret = -EAGAIN; |
1167 | } else if ((u64*)hipzout.vaddr != iova_start) { | 1171 | } else if ((u64 *)hipzout.vaddr != iova_start) { |
1168 | ehca_err(&shca->ib_device, "PHYP changed iova_start in " | 1172 | ehca_err(&shca->ib_device, "PHYP changed iova_start in " |
1169 | "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p " | 1173 | "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p " |
1170 | "mr_handle=%lx lkey=%x lkey_out=%x", iova_start, | 1174 | "mr_handle=%lx lkey=%x lkey_out=%x", iova_start, |
@@ -1176,11 +1180,11 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, | |||
1176 | * successful reregistration | 1180 | * successful reregistration |
1177 | * note: start and start_out are identical for eServer HCAs | 1181 | * note: start and start_out are identical for eServer HCAs |
1178 | */ | 1182 | */ |
1179 | e_mr->num_pages = pginfo->num_pages; | 1183 | e_mr->num_kpages = pginfo->num_kpages; |
1180 | e_mr->num_4k = pginfo->num_4k; | 1184 | e_mr->num_hwpages = pginfo->num_hwpages; |
1181 | e_mr->start = iova_start; | 1185 | e_mr->start = iova_start; |
1182 | e_mr->size = size; | 1186 | e_mr->size = size; |
1183 | e_mr->acl = acl; | 1187 | e_mr->acl = acl; |
1184 | *lkey = hipzout.lkey; | 1188 | *lkey = hipzout.lkey; |
1185 | *rkey = hipzout.rkey; | 1189 | *rkey = hipzout.rkey; |
1186 | } | 1190 | } |
@@ -1190,9 +1194,9 @@ ehca_rereg_mr_rereg1_exit1: | |||
1190 | ehca_rereg_mr_rereg1_exit0: | 1194 | ehca_rereg_mr_rereg1_exit0: |
1191 | if ( ret && (ret != -EAGAIN) ) | 1195 | if ( ret && (ret != -EAGAIN) ) |
1192 | ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " | 1196 | ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " |
1193 | "pginfo=%p num_pages=%lx num_4k=%lx", | 1197 | "pginfo=%p num_kpages=%lx num_hwpages=%lx", |
1194 | ret, *lkey, *rkey, pginfo, pginfo->num_pages, | 1198 | ret, *lkey, *rkey, pginfo, pginfo->num_kpages, |
1195 | pginfo->num_4k); | 1199 | pginfo->num_hwpages); |
1196 | return ret; | 1200 | return ret; |
1197 | } /* end ehca_rereg_mr_rereg1() */ | 1201 | } /* end ehca_rereg_mr_rereg1() */ |
1198 | 1202 | ||
@@ -1214,10 +1218,12 @@ int ehca_rereg_mr(struct ehca_shca *shca, | |||
1214 | int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */ | 1218 | int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */ |
1215 | 1219 | ||
1216 | /* first determine reregistration hCall(s) */ | 1220 | /* first determine reregistration hCall(s) */ |
1217 | if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) || | 1221 | if ((pginfo->num_hwpages > MAX_RPAGES) || |
1218 | (pginfo->num_4k > e_mr->num_4k)) { | 1222 | (e_mr->num_hwpages > MAX_RPAGES) || |
1219 | ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx " | 1223 | (pginfo->num_hwpages > e_mr->num_hwpages)) { |
1220 | "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k); | 1224 | ehca_dbg(&shca->ib_device, "Rereg3 case, " |
1225 | "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x", | ||
1226 | pginfo->num_hwpages, e_mr->num_hwpages); | ||
1221 | rereg_1_hcall = 0; | 1227 | rereg_1_hcall = 0; |
1222 | rereg_3_hcall = 1; | 1228 | rereg_3_hcall = 1; |
1223 | } | 1229 | } |
@@ -1253,7 +1259,7 @@ int ehca_rereg_mr(struct ehca_shca *shca, | |||
1253 | h_ret, e_mr, shca->ipz_hca_handle.handle, | 1259 | h_ret, e_mr, shca->ipz_hca_handle.handle, |
1254 | e_mr->ipz_mr_handle.handle, | 1260 | e_mr->ipz_mr_handle.handle, |
1255 | e_mr->ib.ib_mr.lkey); | 1261 | e_mr->ib.ib_mr.lkey); |
1256 | ret = ehca_mrmw_map_hrc_free_mr(h_ret); | 1262 | ret = ehca2ib_return_code(h_ret); |
1257 | goto ehca_rereg_mr_exit0; | 1263 | goto ehca_rereg_mr_exit0; |
1258 | } | 1264 | } |
1259 | /* clean ehca_mr_t, without changing struct ib_mr and lock */ | 1265 | /* clean ehca_mr_t, without changing struct ib_mr and lock */ |
@@ -1281,9 +1287,9 @@ ehca_rereg_mr_exit0: | |||
1281 | if (ret) | 1287 | if (ret) |
1282 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " | 1288 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " |
1283 | "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " | 1289 | "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " |
1284 | "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " | 1290 | "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " |
1285 | "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, | 1291 | "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, |
1286 | acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey, | 1292 | acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, |
1287 | rereg_1_hcall, rereg_3_hcall); | 1293 | rereg_1_hcall, rereg_3_hcall); |
1288 | return ret; | 1294 | return ret; |
1289 | } /* end ehca_rereg_mr() */ | 1295 | } /* end ehca_rereg_mr() */ |
@@ -1295,97 +1301,86 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca, | |||
1295 | { | 1301 | { |
1296 | int ret = 0; | 1302 | int ret = 0; |
1297 | u64 h_ret; | 1303 | u64 h_ret; |
1298 | int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */ | ||
1299 | int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */ | ||
1300 | struct ehca_pd *e_pd = | 1304 | struct ehca_pd *e_pd = |
1301 | container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); | 1305 | container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); |
1302 | struct ehca_mr save_fmr; | 1306 | struct ehca_mr save_fmr; |
1303 | u32 tmp_lkey, tmp_rkey; | 1307 | u32 tmp_lkey, tmp_rkey; |
1304 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | 1308 | struct ehca_mr_pginfo pginfo; |
1305 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | 1309 | struct ehca_mr_hipzout_parms hipzout; |
1306 | 1311 | ||
1307 | /* first check if reregistration hCall can be used for unmap */ | 1312 | if (e_fmr->fmr_max_pages <= MAX_RPAGES) { |
1308 | if (e_fmr->fmr_max_pages > 512) { | ||
1309 | rereg_1_hcall = 0; | ||
1310 | rereg_3_hcall = 1; | ||
1311 | } | ||
1312 | |||
1313 | if (rereg_1_hcall) { | ||
1314 | /* | 1313 | /* |
1315 | * note: after using rereg hcall with len=0, | 1314 | * note: after using rereg hcall with len=0, |
1316 | * rereg hcall must be used again for registering pages | 1315 | * rereg hcall must be used again for registering pages |
1317 | */ | 1316 | */ |
1318 | h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0, | 1317 | h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0, |
1319 | 0, 0, e_pd->fw_pd, 0, &hipzout); | 1318 | 0, 0, e_pd->fw_pd, 0, &hipzout); |
1320 | if (h_ret != H_SUCCESS) { | 1319 | if (h_ret == H_SUCCESS) { |
1321 | /* | ||
1322 | * should not happen, because length checked above, | ||
1323 | * FMRs are not shared and no MW bound to FMRs | ||
1324 | */ | ||
1325 | ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " | ||
1326 | "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx " | ||
1327 | "mr_hndl=%lx lkey=%x lkey_out=%x", | ||
1328 | h_ret, e_fmr, shca->ipz_hca_handle.handle, | ||
1329 | e_fmr->ipz_mr_handle.handle, | ||
1330 | e_fmr->ib.ib_fmr.lkey, hipzout.lkey); | ||
1331 | rereg_3_hcall = 1; | ||
1332 | } else { | ||
1333 | /* successful reregistration */ | 1320 | /* successful reregistration */ |
1334 | e_fmr->start = NULL; | 1321 | e_fmr->start = NULL; |
1335 | e_fmr->size = 0; | 1322 | e_fmr->size = 0; |
1336 | tmp_lkey = hipzout.lkey; | 1323 | tmp_lkey = hipzout.lkey; |
1337 | tmp_rkey = hipzout.rkey; | 1324 | tmp_rkey = hipzout.rkey; |
1325 | return 0; | ||
1338 | } | 1326 | } |
1327 | /* | ||
1328 | * should not happen, because length checked above, | ||
1329 | * FMRs are not shared and no MW bound to FMRs | ||
1330 | */ | ||
1331 | ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " | ||
1332 | "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx " | ||
1333 | "mr_hndl=%lx lkey=%x lkey_out=%x", | ||
1334 | h_ret, e_fmr, shca->ipz_hca_handle.handle, | ||
1335 | e_fmr->ipz_mr_handle.handle, | ||
1336 | e_fmr->ib.ib_fmr.lkey, hipzout.lkey); | ||
1337 | /* try free and rereg */ | ||
1339 | } | 1338 | } |
1340 | 1339 | ||
1341 | if (rereg_3_hcall) { | 1340 | /* first free old FMR */ |
1342 | struct ehca_mr save_mr; | 1341 | h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); |
1343 | 1342 | if (h_ret != H_SUCCESS) { | |
1344 | /* first free old FMR */ | 1343 | ehca_err(&shca->ib_device, "hipz_free_mr failed, " |
1345 | h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); | 1344 | "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx " |
1346 | if (h_ret != H_SUCCESS) { | 1345 | "lkey=%x", |
1347 | ehca_err(&shca->ib_device, "hipz_free_mr failed, " | 1346 | h_ret, e_fmr, shca->ipz_hca_handle.handle, |
1348 | "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx " | 1347 | e_fmr->ipz_mr_handle.handle, |
1349 | "lkey=%x", | 1348 | e_fmr->ib.ib_fmr.lkey); |
1350 | h_ret, e_fmr, shca->ipz_hca_handle.handle, | 1349 | ret = ehca2ib_return_code(h_ret); |
1351 | e_fmr->ipz_mr_handle.handle, | 1350 | goto ehca_unmap_one_fmr_exit0; |
1352 | e_fmr->ib.ib_fmr.lkey); | 1351 | } |
1353 | ret = ehca_mrmw_map_hrc_free_mr(h_ret); | 1352 | /* clean ehca_mr_t, without changing lock */ |
1354 | goto ehca_unmap_one_fmr_exit0; | 1353 | save_fmr = *e_fmr; |
1355 | } | 1354 | ehca_mr_deletenew(e_fmr); |
1356 | /* clean ehca_mr_t, without changing lock */ | 1355 | |
1357 | save_fmr = *e_fmr; | 1356 | /* set some MR values */ |
1358 | ehca_mr_deletenew(e_fmr); | 1357 | e_fmr->flags = save_fmr.flags; |
1359 | 1358 | e_fmr->fmr_page_size = save_fmr.fmr_page_size; | |
1360 | /* set some MR values */ | 1359 | e_fmr->fmr_max_pages = save_fmr.fmr_max_pages; |
1361 | e_fmr->flags = save_fmr.flags; | 1360 | e_fmr->fmr_max_maps = save_fmr.fmr_max_maps; |
1362 | e_fmr->fmr_page_size = save_fmr.fmr_page_size; | 1361 | e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; |
1363 | e_fmr->fmr_max_pages = save_fmr.fmr_max_pages; | 1362 | e_fmr->acl = save_fmr.acl; |
1364 | e_fmr->fmr_max_maps = save_fmr.fmr_max_maps; | 1363 | |
1365 | e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; | 1364 | memset(&pginfo, 0, sizeof(pginfo)); |
1366 | e_fmr->acl = save_fmr.acl; | 1365 | pginfo.type = EHCA_MR_PGI_FMR; |
1367 | 1366 | pginfo.num_kpages = 0; | |
1368 | pginfo.type = EHCA_MR_PGI_FMR; | 1367 | pginfo.num_hwpages = 0; |
1369 | pginfo.num_pages = 0; | 1368 | ret = ehca_reg_mr(shca, e_fmr, NULL, |
1370 | pginfo.num_4k = 0; | 1369 | (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), |
1371 | ret = ehca_reg_mr(shca, e_fmr, NULL, | 1370 | e_fmr->acl, e_pd, &pginfo, &tmp_lkey, |
1372 | (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), | 1371 | &tmp_rkey); |
1373 | e_fmr->acl, e_pd, &pginfo, &tmp_lkey, | 1372 | if (ret) { |
1374 | &tmp_rkey); | 1373 | u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; |
1375 | if (ret) { | 1374 | memcpy(&e_fmr->flags, &(save_mr.flags), |
1376 | u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; | 1374 | memcpy(&e_fmr->flags, &(save_fmr.flags),
1377 | memcpy(&e_fmr->flags, &(save_mr.flags), | 1376 | goto ehca_unmap_one_fmr_exit0; |
1378 | sizeof(struct ehca_mr) - offset); | ||
1379 | goto ehca_unmap_one_fmr_exit0; | ||
1380 | } | ||
1381 | } | 1377 | } |
1382 | 1378 | ||
1383 | ehca_unmap_one_fmr_exit0: | 1379 | ehca_unmap_one_fmr_exit0: |
1384 | if (ret) | 1380 | if (ret) |
1385 | ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x " | 1381 | ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x " |
1386 | "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x", | 1382 | "fmr_max_pages=%x", |
1387 | ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages, | 1383 | ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages); |
1388 | rereg_1_hcall, rereg_3_hcall); | ||
1389 | return ret; | 1384 | return ret; |
1390 | } /* end ehca_unmap_one_fmr() */ | 1385 | } /* end ehca_unmap_one_fmr() */ |
1391 | 1386 | ||
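
The rewritten ehca_unmap_one_fmr() drops the rereg_1_hcall/rereg_3_hcall flags in favor of straight-line control flow; condensed, the new shape is (error reporting elided):

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/* fast path: one rereg hcall with len = 0 unmaps in place */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS)
			return 0;
		/* unexpected failure: fall through to free + rereg */
	}
	/* slow path: free the FMR, then register it again from scratch */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	...
	ret = ehca_reg_mr(shca, e_fmr, NULL, ...);

The early return on the fast path is what lets the fallback shed a level of indentation compared to the old nested version.
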
@@ -1403,7 +1398,7 @@ int ehca_reg_smr(struct ehca_shca *shca, | |||
1403 | int ret = 0; | 1398 | int ret = 0; |
1404 | u64 h_ret; | 1399 | u64 h_ret; |
1405 | u32 hipz_acl; | 1400 | u32 hipz_acl; |
1406 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | 1401 | struct ehca_mr_hipzout_parms hipzout; |
1407 | 1402 | ||
1408 | ehca_mrmw_map_acl(acl, &hipz_acl); | 1403 | ehca_mrmw_map_acl(acl, &hipz_acl); |
1409 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | 1404 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); |
@@ -1419,15 +1414,15 @@ int ehca_reg_smr(struct ehca_shca *shca, | |||
1419 | shca->ipz_hca_handle.handle, | 1414 | shca->ipz_hca_handle.handle, |
1420 | e_origmr->ipz_mr_handle.handle, | 1415 | e_origmr->ipz_mr_handle.handle, |
1421 | e_origmr->ib.ib_mr.lkey); | 1416 | e_origmr->ib.ib_mr.lkey); |
1422 | ret = ehca_mrmw_map_hrc_reg_smr(h_ret); | 1417 | ret = ehca2ib_return_code(h_ret); |
1423 | goto ehca_reg_smr_exit0; | 1418 | goto ehca_reg_smr_exit0; |
1424 | } | 1419 | } |
1425 | /* successful registration */ | 1420 | /* successful registration */ |
1426 | e_newmr->num_pages = e_origmr->num_pages; | 1421 | e_newmr->num_kpages = e_origmr->num_kpages; |
1427 | e_newmr->num_4k = e_origmr->num_4k; | 1422 | e_newmr->num_hwpages = e_origmr->num_hwpages; |
1428 | e_newmr->start = iova_start; | 1423 | e_newmr->start = iova_start; |
1429 | e_newmr->size = e_origmr->size; | 1424 | e_newmr->size = e_origmr->size; |
1430 | e_newmr->acl = acl; | 1425 | e_newmr->acl = acl; |
1431 | e_newmr->ipz_mr_handle = hipzout.handle; | 1426 | e_newmr->ipz_mr_handle = hipzout.handle; |
1432 | *lkey = hipzout.lkey; | 1427 | *lkey = hipzout.lkey; |
1433 | *rkey = hipzout.rkey; | 1428 | *rkey = hipzout.rkey; |
@@ -1453,10 +1448,10 @@ int ehca_reg_internal_maxmr( | |||
1453 | struct ehca_mr *e_mr; | 1448 | struct ehca_mr *e_mr; |
1454 | u64 *iova_start; | 1449 | u64 *iova_start; |
1455 | u64 size_maxmr; | 1450 | u64 size_maxmr; |
1456 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | 1451 | struct ehca_mr_pginfo pginfo; |
1457 | struct ib_phys_buf ib_pbuf; | 1452 | struct ib_phys_buf ib_pbuf; |
1458 | u32 num_pages_mr; | 1453 | u32 num_kpages; |
1459 | u32 num_pages_4k; /* 4k portion "pages" */ | 1454 | u32 num_hwpages; |
1460 | 1455 | ||
1461 | e_mr = ehca_mr_new(); | 1456 | e_mr = ehca_mr_new(); |
1462 | if (!e_mr) { | 1457 | if (!e_mr) { |
@@ -1468,28 +1463,29 @@ int ehca_reg_internal_maxmr( | |||
1468 | 1463 | ||
1469 | /* register internal max-MR on HCA */ | 1464 | /* register internal max-MR on HCA */ |
1470 | size_maxmr = (u64)high_memory - PAGE_OFFSET; | 1465 | size_maxmr = (u64)high_memory - PAGE_OFFSET; |
1471 | iova_start = (u64*)KERNELBASE; | 1466 | iova_start = (u64 *)KERNELBASE; |
1472 | ib_pbuf.addr = 0; | 1467 | ib_pbuf.addr = 0; |
1473 | ib_pbuf.size = size_maxmr; | 1468 | ib_pbuf.size = size_maxmr; |
1474 | num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr + | 1469 | num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, |
1475 | PAGE_SIZE - 1) / PAGE_SIZE); | 1470 | PAGE_SIZE); |
1476 | num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr + | 1471 | num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr, |
1477 | EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); | 1472 | EHCA_PAGESIZE); |
1478 | 1473 | ||
1479 | pginfo.type = EHCA_MR_PGI_PHYS; | 1474 | memset(&pginfo, 0, sizeof(pginfo)); |
1480 | pginfo.num_pages = num_pages_mr; | 1475 | pginfo.type = EHCA_MR_PGI_PHYS; |
1481 | pginfo.num_4k = num_pages_4k; | 1476 | pginfo.num_kpages = num_kpages; |
1482 | pginfo.num_phys_buf = 1; | 1477 | pginfo.num_hwpages = num_hwpages; |
1483 | pginfo.phys_buf_array = &ib_pbuf; | 1478 | pginfo.u.phy.num_phys_buf = 1; |
1479 | pginfo.u.phy.phys_buf_array = &ib_pbuf; | ||
1484 | 1480 | ||
1485 | ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, | 1481 | ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, |
1486 | &pginfo, &e_mr->ib.ib_mr.lkey, | 1482 | &pginfo, &e_mr->ib.ib_mr.lkey, |
1487 | &e_mr->ib.ib_mr.rkey); | 1483 | &e_mr->ib.ib_mr.rkey); |
1488 | if (ret) { | 1484 | if (ret) { |
1489 | ehca_err(&shca->ib_device, "reg of internal max MR failed, " | 1485 | ehca_err(&shca->ib_device, "reg of internal max MR failed, " |
1490 | "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x " | 1486 | "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x " |
1491 | "num_pages_4k=%x", e_mr, iova_start, size_maxmr, | 1487 | "num_hwpages=%x", e_mr, iova_start, size_maxmr, |
1492 | num_pages_mr, num_pages_4k); | 1488 | num_kpages, num_hwpages); |
1493 | goto ehca_reg_internal_maxmr_exit1; | 1489 | goto ehca_reg_internal_maxmr_exit1; |
1494 | } | 1490 | } |
1495 | 1491 | ||
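
To put numbers on the max-MR setup above (assumed configuration: 4 GiB of RAM on ppc64 with 64 KiB kernel pages): size_maxmr = high_memory - PAGE_OFFSET covers all of kernel lowmem, and since KERNELBASE is page aligned the offset terms vanish, giving

	num_kpages  = NUM_CHUNKS(0x100000000, 65536) =   65536;	/* 65536 pages of 64 KiB */
	num_hwpages = NUM_CHUNKS(0x100000000,  4096) = 1048576;	/* 1048576 pages of 4 KiB */

so the internal max-MR alone accounts for over a million hardware pages, registered in MAX_RPAGES-sized shots by ehca_reg_mr_rpages().
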
@@ -1524,7 +1520,7 @@ int ehca_reg_maxmr(struct ehca_shca *shca, | |||
1524 | u64 h_ret; | 1520 | u64 h_ret; |
1525 | struct ehca_mr *e_origmr = shca->maxmr; | 1521 | struct ehca_mr *e_origmr = shca->maxmr; |
1526 | u32 hipz_acl; | 1522 | u32 hipz_acl; |
1527 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | 1523 | struct ehca_mr_hipzout_parms hipzout; |
1528 | 1524 | ||
1529 | ehca_mrmw_map_acl(acl, &hipz_acl); | 1525 | ehca_mrmw_map_acl(acl, &hipz_acl); |
1530 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | 1526 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); |
@@ -1538,14 +1534,14 @@ int ehca_reg_maxmr(struct ehca_shca *shca, | |||
1538 | h_ret, e_origmr, shca->ipz_hca_handle.handle, | 1534 | h_ret, e_origmr, shca->ipz_hca_handle.handle, |
1539 | e_origmr->ipz_mr_handle.handle, | 1535 | e_origmr->ipz_mr_handle.handle, |
1540 | e_origmr->ib.ib_mr.lkey); | 1536 | e_origmr->ib.ib_mr.lkey); |
1541 | return ehca_mrmw_map_hrc_reg_smr(h_ret); | 1537 | return ehca2ib_return_code(h_ret); |
1542 | } | 1538 | } |
1543 | /* successful registration */ | 1539 | /* successful registration */ |
1544 | e_newmr->num_pages = e_origmr->num_pages; | 1540 | e_newmr->num_kpages = e_origmr->num_kpages; |
1545 | e_newmr->num_4k = e_origmr->num_4k; | 1541 | e_newmr->num_hwpages = e_origmr->num_hwpages; |
1546 | e_newmr->start = iova_start; | 1542 | e_newmr->start = iova_start; |
1547 | e_newmr->size = e_origmr->size; | 1543 | e_newmr->size = e_origmr->size; |
1548 | e_newmr->acl = acl; | 1544 | e_newmr->acl = acl; |
1549 | e_newmr->ipz_mr_handle = hipzout.handle; | 1545 | e_newmr->ipz_mr_handle = hipzout.handle; |
1550 | *lkey = hipzout.lkey; | 1546 | *lkey = hipzout.lkey; |
1551 | *rkey = hipzout.rkey; | 1547 | *rkey = hipzout.rkey; |
@@ -1677,299 +1673,187 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr, | |||
1677 | 1673 | ||
1678 | /*----------------------------------------------------------------------*/ | 1674 | /*----------------------------------------------------------------------*/ |
1679 | 1675 | ||
1680 | /* setup page buffer from page info */ | 1676 | /* PAGE_SIZE >= pginfo->hwpage_size */ |
1681 | int ehca_set_pagebuf(struct ehca_mr *e_mr, | 1677 | static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo, |
1682 | struct ehca_mr_pginfo *pginfo, | 1678 | u32 number, |
1683 | u32 number, | 1679 | u64 *kpage) |
1684 | u64 *kpage) | ||
1685 | { | 1680 | { |
1686 | int ret = 0; | 1681 | int ret = 0; |
1687 | struct ib_umem_chunk *prev_chunk; | 1682 | struct ib_umem_chunk *prev_chunk; |
1688 | struct ib_umem_chunk *chunk; | 1683 | struct ib_umem_chunk *chunk; |
1689 | struct ib_phys_buf *pbuf; | 1684 | u64 pgaddr; |
1690 | u64 *fmrlist; | ||
1691 | u64 num4k, pgaddr, offs4k; | ||
1692 | u32 i = 0; | 1685 | u32 i = 0; |
1693 | u32 j = 0; | 1686 | u32 j = 0; |
1694 | 1687 | ||
1695 | if (pginfo->type == EHCA_MR_PGI_PHYS) { | 1688 | /* loop over desired chunk entries */ |
1696 | /* loop over desired phys_buf_array entries */ | 1689 | chunk = pginfo->u.usr.next_chunk; |
1697 | while (i < number) { | 1690 | prev_chunk = pginfo->u.usr.next_chunk; |
1698 | pbuf = pginfo->phys_buf_array + pginfo->next_buf; | 1691 | list_for_each_entry_continue( |
1699 | num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size + | 1692 | chunk, (&(pginfo->u.usr.region->chunk_list)), list) { |
1700 | EHCA_PAGESIZE - 1) / EHCA_PAGESIZE; | 1693 | for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { |
1701 | offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; | 1694 | pgaddr = page_to_pfn(chunk->page_list[i].page) |
1702 | while (pginfo->next_4k < offs4k + num4k) { | 1695 | << PAGE_SHIFT ; |
1703 | /* sanity check */ | 1696 | *kpage = phys_to_abs(pgaddr + |
1704 | if ((pginfo->page_cnt >= pginfo->num_pages) || | 1697 | (pginfo->next_hwpage * |
1705 | (pginfo->page_4k_cnt >= pginfo->num_4k)) { | 1698 | EHCA_PAGESIZE)); |
1706 | ehca_gen_err("page_cnt >= num_pages, " | ||
1707 | "page_cnt=%lx " | ||
1708 | "num_pages=%lx " | ||
1709 | "page_4k_cnt=%lx " | ||
1710 | "num_4k=%lx i=%x", | ||
1711 | pginfo->page_cnt, | ||
1712 | pginfo->num_pages, | ||
1713 | pginfo->page_4k_cnt, | ||
1714 | pginfo->num_4k, i); | ||
1715 | ret = -EFAULT; | ||
1716 | goto ehca_set_pagebuf_exit0; | ||
1717 | } | ||
1718 | *kpage = phys_to_abs( | ||
1719 | (pbuf->addr & EHCA_PAGEMASK) | ||
1720 | + (pginfo->next_4k * EHCA_PAGESIZE)); | ||
1721 | if ( !(*kpage) && pbuf->addr ) { | ||
1722 | ehca_gen_err("pbuf->addr=%lx " | ||
1723 | "pbuf->size=%lx " | ||
1724 | "next_4k=%lx", pbuf->addr, | ||
1725 | pbuf->size, | ||
1726 | pginfo->next_4k); | ||
1727 | ret = -EFAULT; | ||
1728 | goto ehca_set_pagebuf_exit0; | ||
1729 | } | ||
1730 | (pginfo->page_4k_cnt)++; | ||
1731 | (pginfo->next_4k)++; | ||
1732 | if (pginfo->next_4k % | ||
1733 | (PAGE_SIZE / EHCA_PAGESIZE) == 0) | ||
1734 | (pginfo->page_cnt)++; | ||
1735 | kpage++; | ||
1736 | i++; | ||
1737 | if (i >= number) break; | ||
1738 | } | ||
1739 | if (pginfo->next_4k >= offs4k + num4k) { | ||
1740 | (pginfo->next_buf)++; | ||
1741 | pginfo->next_4k = 0; | ||
1742 | } | ||
1743 | } | ||
1744 | } else if (pginfo->type == EHCA_MR_PGI_USER) { | ||
1745 | /* loop over desired chunk entries */ | ||
1746 | chunk = pginfo->next_chunk; | ||
1747 | prev_chunk = pginfo->next_chunk; | ||
1748 | list_for_each_entry_continue(chunk, | ||
1749 | (&(pginfo->region->chunk_list)), | ||
1750 | list) { | ||
1751 | for (i = pginfo->next_nmap; i < chunk->nmap; ) { | ||
1752 | pgaddr = ( page_to_pfn(chunk->page_list[i].page) | ||
1753 | << PAGE_SHIFT ); | ||
1754 | *kpage = phys_to_abs(pgaddr + | ||
1755 | (pginfo->next_4k * | ||
1756 | EHCA_PAGESIZE)); | ||
1757 | if ( !(*kpage) ) { | ||
1758 | ehca_gen_err("pgaddr=%lx " | ||
1759 | "chunk->page_list[i]=%lx " | ||
1760 | "i=%x next_4k=%lx mr=%p", | ||
1761 | pgaddr, | ||
1762 | (u64)sg_dma_address( | ||
1763 | &chunk-> | ||
1764 | page_list[i]), | ||
1765 | i, pginfo->next_4k, e_mr); | ||
1766 | ret = -EFAULT; | ||
1767 | goto ehca_set_pagebuf_exit0; | ||
1768 | } | ||
1769 | (pginfo->page_4k_cnt)++; | ||
1770 | (pginfo->next_4k)++; | ||
1771 | kpage++; | ||
1772 | if (pginfo->next_4k % | ||
1773 | (PAGE_SIZE / EHCA_PAGESIZE) == 0) { | ||
1774 | (pginfo->page_cnt)++; | ||
1775 | (pginfo->next_nmap)++; | ||
1776 | pginfo->next_4k = 0; | ||
1777 | i++; | ||
1778 | } | ||
1779 | j++; | ||
1780 | if (j >= number) break; | ||
1781 | } | ||
1782 | if ((pginfo->next_nmap >= chunk->nmap) && | ||
1783 | (j >= number)) { | ||
1784 | pginfo->next_nmap = 0; | ||
1785 | prev_chunk = chunk; | ||
1786 | break; | ||
1787 | } else if (pginfo->next_nmap >= chunk->nmap) { | ||
1788 | pginfo->next_nmap = 0; | ||
1789 | prev_chunk = chunk; | ||
1790 | } else if (j >= number) | ||
1791 | break; | ||
1792 | else | ||
1793 | prev_chunk = chunk; | ||
1794 | } | ||
1795 | pginfo->next_chunk = | ||
1796 | list_prepare_entry(prev_chunk, | ||
1797 | (&(pginfo->region->chunk_list)), | ||
1798 | list); | ||
1799 | } else if (pginfo->type == EHCA_MR_PGI_FMR) { | ||
1800 | /* loop over desired page_list entries */ | ||
1801 | fmrlist = pginfo->page_list + pginfo->next_listelem; | ||
1802 | for (i = 0; i < number; i++) { | ||
1803 | *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + | ||
1804 | pginfo->next_4k * EHCA_PAGESIZE); | ||
1805 | if ( !(*kpage) ) { | 1699 | if ( !(*kpage) ) { |
1806 | ehca_gen_err("*fmrlist=%lx fmrlist=%p " | 1700 | ehca_gen_err("pgaddr=%lx " |
1807 | "next_listelem=%lx next_4k=%lx", | 1701 | "chunk->page_list[i]=%lx " |
1808 | *fmrlist, fmrlist, | 1702 | "i=%x next_hwpage=%lx", |
1809 | pginfo->next_listelem, | 1703 | pgaddr, (u64)sg_dma_address( |
1810 | pginfo->next_4k); | 1704 | &chunk->page_list[i]), |
1811 | ret = -EFAULT; | 1705 | i, pginfo->next_hwpage); |
1812 | goto ehca_set_pagebuf_exit0; | 1706 | return -EFAULT; |
1813 | } | 1707 | } |
1814 | (pginfo->page_4k_cnt)++; | 1708 | (pginfo->hwpage_cnt)++; |
1815 | (pginfo->next_4k)++; | 1709 | (pginfo->next_hwpage)++; |
1816 | kpage++; | 1710 | kpage++; |
1817 | if (pginfo->next_4k % | 1711 | if (pginfo->next_hwpage % |
1818 | (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { | 1712 | (PAGE_SIZE / EHCA_PAGESIZE) == 0) { |
1819 | (pginfo->page_cnt)++; | 1713 | (pginfo->kpage_cnt)++; |
1820 | (pginfo->next_listelem)++; | 1714 | (pginfo->u.usr.next_nmap)++; |
1821 | fmrlist++; | 1715 | pginfo->next_hwpage = 0; |
1822 | pginfo->next_4k = 0; | 1716 | i++; |
1823 | } | 1717 | } |
1718 | j++; | ||
1719 | if (j >= number) break; | ||
1824 | } | 1720 | } |
1825 | } else { | 1721 | if ((pginfo->u.usr.next_nmap >= chunk->nmap) && |
1826 | ehca_gen_err("bad pginfo->type=%x", pginfo->type); | 1722 | (j >= number)) { |
1827 | ret = -EFAULT; | 1723 | pginfo->u.usr.next_nmap = 0; |
1828 | goto ehca_set_pagebuf_exit0; | 1724 | prev_chunk = chunk; |
1725 | break; | ||
1726 | } else if (pginfo->u.usr.next_nmap >= chunk->nmap) { | ||
1727 | pginfo->u.usr.next_nmap = 0; | ||
1728 | prev_chunk = chunk; | ||
1729 | } else if (j >= number) | ||
1730 | break; | ||
1731 | else | ||
1732 | prev_chunk = chunk; | ||
1829 | } | 1733 | } |
1830 | 1734 | pginfo->u.usr.next_chunk = | |
1831 | ehca_set_pagebuf_exit0: | 1735 | list_prepare_entry(prev_chunk, |
1832 | if (ret) | 1736 | (&(pginfo->u.usr.region->chunk_list)), |
1833 | ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " | 1737 | list); |
1834 | "num_4k=%lx next_buf=%lx next_4k=%lx number=%x " | ||
1835 | "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x " | ||
1836 | "next_listelem=%lx region=%p next_chunk=%p " | ||
1837 | "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type, | ||
1838 | pginfo->num_pages, pginfo->num_4k, | ||
1839 | pginfo->next_buf, pginfo->next_4k, number, kpage, | ||
1840 | pginfo->page_cnt, pginfo->page_4k_cnt, i, | ||
1841 | pginfo->next_listelem, pginfo->region, | ||
1842 | pginfo->next_chunk, pginfo->next_nmap); | ||
1843 | return ret; | 1738 | return ret; |
1844 | } /* end ehca_set_pagebuf() */ | 1739 | } |
1845 | |||
1846 | /*----------------------------------------------------------------------*/ | ||
1847 | 1740 | ||
1848 | /* setup 1 page from page info page buffer */ | 1741 | int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo, |
1849 | int ehca_set_pagebuf_1(struct ehca_mr *e_mr, | 1742 | u32 number, |
1850 | struct ehca_mr_pginfo *pginfo, | 1743 | u64 *kpage) |
1851 | u64 *rpage) | ||
1852 | { | 1744 | { |
1853 | int ret = 0; | 1745 | int ret = 0; |
1854 | struct ib_phys_buf *tmp_pbuf; | 1746 | struct ib_phys_buf *pbuf; |
1855 | u64 *fmrlist; | 1747 | u64 num_hw, offs_hw; |
1856 | struct ib_umem_chunk *chunk; | 1748 | u32 i = 0; |
1857 | struct ib_umem_chunk *prev_chunk; | 1749 | |
1858 | u64 pgaddr, num4k, offs4k; | 1750 | /* loop over desired phys_buf_array entries */ |
1859 | 1751 | while (i < number) { | |
1860 | if (pginfo->type == EHCA_MR_PGI_PHYS) { | 1752 | pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf; |
1861 | /* sanity check */ | 1753 | num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) + |
1862 | if ((pginfo->page_cnt >= pginfo->num_pages) || | 1754 | pbuf->size, EHCA_PAGESIZE); |
1863 | (pginfo->page_4k_cnt >= pginfo->num_4k)) { | 1755 | offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; |
1864 | ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx " | 1756 | while (pginfo->next_hwpage < offs_hw + num_hw) { |
1865 | "num_pages=%lx page_4k_cnt=%lx num_4k=%lx", | 1757 | /* sanity check */ |
1866 | pginfo->page_cnt, pginfo->num_pages, | 1758 | if ((pginfo->kpage_cnt >= pginfo->num_kpages) || |
1867 | pginfo->page_4k_cnt, pginfo->num_4k); | 1759 | (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { |
1868 | ret = -EFAULT; | 1760 | ehca_gen_err("kpage_cnt >= num_kpages, " |
1869 | goto ehca_set_pagebuf_1_exit0; | 1761 | "kpage_cnt=%lx num_kpages=%lx " |
1870 | } | 1762 | "hwpage_cnt=%lx " |
1871 | tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf; | 1763 | "num_hwpages=%lx i=%x", |
1872 | num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size + | 1764 | pginfo->kpage_cnt, |
1873 | EHCA_PAGESIZE - 1) / EHCA_PAGESIZE; | 1765 | pginfo->num_kpages, |
1874 | offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; | 1766 | pginfo->hwpage_cnt, |
1875 | *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) + | 1767 | pginfo->num_hwpages, i); |
1876 | (pginfo->next_4k * EHCA_PAGESIZE)); | 1768 | return -EFAULT; |
1877 | if ( !(*rpage) && tmp_pbuf->addr ) { | ||
1878 | ehca_gen_err("tmp_pbuf->addr=%lx" | ||
1879 | " tmp_pbuf->size=%lx next_4k=%lx", | ||
1880 | tmp_pbuf->addr, tmp_pbuf->size, | ||
1881 | pginfo->next_4k); | ||
1882 | ret = -EFAULT; | ||
1883 | goto ehca_set_pagebuf_1_exit0; | ||
1884 | } | ||
1885 | (pginfo->page_4k_cnt)++; | ||
1886 | (pginfo->next_4k)++; | ||
1887 | if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0) | ||
1888 | (pginfo->page_cnt)++; | ||
1889 | if (pginfo->next_4k >= offs4k + num4k) { | ||
1890 | (pginfo->next_buf)++; | ||
1891 | pginfo->next_4k = 0; | ||
1892 | } | ||
1893 | } else if (pginfo->type == EHCA_MR_PGI_USER) { | ||
1894 | chunk = pginfo->next_chunk; | ||
1895 | prev_chunk = pginfo->next_chunk; | ||
1896 | list_for_each_entry_continue(chunk, | ||
1897 | (&(pginfo->region->chunk_list)), | ||
1898 | list) { | ||
1899 | pgaddr = ( page_to_pfn(chunk->page_list[ | ||
1900 | pginfo->next_nmap].page) | ||
1901 | << PAGE_SHIFT); | ||
1902 | *rpage = phys_to_abs(pgaddr + | ||
1903 | (pginfo->next_4k * EHCA_PAGESIZE)); | ||
1904 | if ( !(*rpage) ) { | ||
1905 | ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx" | ||
1906 | " next_nmap=%lx next_4k=%lx mr=%p", | ||
1907 | pgaddr, (u64)sg_dma_address( | ||
1908 | &chunk->page_list[ | ||
1909 | pginfo-> | ||
1910 | next_nmap]), | ||
1911 | pginfo->next_nmap, pginfo->next_4k, | ||
1912 | e_mr); | ||
1913 | ret = -EFAULT; | ||
1914 | goto ehca_set_pagebuf_1_exit0; | ||
1915 | } | ||
1916 | (pginfo->page_4k_cnt)++; | ||
1917 | (pginfo->next_4k)++; | ||
1918 | if (pginfo->next_4k % | ||
1919 | (PAGE_SIZE / EHCA_PAGESIZE) == 0) { | ||
1920 | (pginfo->page_cnt)++; | ||
1921 | (pginfo->next_nmap)++; | ||
1922 | pginfo->next_4k = 0; | ||
1923 | } | 1769 | } |
1924 | if (pginfo->next_nmap >= chunk->nmap) { | 1770 | *kpage = phys_to_abs( |
1925 | pginfo->next_nmap = 0; | 1771 | (pbuf->addr & EHCA_PAGEMASK) |
1926 | prev_chunk = chunk; | 1772 | + (pginfo->next_hwpage * EHCA_PAGESIZE)); |
1773 | if ( !(*kpage) && pbuf->addr ) { | ||
1774 | ehca_gen_err("pbuf->addr=%lx " | ||
1775 | "pbuf->size=%lx " | ||
1776 | "next_hwpage=%lx", pbuf->addr, | ||
1777 | pbuf->size, | ||
1778 | pginfo->next_hwpage); | ||
1779 | return -EFAULT; | ||
1927 | } | 1780 | } |
1928 | break; | 1781 | (pginfo->hwpage_cnt)++; |
1782 | (pginfo->next_hwpage)++; | ||
1783 | if (pginfo->next_hwpage % | ||
1784 | (PAGE_SIZE / EHCA_PAGESIZE) == 0) | ||
1785 | (pginfo->kpage_cnt)++; | ||
1786 | kpage++; | ||
1787 | i++; | ||
1788 | if (i >= number) break; | ||
1789 | } | ||
1790 | if (pginfo->next_hwpage >= offs_hw + num_hw) { | ||
1791 | (pginfo->u.phy.next_buf)++; | ||
1792 | pginfo->next_hwpage = 0; | ||
1929 | } | 1793 | } |
1930 | pginfo->next_chunk = | 1794 | } |
1931 | list_prepare_entry(prev_chunk, | 1795 | return ret; |
1932 | (&(pginfo->region->chunk_list)), | 1796 | } |
1933 | list); | 1797 | |
1934 | } else if (pginfo->type == EHCA_MR_PGI_FMR) { | 1798 | int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo, |
1935 | fmrlist = pginfo->page_list + pginfo->next_listelem; | 1799 | u32 number, |
1936 | *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + | 1800 | u64 *kpage) |
1937 | pginfo->next_4k * EHCA_PAGESIZE); | 1801 | { |
1938 | if ( !(*rpage) ) { | 1802 | int ret = 0; |
1803 | u64 *fmrlist; | ||
1804 | u32 i; | ||
1805 | |||
1806 | /* loop over desired page_list entries */ | ||
1807 | fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; | ||
1808 | for (i = 0; i < number; i++) { | ||
1809 | *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + | ||
1810 | pginfo->next_hwpage * EHCA_PAGESIZE); | ||
1811 | if ( !(*kpage) ) { | ||
1939 | ehca_gen_err("*fmrlist=%lx fmrlist=%p " | 1812 | ehca_gen_err("*fmrlist=%lx fmrlist=%p " |
1940 | "next_listelem=%lx next_4k=%lx", | 1813 | "next_listelem=%lx next_hwpage=%lx", |
1941 | *fmrlist, fmrlist, pginfo->next_listelem, | 1814 | *fmrlist, fmrlist, |
1942 | pginfo->next_4k); | 1815 | pginfo->u.fmr.next_listelem, |
1943 | ret = -EFAULT; | 1816 | pginfo->next_hwpage); |
1944 | goto ehca_set_pagebuf_1_exit0; | 1817 | return -EFAULT; |
1945 | } | 1818 | } |
1946 | (pginfo->page_4k_cnt)++; | 1819 | (pginfo->hwpage_cnt)++; |
1947 | (pginfo->next_4k)++; | 1820 | (pginfo->next_hwpage)++; |
1948 | if (pginfo->next_4k % | 1821 | kpage++; |
1949 | (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { | 1822 | if (pginfo->next_hwpage % |
1950 | (pginfo->page_cnt)++; | 1823 | (pginfo->u.fmr.fmr_pgsize / EHCA_PAGESIZE) == 0) { |
1951 | (pginfo->next_listelem)++; | 1824 | (pginfo->kpage_cnt)++; |
1952 | pginfo->next_4k = 0; | 1825 | (pginfo->u.fmr.next_listelem)++; |
1826 | fmrlist++; | ||
1827 | pginfo->next_hwpage = 0; | ||
1953 | } | 1828 | } |
1954 | } else { | 1829 | } |
1830 | return ret; | ||
1831 | } | ||
1832 | |||
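The FMR path above derives each hardware page address by masking the page_list entry down to EHCA_PAGEMASK and stepping next_hwpage in EHCA_PAGESIZE increments until a full FMR page (fmr_pgsize) has been emitted. A minimal user-space sketch of that address arithmetic; the constants and the stub phys_to_abs() are illustrative assumptions, not this tree's values:

    #include <stdint.h>
    #include <stdio.h>

    #define EHCA_PAGESIZE 4096ULL               /* assumed 4 KiB HW page */
    #define EHCA_PAGEMASK (~(EHCA_PAGESIZE - 1))

    static uint64_t phys_to_abs(uint64_t phys) { return phys; }  /* stub */

    int main(void)
    {
        uint64_t fmr_entry  = 0x12345000ULL;    /* one page_list entry */
        uint64_t fmr_pgsize = 16384;            /* 16 KiB FMR page = 4 HW pages */

        /* same stepping as the next_hwpage loop above */
        for (uint64_t hw = 0; hw < fmr_pgsize / EHCA_PAGESIZE; hw++)
            printf("hwpage %llu -> %#llx\n",
                   (unsigned long long)hw,
                   (unsigned long long)phys_to_abs(
                           (fmr_entry & EHCA_PAGEMASK) + hw * EHCA_PAGESIZE));
        return 0;
    }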
1833 | /* setup page buffer from page info */ | ||
1834 | int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo, | ||
1835 | u32 number, | ||
1836 | u64 *kpage) | ||
1837 | { | ||
1838 | int ret; | ||
1839 | |||
1840 | switch (pginfo->type) { | ||
1841 | case EHCA_MR_PGI_PHYS: | ||
1842 | ret = ehca_set_pagebuf_phys(pginfo, number, kpage); | ||
1843 | break; | ||
1844 | case EHCA_MR_PGI_USER: | ||
1845 | ret = ehca_set_pagebuf_user1(pginfo, number, kpage); | ||
1846 | break; | ||
1847 | case EHCA_MR_PGI_FMR: | ||
1848 | ret = ehca_set_pagebuf_fmr(pginfo, number, kpage); | ||
1849 | break; | ||
1850 | default: | ||
1955 | ehca_gen_err("bad pginfo->type=%x", pginfo->type); | 1851 | ehca_gen_err("bad pginfo->type=%x", pginfo->type); |
1956 | ret = -EFAULT; | 1852 | ret = -EFAULT; |
1957 | goto ehca_set_pagebuf_1_exit0; | 1853 | break; |
1958 | } | 1854 | } |
1959 | |||
1960 | ehca_set_pagebuf_1_exit0: | ||
1961 | if (ret) | ||
1962 | ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " | ||
1963 | "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p " | ||
1964 | "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx " | ||
1965 | "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr, | ||
1966 | pginfo, pginfo->type, pginfo->num_pages, | ||
1967 | pginfo->num_4k, pginfo->next_buf, pginfo->next_4k, | ||
1968 | rpage, pginfo->page_cnt, pginfo->page_4k_cnt, | ||
1969 | pginfo->next_listelem, pginfo->region, | ||
1970 | pginfo->next_chunk, pginfo->next_nmap); | ||
1971 | return ret; | 1855 | return ret; |
1972 | } /* end ehca_set_pagebuf_1() */ | 1856 | } /* end ehca_set_pagebuf() */ |
1973 | 1857 | ||
1974 | /*----------------------------------------------------------------------*/ | 1858 | /*----------------------------------------------------------------------*/ |
1975 | 1859 | ||
@@ -1982,7 +1866,7 @@ int ehca_mr_is_maxmr(u64 size, | |||
1982 | { | 1866 | { |
1983 | /* an MR is treated as max-MR only if it fits the following: */ | 1867 | /* an MR is treated as max-MR only if it fits the following: */ |
1984 | if ((size == ((u64)high_memory - PAGE_OFFSET)) && | 1868 | if ((size == ((u64)high_memory - PAGE_OFFSET)) && |
1985 | (iova_start == (void*)KERNELBASE)) { | 1869 | (iova_start == (void *)KERNELBASE)) { |
1986 | ehca_gen_dbg("this is a max-MR"); | 1870 | ehca_gen_dbg("this is a max-MR"); |
1987 | return 1; | 1871 | return 1; |
1988 | } else | 1872 | } else |
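For reference, the max-MR test above recognises a registration spanning all of kernel memory: the size must equal high_memory - PAGE_OFFSET and the IO virtual address must be the kernel base. A hedged sketch of the same predicate; the PAGE_OFFSET/KERNELBASE values below are illustrative ppc64 constants, not taken from this tree:

    #define PAGE_OFFSET 0xc000000000000000UL   /* illustrative */
    #define KERNELBASE  0xc000000000000000UL   /* illustrative */

    static int mr_is_maxmr(unsigned long size, void *iova_start,
                           unsigned long high_memory)
    {
        /* max-MR: covers [PAGE_OFFSET, high_memory) and starts at KERNELBASE */
        return size == high_memory - PAGE_OFFSET &&
               iova_start == (void *)KERNELBASE;
    }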
@@ -2042,196 +1926,23 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, | |||
2042 | /*----------------------------------------------------------------------*/ | 1926 | /*----------------------------------------------------------------------*/ |
2043 | 1927 | ||
2044 | /* | 1928 | /* |
2045 | * map HIPZ rc to IB retcodes for MR/MW allocations | ||
2046 | * Used for hipz_mr_reg_alloc and hipz_mw_alloc. | ||
2047 | */ | ||
2048 | int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc) | ||
2049 | { | ||
2050 | switch (hipz_rc) { | ||
2051 | case H_SUCCESS: /* successful completion */ | ||
2052 | return 0; | ||
2053 | case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */ | ||
2054 | case H_CONSTRAINED: /* resource constraint */ | ||
2055 | case H_NO_MEM: | ||
2056 | return -ENOMEM; | ||
2057 | case H_BUSY: /* long busy */ | ||
2058 | return -EBUSY; | ||
2059 | default: | ||
2060 | return -EINVAL; | ||
2061 | } | ||
2062 | } /* end ehca_mrmw_map_hrc_alloc() */ | ||
2063 | |||
2064 | /*----------------------------------------------------------------------*/ | ||
2065 | |||
2066 | /* | ||
2067 | * map HIPZ rc to IB retcodes for MR register rpage | ||
2068 | * Used for hipz_h_register_rpage_mr at registering last page | ||
2069 | */ | ||
2070 | int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc) | ||
2071 | { | ||
2072 | switch (hipz_rc) { | ||
2073 | case H_SUCCESS: /* registration complete */ | ||
2074 | return 0; | ||
2075 | case H_PAGE_REGISTERED: /* page registered */ | ||
2076 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
2077 | case H_RH_PARM: /* invalid resource handle */ | ||
2078 | /* case H_QT_PARM: invalid queue type */ | ||
2079 | case H_PARAMETER: /* | ||
2080 | * invalid logical address, | ||
2081 | * or count zero or greater 512 | ||
2082 | */ | ||
2083 | case H_TABLE_FULL: /* page table full */ | ||
2084 | case H_HARDWARE: /* HCA not operational */ | ||
2085 | return -EINVAL; | ||
2086 | case H_BUSY: /* long busy */ | ||
2087 | return -EBUSY; | ||
2088 | default: | ||
2089 | return -EINVAL; | ||
2090 | } | ||
2091 | } /* end ehca_mrmw_map_hrc_rrpg_last() */ | ||
2092 | |||
2093 | /*----------------------------------------------------------------------*/ | ||
2094 | |||
2095 | /* | ||
2096 | * map HIPZ rc to IB retcodes for MR register rpage | ||
2097 | * Used for hipz_h_register_rpage_mr at registering one page, but not last page | ||
2098 | */ | ||
2099 | int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc) | ||
2100 | { | ||
2101 | switch (hipz_rc) { | ||
2102 | case H_PAGE_REGISTERED: /* page registered */ | ||
2103 | return 0; | ||
2104 | case H_SUCCESS: /* registration complete */ | ||
2105 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
2106 | case H_RH_PARM: /* invalid resource handle */ | ||
2107 | /* case H_QT_PARM: invalid queue type */ | ||
2108 | case H_PARAMETER: /* | ||
2109 | * invalid logical address, | ||
2110 | * or count zero or greater 512 | ||
2111 | */ | ||
2112 | case H_TABLE_FULL: /* page table full */ | ||
2113 | case H_HARDWARE: /* HCA not operational */ | ||
2114 | return -EINVAL; | ||
2115 | case H_BUSY: /* long busy */ | ||
2116 | return -EBUSY; | ||
2117 | default: | ||
2118 | return -EINVAL; | ||
2119 | } | ||
2120 | } /* end ehca_mrmw_map_hrc_rrpg_notlast() */ | ||
2121 | |||
2122 | /*----------------------------------------------------------------------*/ | ||
2123 | |||
2124 | /* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */ | ||
2125 | int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc) | ||
2126 | { | ||
2127 | switch (hipz_rc) { | ||
2128 | case H_SUCCESS: /* successful completion */ | ||
2129 | return 0; | ||
2130 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
2131 | case H_RH_PARM: /* invalid resource handle */ | ||
2132 | return -EINVAL; | ||
2133 | case H_BUSY: /* long busy */ | ||
2134 | return -EBUSY; | ||
2135 | default: | ||
2136 | return -EINVAL; | ||
2137 | } | ||
2138 | } /* end ehca_mrmw_map_hrc_query_mr() */ | ||
2139 | |||
2140 | /*----------------------------------------------------------------------*/ | ||
2141 | /*----------------------------------------------------------------------*/ | ||
2142 | |||
2143 | /* | ||
2144 | * map HIPZ rc to IB retcodes for freeing MR resource | ||
2145 | * Used for hipz_h_free_resource_mr | ||
2146 | */ | ||
2147 | int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc) | ||
2148 | { | ||
2149 | switch (hipz_rc) { | ||
2150 | case H_SUCCESS: /* resource freed */ | ||
2151 | return 0; | ||
2152 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
2153 | case H_RH_PARM: /* invalid resource handle */ | ||
2154 | case H_R_STATE: /* invalid resource state */ | ||
2155 | case H_HARDWARE: /* HCA not operational */ | ||
2156 | return -EINVAL; | ||
2157 | case H_RESOURCE: /* Resource in use */ | ||
2158 | case H_BUSY: /* long busy */ | ||
2159 | return -EBUSY; | ||
2160 | default: | ||
2161 | return -EINVAL; | ||
2162 | } | ||
2163 | } /* end ehca_mrmw_map_hrc_free_mr() */ | ||
2164 | |||
2165 | /*----------------------------------------------------------------------*/ | ||
2166 | |||
2167 | /* | ||
2168 | * map HIPZ rc to IB retcodes for freeing MW resource | ||
2169 | * Used for hipz_h_free_resource_mw | ||
2170 | */ | ||
2171 | int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc) | ||
2172 | { | ||
2173 | switch (hipz_rc) { | ||
2174 | case H_SUCCESS: /* resource freed */ | ||
2175 | return 0; | ||
2176 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
2177 | case H_RH_PARM: /* invalid resource handle */ | ||
2178 | case H_R_STATE: /* invalid resource state */ | ||
2179 | case H_HARDWARE: /* HCA not operational */ | ||
2180 | return -EINVAL; | ||
2181 | case H_RESOURCE: /* Resource in use */ | ||
2182 | case H_BUSY: /* long busy */ | ||
2183 | return -EBUSY; | ||
2184 | default: | ||
2185 | return -EINVAL; | ||
2186 | } | ||
2187 | } /* end ehca_mrmw_map_hrc_free_mw() */ | ||
2188 | |||
2189 | /*----------------------------------------------------------------------*/ | ||
2190 | |||
2191 | /* | ||
2192 | * map HIPZ rc to IB retcodes for SMR registrations | ||
2193 | * Used for hipz_h_register_smr. | ||
2194 | */ | ||
2195 | int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc) | ||
2196 | { | ||
2197 | switch (hipz_rc) { | ||
2198 | case H_SUCCESS: /* successful completion */ | ||
2199 | return 0; | ||
2200 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
2201 | case H_RH_PARM: /* invalid resource handle */ | ||
2202 | case H_MEM_PARM: /* invalid MR virtual address */ | ||
2203 | case H_MEM_ACCESS_PARM: /* invalid access controls */ | ||
2204 | case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */ | ||
2205 | return -EINVAL; | ||
2206 | case H_BUSY: /* long busy */ | ||
2207 | return -EBUSY; | ||
2208 | default: | ||
2209 | return -EINVAL; | ||
2210 | } | ||
2211 | } /* end ehca_mrmw_map_hrc_reg_smr() */ | ||
2212 | |||
2213 | /*----------------------------------------------------------------------*/ | ||
2214 | |||
2215 | /* | ||
2216 | * MR destructor and constructor | 1929 | * MR destructor and constructor |
2217 | * used in Reregister MR verb, sets all fields in ehca_mr_t to 0, | 1930 | * used in Reregister MR verb, sets all fields in ehca_mr_t to 0, |
2218 | * except struct ib_mr and spinlock | 1931 | * except struct ib_mr and spinlock |
2219 | */ | 1932 | */ |
2220 | void ehca_mr_deletenew(struct ehca_mr *mr) | 1933 | void ehca_mr_deletenew(struct ehca_mr *mr) |
2221 | { | 1934 | { |
2222 | mr->flags = 0; | 1935 | mr->flags = 0; |
2223 | mr->num_pages = 0; | 1936 | mr->num_kpages = 0; |
2224 | mr->num_4k = 0; | 1937 | mr->num_hwpages = 0; |
2225 | mr->acl = 0; | 1938 | mr->acl = 0; |
2226 | mr->start = NULL; | 1939 | mr->start = NULL; |
2227 | mr->fmr_page_size = 0; | 1940 | mr->fmr_page_size = 0; |
2228 | mr->fmr_max_pages = 0; | 1941 | mr->fmr_max_pages = 0; |
2229 | mr->fmr_max_maps = 0; | 1942 | mr->fmr_max_maps = 0; |
2230 | mr->fmr_map_cnt = 0; | 1943 | mr->fmr_map_cnt = 0; |
2231 | memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); | 1944 | memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); |
2232 | memset(&mr->galpas, 0, sizeof(mr->galpas)); | 1945 | memset(&mr->galpas, 0, sizeof(mr->galpas)); |
2233 | mr->nr_of_pages = 0; | ||
2234 | mr->pagearray = NULL; | ||
2235 | } /* end ehca_mr_deletenew() */ | 1946 | } /* end ehca_mr_deletenew() */ |
2236 | 1947 | ||
2237 | int ehca_init_mrmw_cache(void) | 1948 | int ehca_init_mrmw_cache(void) |
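The field renames in ehca_mr_deletenew() follow the num_pages/num_4k to num_kpages/num_hwpages convention used throughout this patch: kernel pages (PAGE_SIZE) versus the HCA's hardware pages (EHCA_PAGESIZE). A small sketch of the relationship between the two counts; the struct and page sizes are stand-ins for illustration:

    #define KPAGE_SIZE    65536UL  /* e.g. 64 KiB kernel page on ppc64 */
    #define EHCA_PAGESIZE  4096UL  /* 4 KiB HCA hardware page */

    struct mr_counts {
        unsigned long num_kpages;   /* kernel pages covered by the MR */
        unsigned long num_hwpages;  /* hardware pages registered with the HCA */
    };

    static void mr_counts_for(unsigned long bytes, struct mr_counts *c)
    {
        c->num_kpages  = (bytes + KPAGE_SIZE - 1) / KPAGE_SIZE;
        c->num_hwpages = (bytes + EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
    }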
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h index d936e40a5748..24f13fe3708b 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.h +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h | |||
@@ -101,15 +101,10 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr, | |||
101 | u64 *page_list, | 101 | u64 *page_list, |
102 | int list_len); | 102 | int list_len); |
103 | 103 | ||
104 | int ehca_set_pagebuf(struct ehca_mr *e_mr, | 104 | int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo, |
105 | struct ehca_mr_pginfo *pginfo, | ||
106 | u32 number, | 105 | u32 number, |
107 | u64 *kpage); | 106 | u64 *kpage); |
108 | 107 | ||
109 | int ehca_set_pagebuf_1(struct ehca_mr *e_mr, | ||
110 | struct ehca_mr_pginfo *pginfo, | ||
111 | u64 *rpage); | ||
112 | |||
113 | int ehca_mr_is_maxmr(u64 size, | 108 | int ehca_mr_is_maxmr(u64 size, |
114 | u64 *iova_start); | 109 | u64 *iova_start); |
115 | 110 | ||
@@ -121,20 +116,6 @@ void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl); | |||
121 | void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, | 116 | void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, |
122 | int *ib_acl); | 117 | int *ib_acl); |
123 | 118 | ||
124 | int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc); | ||
125 | |||
126 | int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc); | ||
127 | |||
128 | int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc); | ||
129 | |||
130 | int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc); | ||
131 | |||
132 | int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc); | ||
133 | |||
134 | int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc); | ||
135 | |||
136 | int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc); | ||
137 | |||
138 | void ehca_mr_deletenew(struct ehca_mr *mr); | 119 | void ehca_mr_deletenew(struct ehca_mr *mr); |
139 | 120 | ||
140 | #endif /*_EHCA_MRMW_H_*/ | 121 | #endif /*_EHCA_MRMW_H_*/ |
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h index 8707d297ce4c..818803057ebf 100644 --- a/drivers/infiniband/hw/ehca/ehca_qes.h +++ b/drivers/infiniband/hw/ehca/ehca_qes.h | |||
@@ -53,13 +53,13 @@ struct ehca_vsgentry { | |||
53 | u32 length; | 53 | u32 length; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | #define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7) | 56 | #define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7) |
57 | #define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3) | 57 | #define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3) |
58 | #define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12) | 58 | #define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12) |
59 | #define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31) | 59 | #define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31) |
60 | #define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47) | 60 | #define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47) |
61 | #define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55) | 61 | #define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55) |
62 | #define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63) | 62 | #define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63) |
63 | 63 | ||
64 | /* | 64 | /* |
65 | * Unreliable Datagram Address Vector Format | 65 | * Unreliable Datagram Address Vector Format |
@@ -206,10 +206,10 @@ struct ehca_wqe { | |||
206 | 206 | ||
207 | }; | 207 | }; |
208 | 208 | ||
209 | #define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0) | 209 | #define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0) |
210 | #define WC_IMM_DATA EHCA_BMASK_IBM(1,1) | 210 | #define WC_IMM_DATA EHCA_BMASK_IBM(1, 1) |
211 | #define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2) | 211 | #define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2) |
212 | #define WC_SE_BIT EHCA_BMASK_IBM(3,3) | 212 | #define WC_SE_BIT EHCA_BMASK_IBM(3, 3) |
213 | #define WC_STATUS_ERROR_BIT 0x80000000 | 213 | #define WC_STATUS_ERROR_BIT 0x80000000 |
214 | #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 | 214 | #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 |
215 | #define WC_STATUS_PURGE_BIT 0x10 | 215 | #define WC_STATUS_PURGE_BIT 0x10 |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 74671250303f..48e9ceacd6fa 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -602,10 +602,10 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd, | |||
602 | /* UD circumvention */ | 602 | /* UD circumvention */ |
603 | parms.act_nr_send_sges -= 2; | 603 | parms.act_nr_send_sges -= 2; |
604 | parms.act_nr_recv_sges -= 2; | 604 | parms.act_nr_recv_sges -= 2; |
605 | swqe_size = offsetof(struct ehca_wqe, | 605 | swqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[ |
606 | u.ud_av.sg_list[parms.act_nr_send_sges]); | 606 | parms.act_nr_send_sges]); |
607 | rwqe_size = offsetof(struct ehca_wqe, | 607 | rwqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[ |
608 | u.ud_av.sg_list[parms.act_nr_recv_sges]); | 608 | parms.act_nr_recv_sges]); |
609 | } | 609 | } |
610 | 610 | ||
611 | if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) { | 611 | if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) { |
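The reflowed swqe_size/rwqe_size lines above compute a WQE's length as the offset of the one-past-last scatter/gather entry, which counts the fixed header plus exactly act_nr_*_sges entries. A standalone sketch of the idiom; the struct layout is a stand-in, and offsetof on an indexed flexible-array member is the same GNU extension the driver relies on:

    #include <stddef.h>

    struct sge { unsigned long vaddr; unsigned int lkey, length; };

    struct wqe {
        unsigned long header[8];   /* stand-in for the fixed WQE prefix */
        struct sge sg_list[];      /* flexible array of SG entries */
    };

    /* bytes needed for a WQE carrying nsge scatter/gather entries */
    static size_t wqe_size(unsigned int nsge)
    {
        return offsetof(struct wqe, sg_list[nsge]);
    }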
@@ -690,8 +690,8 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd, | |||
690 | if (my_qp->send_cq) { | 690 | if (my_qp->send_cq) { |
691 | ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp); | 691 | ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp); |
692 | if (ret) { | 692 | if (ret) { |
693 | ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x", | 693 | ehca_err(pd->device, |
694 | ret); | 694 | "Couldn't assign qp to send_cq ret=%x", ret); |
695 | goto create_qp_exit4; | 695 | goto create_qp_exit4; |
696 | } | 696 | } |
697 | } | 697 | } |
@@ -749,7 +749,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd, | |||
749 | struct ehca_qp *ret; | 749 | struct ehca_qp *ret; |
750 | 750 | ||
751 | ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0); | 751 | ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0); |
752 | return IS_ERR(ret) ? (struct ib_qp *) ret : &ret->ib_qp; | 752 | return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp; |
753 | } | 753 | } |
754 | 754 | ||
755 | int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, | 755 | int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, |
@@ -780,7 +780,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd, | |||
780 | 780 | ||
781 | my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1); | 781 | my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1); |
782 | if (IS_ERR(my_qp)) | 782 | if (IS_ERR(my_qp)) |
783 | return (struct ib_srq *) my_qp; | 783 | return (struct ib_srq *)my_qp; |
784 | 784 | ||
785 | /* copy back return values */ | 785 | /* copy back return values */ |
786 | srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr; | 786 | srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr; |
@@ -875,7 +875,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, | |||
875 | my_qp, qp_num, h_ret); | 875 | my_qp, qp_num, h_ret); |
876 | return ehca2ib_return_code(h_ret); | 876 | return ehca2ib_return_code(h_ret); |
877 | } | 877 | } |
878 | bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63))); | 878 | bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63))); |
879 | ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p", | 879 | ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p", |
880 | qp_num, bad_send_wqe_p); | 880 | qp_num, bad_send_wqe_p); |
881 | /* convert wqe pointer to vadr */ | 881 | /* convert wqe pointer to vadr */ |
@@ -890,7 +890,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, | |||
890 | } | 890 | } |
891 | 891 | ||
892 | /* loop sets wqe's purge bit */ | 892 | /* loop sets wqe's purge bit */ |
893 | wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs); | 893 | wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); |
894 | *bad_wqe_cnt = 0; | 894 | *bad_wqe_cnt = 0; |
895 | while (wqe->optype != 0xff && wqe->wqef != 0xff) { | 895 | while (wqe->optype != 0xff && wqe->wqef != 0xff) { |
896 | if (ehca_debug_level) | 896 | if (ehca_debug_level) |
@@ -898,7 +898,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, | |||
898 | wqe->nr_of_data_seg = 0; /* suppress data access */ | 898 | wqe->nr_of_data_seg = 0; /* suppress data access */ |
899 | wqe->wqef = WQEF_PURGE; /* WQE to be purged */ | 899 | wqe->wqef = WQEF_PURGE; /* WQE to be purged */ |
900 | q_ofs = ipz_queue_advance_offset(squeue, q_ofs); | 900 | q_ofs = ipz_queue_advance_offset(squeue, q_ofs); |
901 | wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs); | 901 | wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); |
902 | *bad_wqe_cnt = (*bad_wqe_cnt)+1; | 902 | *bad_wqe_cnt = (*bad_wqe_cnt)+1; |
903 | } | 903 | } |
904 | /* | 904 | /* |
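prepare_sqe_rts() walks the send queue flagging every pending WQE for purge until it reaches the 0xff/0xff sentinel that internal_modify_qp() writes into the next free WQE (visible in a later hunk of this file). A condensed sketch of that sentinel-terminated walk; the struct and the WQEF_PURGE value are stand-ins:

    struct wqe_stub { unsigned char optype, wqef, nr_of_data_seg; };
    #define WQEF_PURGE 0x08                   /* illustrative value */

    static int purge_until_sentinel(struct wqe_stub *q, unsigned int nent)
    {
        int bad = 0;
        for (unsigned int i = 0; i < nent; i++) {
            if (q[i].optype == 0xff && q[i].wqef == 0xff)
                break;                        /* sentinel: next free WQE */
            q[i].nr_of_data_seg = 0;          /* suppress data access */
            q[i].wqef = WQEF_PURGE;           /* mark WQE to be purged */
            bad++;
        }
        return bad;                           /* *bad_wqe_cnt equivalent */
    }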
@@ -1003,7 +1003,7 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
1003 | goto modify_qp_exit1; | 1003 | goto modify_qp_exit1; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x " | 1006 | ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x " |
1007 | "new qp_state=%x attribute_mask=%x", | 1007 | "new qp_state=%x attribute_mask=%x", |
1008 | my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask); | 1008 | my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask); |
1009 | 1009 | ||
@@ -1019,7 +1019,8 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
1019 | goto modify_qp_exit1; | 1019 | goto modify_qp_exit1; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state))) | 1022 | mqpcb->qp_state = ib2ehca_qp_state(qp_new_state); |
1023 | if (mqpcb->qp_state) | ||
1023 | update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1); | 1024 | update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1); |
1024 | else { | 1025 | else { |
1025 | ret = -EINVAL; | 1026 | ret = -EINVAL; |
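The qp_state change above only splits the assignment out of the if condition; behaviour is identical, but the test no longer hides a side effect or needs the double parentheses that suppress gcc's "suggest parentheses" warning. A sketch of the pattern with a stand-in ib2ehca_qp_state() that returns 0 for an invalid state:

    #include <errno.h>

    static int ib2ehca_qp_state(int ib_state)        /* stand-in mapping */
    {
        return (ib_state >= 0 && ib_state <= 6) ? ib_state + 1 : 0;
    }

    static int pick_qp_state(int new_state, int *hw_state)
    {
        *hw_state = ib2ehca_qp_state(new_state);     /* assign first... */
        if (!*hw_state)                              /* ...then test */
            return -EINVAL;
        return 0;
    }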
@@ -1077,7 +1078,7 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
1077 | spin_lock_irqsave(&my_qp->spinlock_s, flags); | 1078 | spin_lock_irqsave(&my_qp->spinlock_s, flags); |
1078 | squeue_locked = 1; | 1079 | squeue_locked = 1; |
1079 | /* mark next free wqe */ | 1080 | /* mark next free wqe */ |
1080 | wqe = (struct ehca_wqe*) | 1081 | wqe = (struct ehca_wqe *) |
1081 | ipz_qeit_get(&my_qp->ipz_squeue); | 1082 | ipz_qeit_get(&my_qp->ipz_squeue); |
1082 | wqe->optype = wqe->wqef = 0xff; | 1083 | wqe->optype = wqe->wqef = 0xff; |
1083 | ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p", | 1084 | ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p", |
@@ -1312,7 +1313,7 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
1312 | if (h_ret != H_SUCCESS) { | 1313 | if (h_ret != H_SUCCESS) { |
1313 | ret = ehca2ib_return_code(h_ret); | 1314 | ret = ehca2ib_return_code(h_ret); |
1314 | ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx " | 1315 | ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx " |
1315 | "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num); | 1316 | "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num); |
1316 | goto modify_qp_exit2; | 1317 | goto modify_qp_exit2; |
1317 | } | 1318 | } |
1318 | 1319 | ||
@@ -1411,7 +1412,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1411 | } | 1412 | } |
1412 | 1413 | ||
1413 | if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) { | 1414 | if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) { |
1414 | ehca_err(qp->device,"Invalid attribute mask " | 1415 | ehca_err(qp->device, "Invalid attribute mask " |
1415 | "ehca_qp=%p qp_num=%x qp_attr_mask=%x ", | 1416 | "ehca_qp=%p qp_num=%x qp_attr_mask=%x ", |
1416 | my_qp, qp->qp_num, qp_attr_mask); | 1417 | my_qp, qp->qp_num, qp_attr_mask); |
1417 | return -EINVAL; | 1418 | return -EINVAL; |
@@ -1419,7 +1420,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1419 | 1420 | ||
1420 | qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); | 1421 | qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
1421 | if (!qpcb) { | 1422 | if (!qpcb) { |
1422 | ehca_err(qp->device,"Out of memory for qpcb " | 1423 | ehca_err(qp->device, "Out of memory for qpcb " |
1423 | "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); | 1424 | "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); |
1424 | return -ENOMEM; | 1425 | return -ENOMEM; |
1425 | } | 1426 | } |
@@ -1431,7 +1432,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1431 | 1432 | ||
1432 | if (h_ret != H_SUCCESS) { | 1433 | if (h_ret != H_SUCCESS) { |
1433 | ret = ehca2ib_return_code(h_ret); | 1434 | ret = ehca2ib_return_code(h_ret); |
1434 | ehca_err(qp->device,"hipz_h_query_qp() failed " | 1435 | ehca_err(qp->device, "hipz_h_query_qp() failed " |
1435 | "ehca_qp=%p qp_num=%x h_ret=%lx", | 1436 | "ehca_qp=%p qp_num=%x h_ret=%lx", |
1436 | my_qp, qp->qp_num, h_ret); | 1437 | my_qp, qp->qp_num, h_ret); |
1437 | goto query_qp_exit1; | 1438 | goto query_qp_exit1; |
@@ -1442,7 +1443,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1442 | 1443 | ||
1443 | if (qp_attr->cur_qp_state == -EINVAL) { | 1444 | if (qp_attr->cur_qp_state == -EINVAL) { |
1444 | ret = -EINVAL; | 1445 | ret = -EINVAL; |
1445 | ehca_err(qp->device,"Got invalid ehca_qp_state=%x " | 1446 | ehca_err(qp->device, "Got invalid ehca_qp_state=%x " |
1446 | "ehca_qp=%p qp_num=%x", | 1447 | "ehca_qp=%p qp_num=%x", |
1447 | qpcb->qp_state, my_qp, qp->qp_num); | 1448 | qpcb->qp_state, my_qp, qp->qp_num); |
1448 | goto query_qp_exit1; | 1449 | goto query_qp_exit1; |
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index 61da65e6e5e0..94eed70fedf5 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
@@ -79,7 +79,8 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, | |||
79 | } | 79 | } |
80 | 80 | ||
81 | if (ehca_debug_level) { | 81 | if (ehca_debug_level) { |
82 | ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue); | 82 | ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", |
83 | ipz_rqueue); | ||
83 | ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); | 84 | ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); |
84 | } | 85 | } |
85 | 86 | ||
@@ -99,7 +100,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr) | |||
99 | struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr; | 100 | struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr; |
100 | struct ib_sge *sge = send_wr->sg_list; | 101 | struct ib_sge *sge = send_wr->sg_list; |
101 | ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x " | 102 | ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x " |
102 | "send_flags=%x opcode=%x",idx, send_wr->wr_id, | 103 | "send_flags=%x opcode=%x", idx, send_wr->wr_id, |
103 | send_wr->num_sge, send_wr->send_flags, | 104 | send_wr->num_sge, send_wr->send_flags, |
104 | send_wr->opcode); | 105 | send_wr->opcode); |
105 | if (mad_hdr) { | 106 | if (mad_hdr) { |
@@ -116,7 +117,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr) | |||
116 | mad_hdr->attr_mod); | 117 | mad_hdr->attr_mod); |
117 | } | 118 | } |
118 | for (j = 0; j < send_wr->num_sge; j++) { | 119 | for (j = 0; j < send_wr->num_sge; j++) { |
119 | u8 *data = (u8 *) abs_to_virt(sge->addr); | 120 | u8 *data = (u8 *)abs_to_virt(sge->addr); |
120 | ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " | 121 | ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " |
121 | "lkey=%x", | 122 | "lkey=%x", |
122 | idx, j, data, sge->length, sge->lkey); | 123 | idx, j, data, sge->length, sge->lkey); |
@@ -534,9 +535,11 @@ poll_cq_one_read_cqe: | |||
534 | 535 | ||
535 | cqe_count++; | 536 | cqe_count++; |
536 | if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) { | 537 | if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) { |
537 | struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number); | 538 | struct ehca_qp *qp; |
538 | int purgeflag; | 539 | int purgeflag; |
539 | unsigned long flags; | 540 | unsigned long flags; |
541 | |||
542 | qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number); | ||
540 | if (!qp) { | 543 | if (!qp) { |
541 | ehca_err(cq->device, "cq_num=%x qp_num=%x " | 544 | ehca_err(cq->device, "cq_num=%x qp_num=%x " |
542 | "could not find qp -> ignore cqe", | 545 | "could not find qp -> ignore cqe", |
@@ -551,8 +554,8 @@ poll_cq_one_read_cqe: | |||
551 | spin_unlock_irqrestore(&qp->spinlock_s, flags); | 554 | spin_unlock_irqrestore(&qp->spinlock_s, flags); |
552 | 555 | ||
553 | if (purgeflag) { | 556 | if (purgeflag) { |
554 | ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x " | 557 | ehca_dbg(cq->device, |
555 | "src_qp=%x", | 558 | "Got CQE with purged bit qp_num=%x src_qp=%x", |
556 | cqe->local_qp_number, cqe->remote_qp_number); | 559 | cqe->local_qp_number, cqe->remote_qp_number); |
557 | if (ehca_debug_level) | 560 | if (ehca_debug_level) |
558 | ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", | 561 | ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", |
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h index 03b185f873da..678b81391861 100644 --- a/drivers/infiniband/hw/ehca/ehca_tools.h +++ b/drivers/infiniband/hw/ehca/ehca_tools.h | |||
@@ -93,14 +93,14 @@ extern int ehca_debug_level; | |||
93 | #define ehca_gen_dbg(format, arg...) \ | 93 | #define ehca_gen_dbg(format, arg...) \ |
94 | do { \ | 94 | do { \ |
95 | if (unlikely(ehca_debug_level)) \ | 95 | if (unlikely(ehca_debug_level)) \ |
96 | printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\ | 96 | printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \ |
97 | get_paca()->paca_index, __FUNCTION__, ## arg); \ | 97 | get_paca()->paca_index, __FUNCTION__, ## arg); \ |
98 | } while (0) | 98 | } while (0) |
99 | 99 | ||
100 | #define ehca_gen_warn(format, arg...) \ | 100 | #define ehca_gen_warn(format, arg...) \ |
101 | do { \ | 101 | do { \ |
102 | if (unlikely(ehca_debug_level)) \ | 102 | if (unlikely(ehca_debug_level)) \ |
103 | printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\ | 103 | printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \ |
104 | get_paca()->paca_index, __FUNCTION__, ## arg); \ | 104 | get_paca()->paca_index, __FUNCTION__, ## arg); \ |
105 | } while (0) | 105 | } while (0) |
106 | 106 | ||
@@ -114,12 +114,12 @@ extern int ehca_debug_level; | |||
114 | * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex> | 114 | * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex> |
115 | */ | 115 | */ |
116 | #define ehca_dmp(adr, len, format, args...) \ | 116 | #define ehca_dmp(adr, len, format, args...) \ |
117 | do { \ | 117 | do { \ |
118 | unsigned int x; \ | 118 | unsigned int x; \ |
119 | unsigned int l = (unsigned int)(len); \ | 119 | unsigned int l = (unsigned int)(len); \ |
120 | unsigned char *deb = (unsigned char*)(adr); \ | 120 | unsigned char *deb = (unsigned char *)(adr); \ |
121 | for (x = 0; x < l; x += 16) { \ | 121 | for (x = 0; x < l; x += 16) { \ |
122 | printk("EHCA_DMP:%s " format \ | 122 | printk(KERN_INFO "EHCA_DMP:%s " format \ |
123 | " adr=%p ofs=%04x %016lx %016lx\n", \ | 123 | " adr=%p ofs=%04x %016lx %016lx\n", \ |
124 | __FUNCTION__, ##args, deb, x, \ | 124 | __FUNCTION__, ##args, deb, x, \ |
125 | *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ | 125 | *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ |
@@ -128,16 +128,16 @@ extern int ehca_debug_level; | |||
128 | } while (0) | 128 | } while (0) |
129 | 129 | ||
130 | /* define a bitmask, little endian version */ | 130 | /* define a bitmask, little endian version */ |
131 | #define EHCA_BMASK(pos,length) (((pos)<<16)+(length)) | 131 | #define EHCA_BMASK(pos, length) (((pos) << 16) + (length)) |
132 | 132 | ||
133 | /* define a bitmask, the ibm way... */ | 133 | /* define a bitmask, the ibm way... */ |
134 | #define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1)) | 134 | #define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1)) |
135 | 135 | ||
136 | /* internal function, don't use */ | 136 | /* internal function, don't use */ |
137 | #define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff) | 137 | #define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff) |
138 | 138 | ||
139 | /* internal function, don't use */ | 139 | /* internal function, don't use */ |
140 | #define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff)) | 140 | #define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff)) |
141 | 141 | ||
142 | /** | 142 | /** |
143 | * EHCA_BMASK_SET - return value shifted and masked by mask | 143 | * EHCA_BMASK_SET - return value shifted and masked by mask |
@@ -145,14 +145,14 @@ extern int ehca_debug_level; | |||
145 | * variable &= ~EHCA_BMASK_SET(MY_MASK, -1) clears the bits from the mask | 145 | * variable &= ~EHCA_BMASK_SET(MY_MASK, -1) clears the bits from the mask |
146 | * in variable | 146 | * in variable |
147 | */ | 147 | */ |
148 | #define EHCA_BMASK_SET(mask,value) \ | 148 | #define EHCA_BMASK_SET(mask, value) \ |
149 | ((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask)) | 149 | ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask)) |
150 | 150 | ||
151 | /** | 151 | /** |
152 | * EHCA_BMASK_GET - extract a parameter from value by mask | 152 | * EHCA_BMASK_GET - extract a parameter from value by mask |
153 | */ | 153 | */ |
154 | #define EHCA_BMASK_GET(mask,value) \ | 154 | #define EHCA_BMASK_GET(mask, value) \ |
155 | (EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask))) | 155 | (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask))) |
156 | 156 | ||
157 | 157 | ||
158 | /* Converts ehca to ib return code */ | 158 | /* Converts ehca to ib return code */ |
@@ -161,8 +161,11 @@ static inline int ehca2ib_return_code(u64 ehca_rc) | |||
161 | switch (ehca_rc) { | 161 | switch (ehca_rc) { |
162 | case H_SUCCESS: | 162 | case H_SUCCESS: |
163 | return 0; | 163 | return 0; |
164 | case H_RESOURCE: /* Resource in use */ | ||
164 | case H_BUSY: | 165 | case H_BUSY: |
165 | return -EBUSY; | 166 | return -EBUSY; |
167 | case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */ | ||
168 | case H_CONSTRAINED: /* resource constraint */ | ||
166 | case H_NO_MEM: | 169 | case H_NO_MEM: |
167 | return -ENOMEM; | 170 | return -ENOMEM; |
168 | default: | 171 | default: |
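With the seven ehca_mrmw_map_hrc_*() helpers deleted above, this extended table is now the single place hcall return codes become errnos: H_RESOURCE joins the -EBUSY class and the two allocation-pressure codes join -ENOMEM. A condensed user-space sketch of the resulting mapping; the H_* constants are stand-ins, not the real pSeries hcall numbers:

    #include <errno.h>

    enum { H_SUCCESS, H_RESOURCE, H_BUSY,
           H_NOT_ENOUGH_RESOURCES, H_CONSTRAINED, H_NO_MEM };  /* stand-ins */

    static int ehca2ib_return_code(unsigned long ehca_rc)
    {
        switch (ehca_rc) {
        case H_SUCCESS:
            return 0;
        case H_RESOURCE:              /* resource in use */
        case H_BUSY:
            return -EBUSY;
        case H_NOT_ENOUGH_RESOURCES:  /* insufficient resources */
        case H_CONSTRAINED:           /* resource constraint */
        case H_NO_MEM:
            return -ENOMEM;
        default:
            return -EINVAL;
        }
    }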
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c index 3031b3bb56f9..05c415744e3b 100644 --- a/drivers/infiniband/hw/ehca/ehca_uverbs.c +++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c | |||
@@ -70,7 +70,7 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context) | |||
70 | 70 | ||
71 | static void ehca_mm_open(struct vm_area_struct *vma) | 71 | static void ehca_mm_open(struct vm_area_struct *vma) |
72 | { | 72 | { |
73 | u32 *count = (u32*)vma->vm_private_data; | 73 | u32 *count = (u32 *)vma->vm_private_data; |
74 | if (!count) { | 74 | if (!count) { |
75 | ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", | 75 | ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", |
76 | vma->vm_start, vma->vm_end); | 76 | vma->vm_start, vma->vm_end); |
@@ -86,7 +86,7 @@ static void ehca_mm_open(struct vm_area_struct *vma) | |||
86 | 86 | ||
87 | static void ehca_mm_close(struct vm_area_struct *vma) | 87 | static void ehca_mm_close(struct vm_area_struct *vma) |
88 | { | 88 | { |
89 | u32 *count = (u32*)vma->vm_private_data; | 89 | u32 *count = (u32 *)vma->vm_private_data; |
90 | if (!count) { | 90 | if (!count) { |
91 | ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", | 91 | ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", |
92 | vma->vm_start, vma->vm_end); | 92 | vma->vm_start, vma->vm_end); |
@@ -215,7 +215,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, | |||
215 | case 2: /* qp rqueue_addr */ | 215 | case 2: /* qp rqueue_addr */ |
216 | ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", | 216 | ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", |
217 | qp->ib_qp.qp_num); | 217 | qp->ib_qp.qp_num); |
218 | ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue); | 218 | ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, |
219 | &qp->mm_count_rqueue); | ||
219 | if (unlikely(ret)) { | 220 | if (unlikely(ret)) { |
220 | ehca_err(qp->ib_qp.device, | 221 | ehca_err(qp->ib_qp.device, |
221 | "ehca_mmap_queue(rq) failed rc=%x qp_num=%x", | 222 | "ehca_mmap_queue(rq) failed rc=%x qp_num=%x", |
@@ -227,7 +228,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, | |||
227 | case 3: /* qp squeue_addr */ | 228 | case 3: /* qp squeue_addr */ |
228 | ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", | 229 | ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", |
229 | qp->ib_qp.qp_num); | 230 | qp->ib_qp.qp_num); |
230 | ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue); | 231 | ret = ehca_mmap_queue(vma, &qp->ipz_squeue, |
232 | &qp->mm_count_squeue); | ||
231 | if (unlikely(ret)) { | 233 | if (unlikely(ret)) { |
232 | ehca_err(qp->ib_qp.device, | 234 | ehca_err(qp->ib_qp.device, |
233 | "ehca_mmap_queue(sq) failed rc=%x qp_num=%x", | 235 | "ehca_mmap_queue(sq) failed rc=%x qp_num=%x", |
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index 4776a8b0feec..3394e05f4b4f 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c | |||
@@ -501,8 +501,8 @@ u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle, | |||
501 | return H_PARAMETER; | 501 | return H_PARAMETER; |
502 | } | 502 | } |
503 | 503 | ||
504 | return hipz_h_register_rpage(adapter_handle,pagesize,queue_type, | 504 | return hipz_h_register_rpage(adapter_handle, pagesize, queue_type, |
505 | qp_handle.handle,logical_address_of_page, | 505 | qp_handle.handle, logical_address_of_page, |
506 | count); | 506 | count); |
507 | } | 507 | } |
508 | 508 | ||
@@ -522,9 +522,9 @@ u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle, | |||
522 | qp_handle.handle, /* r6 */ | 522 | qp_handle.handle, /* r6 */ |
523 | 0, 0, 0, 0, 0, 0); | 523 | 0, 0, 0, 0, 0, 0); |
524 | if (log_addr_next_sq_wqe2processed) | 524 | if (log_addr_next_sq_wqe2processed) |
525 | *log_addr_next_sq_wqe2processed = (void*)outs[0]; | 525 | *log_addr_next_sq_wqe2processed = (void *)outs[0]; |
526 | if (log_addr_next_rq_wqe2processed) | 526 | if (log_addr_next_rq_wqe2processed) |
527 | *log_addr_next_rq_wqe2processed = (void*)outs[1]; | 527 | *log_addr_next_rq_wqe2processed = (void *)outs[1]; |
528 | 528 | ||
529 | return ret; | 529 | return ret; |
530 | } | 530 | } |
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c index 0b1a4772c78a..214821095cb1 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.c +++ b/drivers/infiniband/hw/ehca/hcp_phyp.c | |||
@@ -50,7 +50,7 @@ int hcall_map_page(u64 physaddr, u64 *mapaddr) | |||
50 | 50 | ||
51 | int hcall_unmap_page(u64 mapaddr) | 51 | int hcall_unmap_page(u64 mapaddr) |
52 | { | 52 | { |
53 | iounmap((volatile void __iomem*)mapaddr); | 53 | iounmap((volatile void __iomem *) mapaddr); |
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h index 20898a153446..868735fd3187 100644 --- a/drivers/infiniband/hw/ehca/hipz_fns_core.h +++ b/drivers/infiniband/hw/ehca/hipz_fns_core.h | |||
@@ -53,10 +53,10 @@ | |||
53 | #define hipz_galpa_load_cq(gal, offset) \ | 53 | #define hipz_galpa_load_cq(gal, offset) \ |
54 | hipz_galpa_load(gal, CQTEMM_OFFSET(offset)) | 54 | hipz_galpa_load(gal, CQTEMM_OFFSET(offset)) |
55 | 55 | ||
56 | #define hipz_galpa_store_qp(gal,offset, value) \ | 56 | #define hipz_galpa_store_qp(gal, offset, value) \ |
57 | hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value) | 57 | hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value) |
58 | #define hipz_galpa_load_qp(gal, offset) \ | 58 | #define hipz_galpa_load_qp(gal, offset) \ |
59 | hipz_galpa_load(gal,QPTEMM_OFFSET(offset)) | 59 | hipz_galpa_load(gal, QPTEMM_OFFSET(offset)) |
60 | 60 | ||
61 | static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) | 61 | static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) |
62 | { | 62 | { |
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h index dad6dea5636b..d9739e554515 100644 --- a/drivers/infiniband/hw/ehca/hipz_hw.h +++ b/drivers/infiniband/hw/ehca/hipz_hw.h | |||
@@ -161,11 +161,11 @@ struct hipz_qptemm { | |||
161 | /* 0x1000 */ | 161 | /* 0x1000 */ |
162 | }; | 162 | }; |
163 | 163 | ||
164 | #define QPX_SQADDER EHCA_BMASK_IBM(48,63) | 164 | #define QPX_SQADDER EHCA_BMASK_IBM(48, 63) |
165 | #define QPX_RQADDER EHCA_BMASK_IBM(48,63) | 165 | #define QPX_RQADDER EHCA_BMASK_IBM(48, 63) |
166 | #define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3,3) | 166 | #define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3) |
167 | 167 | ||
168 | #define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x) | 168 | #define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x) |
169 | 169 | ||
170 | /* MRMWPT Entry Memory Map */ | 170 | /* MRMWPT Entry Memory Map */ |
171 | struct hipz_mrmwmm { | 171 | struct hipz_mrmwmm { |
@@ -187,7 +187,7 @@ struct hipz_mrmwmm { | |||
187 | 187 | ||
188 | }; | 188 | }; |
189 | 189 | ||
190 | #define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x) | 190 | #define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x) |
191 | 191 | ||
192 | struct hipz_qpedmm { | 192 | struct hipz_qpedmm { |
193 | /* 0x00 */ | 193 | /* 0x00 */ |
@@ -238,7 +238,7 @@ struct hipz_qpedmm { | |||
238 | u64 qpedx_rrva3; | 238 | u64 qpedx_rrva3; |
239 | }; | 239 | }; |
240 | 240 | ||
241 | #define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x) | 241 | #define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x) |
242 | 242 | ||
243 | /* CQ Table Entry Memory Map */ | 243 | /* CQ Table Entry Memory Map */ |
244 | struct hipz_cqtemm { | 244 | struct hipz_cqtemm { |
@@ -263,12 +263,12 @@ struct hipz_cqtemm { | |||
263 | /* 0x1000 */ | 263 | /* 0x1000 */ |
264 | }; | 264 | }; |
265 | 265 | ||
266 | #define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32,63) | 266 | #define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63) |
267 | #define CQX_FECADDER EHCA_BMASK_IBM(32,63) | 267 | #define CQX_FECADDER EHCA_BMASK_IBM(32, 63) |
268 | #define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0) | 268 | #define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0) |
269 | #define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0) | 269 | #define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0) |
270 | 270 | ||
271 | #define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x) | 271 | #define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x) |
272 | 272 | ||
273 | /* EQ Table Entry Memory Map */ | 273 | /* EQ Table Entry Memory Map */ |
274 | struct hipz_eqtemm { | 274 | struct hipz_eqtemm { |
@@ -293,7 +293,7 @@ struct hipz_eqtemm { | |||
293 | 293 | ||
294 | }; | 294 | }; |
295 | 295 | ||
296 | #define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x) | 296 | #define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x) |
297 | 297 | ||
298 | /* access control defines for MR/MW */ | 298 | /* access control defines for MR/MW */ |
299 | #define HIPZ_ACCESSCTRL_L_WRITE 0x00800000 | 299 | #define HIPZ_ACCESSCTRL_L_WRITE 0x00800000 |
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c index bf7a40088f61..9606f13ed092 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c | |||
@@ -114,7 +114,7 @@ int ipz_queue_ctor(struct ipz_queue *queue, | |||
114 | */ | 114 | */ |
115 | f = 0; | 115 | f = 0; |
116 | while (f < nr_of_pages) { | 116 | while (f < nr_of_pages) { |
117 | u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL); | 117 | u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL); |
118 | int k; | 118 | int k; |
119 | if (!kpage) | 119 | if (!kpage) |
120 | goto ipz_queue_ctor_exit0; /*NOMEM*/ | 120 | goto ipz_queue_ctor_exit0; /*NOMEM*/ |
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h index 007f0882fd40..39a4f64aff41 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h | |||
@@ -240,7 +240,7 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue); | |||
240 | static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue) | 240 | static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue) |
241 | { | 241 | { |
242 | void *ret = ipz_qeit_get(queue); | 242 | void *ret = ipz_qeit_get(queue); |
243 | u32 qe = *(u8 *) ret; | 243 | u32 qe = *(u8 *)ret; |
244 | if ((qe >> 7) != (queue->toggle_state & 1)) | 244 | if ((qe >> 7) != (queue->toggle_state & 1)) |
245 | return NULL; | 245 | return NULL; |
246 | ipz_qeit_eq_get_inc(queue); /* this is a good one */ | 246 | ipz_qeit_eq_get_inc(queue); /* this is a good one */ |
@@ -250,7 +250,7 @@ static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue) | |||
250 | static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue) | 250 | static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue) |
251 | { | 251 | { |
252 | void *ret = ipz_qeit_get(queue); | 252 | void *ret = ipz_qeit_get(queue); |
253 | u32 qe = *(u8 *) ret; | 253 | u32 qe = *(u8 *)ret; |
254 | if ((qe >> 7) != (queue->toggle_state & 1)) | 254 | if ((qe >> 7) != (queue->toggle_state & 1)) |
255 | return NULL; | 255 | return NULL; |
256 | return ret; | 256 | return ret; |
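Both ipz helpers above gate on the entry's top bit matching the queue's toggle state: the producer writes entries with the current toggle, and the consumer flips its copy each time it wraps, so a stale slot from the previous lap fails the test. A sketch of that scheme; the struct layout is a stand-in, only the top bit of the first byte matters:

    #include <stdint.h>
    #include <stddef.h>

    struct ipz_q {
        uint8_t *entries;
        size_t entry_size, nent, head;
        unsigned toggle;
    };

    static void *ipz_peek_valid(struct ipz_q *q)
    {
        uint8_t *qe = q->entries + q->head * q->entry_size;
        if ((*qe >> 7) != (q->toggle & 1))
            return NULL;            /* producer has not written this slot yet */
        return qe;
    }

    static void *ipz_get_inc_valid(struct ipz_q *q)
    {
        void *qe = ipz_peek_valid(q);
        if (qe && ++q->head == q->nent) {
            q->head = 0;
            q->toggle ^= 1;         /* toggle flips on every wrap */
        }
        return qe;
    }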
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 9361f5ab8bd6..09c5fd84b1e3 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -1889,7 +1889,7 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno, | |||
1889 | /* Below is "non-zero" to force override, but both actual LEDs are off */ | 1889 | /* Below is "non-zero" to force override, but both actual LEDs are off */ |
1890 | #define LED_OVER_BOTH_OFF (8) | 1890 | #define LED_OVER_BOTH_OFF (8) |
1891 | 1891 | ||
1892 | void ipath_run_led_override(unsigned long opaque) | 1892 | static void ipath_run_led_override(unsigned long opaque) |
1893 | { | 1893 | { |
1894 | struct ipath_devdata *dd = (struct ipath_devdata *)opaque; | 1894 | struct ipath_devdata *dd = (struct ipath_devdata *)opaque; |
1895 | int timeoff; | 1895 | int timeoff; |
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c index 6b9147964a4f..b4503e9c1e95 100644 --- a/drivers/infiniband/hw/ipath/ipath_eeprom.c +++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c | |||
@@ -426,8 +426,8 @@ bail: | |||
426 | * @buffer: data to write | 426 | * @buffer: data to write |
427 | * @len: number of bytes to write | 427 | * @len: number of bytes to write |
428 | */ | 428 | */ |
429 | int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset, | 429 | static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset, |
430 | const void *buffer, int len) | 430 | const void *buffer, int len) |
431 | { | 431 | { |
432 | u8 single_byte; | 432 | u8 single_byte; |
433 | int sub_len; | 433 | int sub_len; |
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 47aa43428fbf..1fd91c59f246 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -70,7 +70,7 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum) | |||
70 | * If rewrite is true, and bits are set in the sendbufferror registers, | 70 | * If rewrite is true, and bits are set in the sendbufferror registers, |
71 | * we'll write to the buffer, for error recovery on parity errors. | 71 | * we'll write to the buffer, for error recovery on parity errors. |
72 | */ | 72 | */ |
73 | void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite) | 73 | static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite) |
74 | { | 74 | { |
75 | u32 piobcnt; | 75 | u32 piobcnt; |
76 | unsigned long sbuf[4]; | 76 | unsigned long sbuf[4]; |
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 3105005fc9d2..ace63ef78e6f 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -776,7 +776,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *); | |||
776 | int ipath_update_eeprom_log(struct ipath_devdata *dd); | 776 | int ipath_update_eeprom_log(struct ipath_devdata *dd); |
777 | void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr); | 777 | void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr); |
778 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); | 778 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); |
779 | void ipath_disarm_senderrbufs(struct ipath_devdata *, int); | ||
780 | 779 | ||
781 | /* | 780 | /* |
782 | * Set LED override, only the two LSBs have "public" meaning, but | 781 | * Set LED override, only the two LSBs have "public" meaning, but |
@@ -820,7 +819,6 @@ static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data) | |||
820 | #define IPATH_MDIO_CTRL_8355_REG_10 0x1D | 819 | #define IPATH_MDIO_CTRL_8355_REG_10 0x1D |
821 | 820 | ||
822 | int ipath_get_user_pages(unsigned long, size_t, struct page **); | 821 | int ipath_get_user_pages(unsigned long, size_t, struct page **); |
823 | int ipath_get_user_pages_nocopy(unsigned long, struct page **); | ||
824 | void ipath_release_user_pages(struct page **, size_t); | 822 | void ipath_release_user_pages(struct page **, size_t); |
825 | void ipath_release_user_pages_on_close(struct page **, size_t); | 823 | void ipath_release_user_pages_on_close(struct page **, size_t); |
826 | int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int); | 824 | int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int); |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index 85256747d8a1..c69c25239443 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c | |||
@@ -507,7 +507,7 @@ static int want_buffer(struct ipath_devdata *dd) | |||
507 | * | 507 | * |
508 | * Called when we run out of PIO buffers. | 508 | * Called when we run out of PIO buffers. |
509 | */ | 509 | */ |
510 | void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) | 510 | static void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) |
511 | { | 511 | { |
512 | unsigned long flags; | 512 | unsigned long flags; |
513 | 513 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index 27034d38b3dd..0190edc8044e 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
@@ -171,32 +171,6 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages, | |||
171 | return ret; | 171 | return ret; |
172 | } | 172 | } |
173 | 173 | ||
174 | /** | ||
175 | * ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared | ||
176 | * @start_page: the page to lock | ||
177 | * @p: the output page structure | ||
178 | * | ||
179 | * This is similar to ipath_get_user_pages, but it's always one page, and we | ||
180 | * mark the page as locked for I/O, and shared. This is used for the user | ||
181 | * process page that contains the destination address for the rcvhdrq tail | ||
182 | * update, so we need to have the vma. If we don't do this, the page can be | ||
183 | * taken away from us on fork, even if the child never touches it, and then | ||
184 | * the user process never sees the tail register updates. | ||
185 | */ | ||
186 | int ipath_get_user_pages_nocopy(unsigned long page, struct page **p) | ||
187 | { | ||
188 | struct vm_area_struct *vma; | ||
189 | int ret; | ||
190 | |||
191 | down_write(¤t->mm->mmap_sem); | ||
192 | |||
193 | ret = __get_user_pages(page, 1, p, &vma); | ||
194 | |||
195 | up_write(¤t->mm->mmap_sem); | ||
196 | |||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | void ipath_release_user_pages(struct page **p, size_t num_pages) | 174 | void ipath_release_user_pages(struct page **p, size_t num_pages) |
201 | { | 175 | { |
202 | down_write(¤t->mm->mmap_sem); | 176 | down_write(¤t->mm->mmap_sem); |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 65f7181e9cf8..16aa61fd8085 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -488,7 +488,7 @@ bail:; | |||
488 | * This is called from ipath_do_rcv_timer() at interrupt level to check for | 488 | * This is called from ipath_do_rcv_timer() at interrupt level to check for |
489 | * QPs which need retransmits and to collect performance numbers. | 489 | * QPs which need retransmits and to collect performance numbers. |
490 | */ | 490 | */ |
491 | void ipath_ib_timer(struct ipath_ibdev *dev) | 491 | static void ipath_ib_timer(struct ipath_ibdev *dev) |
492 | { | 492 | { |
493 | struct ipath_qp *resend = NULL; | 493 | struct ipath_qp *resend = NULL; |
494 | struct list_head *last; | 494 | struct list_head *last; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index f3d1f2cee6f8..9bbe81967f14 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h | |||
@@ -782,8 +782,6 @@ void ipath_update_mmap_info(struct ipath_ibdev *dev, | |||
782 | 782 | ||
783 | int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); | 783 | int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); |
784 | 784 | ||
785 | void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev); | ||
786 | |||
787 | void ipath_insert_rnr_queue(struct ipath_qp *qp); | 785 | void ipath_insert_rnr_queue(struct ipath_qp *qp); |
788 | 786 | ||
789 | int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only); | 787 | int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only); |
@@ -807,8 +805,6 @@ void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32); | |||
807 | 805 | ||
808 | int ipath_ib_piobufavail(struct ipath_ibdev *); | 806 | int ipath_ib_piobufavail(struct ipath_ibdev *); |
809 | 807 | ||
810 | void ipath_ib_timer(struct ipath_ibdev *); | ||
811 | |||
812 | unsigned ipath_get_npkeys(struct ipath_devdata *); | 808 | unsigned ipath_get_npkeys(struct ipath_devdata *); |
813 | 809 | ||
814 | u32 ipath_get_cr_errpkey(struct ipath_devdata *); | 810 | u32 ipath_get_cr_errpkey(struct ipath_devdata *); |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 40042184ad58..b5a24fbef70d 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1183,6 +1183,43 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq | |||
1183 | return cur + nreq >= wq->max_post; | 1183 | return cur + nreq >= wq->max_post; |
1184 | } | 1184 | } |
1185 | 1185 | ||
1186 | static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, | ||
1187 | u64 remote_addr, u32 rkey) | ||
1188 | { | ||
1189 | rseg->raddr = cpu_to_be64(remote_addr); | ||
1190 | rseg->rkey = cpu_to_be32(rkey); | ||
1191 | rseg->reserved = 0; | ||
1192 | } | ||
1193 | |||
1194 | static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr) | ||
1195 | { | ||
1196 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | ||
1197 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); | ||
1198 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1199 | } else { | ||
1200 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1201 | aseg->compare = 0; | ||
1202 | } | ||
1203 | |||
1204 | } | ||
1205 | |||
1206 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, | ||
1207 | struct ib_send_wr *wr) | ||
1208 | { | ||
1209 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); | ||
1210 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | ||
1211 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | ||
1212 | |||
1213 | } | ||
1214 | |||
1215 | static void set_data_seg(struct mlx4_wqe_data_seg *dseg, | ||
1216 | struct ib_sge *sg) | ||
1217 | { | ||
1218 | dseg->byte_count = cpu_to_be32(sg->length); | ||
1219 | dseg->lkey = cpu_to_be32(sg->lkey); | ||
1220 | dseg->addr = cpu_to_be64(sg->addr); | ||
1221 | } | ||
1222 | |||
1186 | int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 1223 | int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
1187 | struct ib_send_wr **bad_wr) | 1224 | struct ib_send_wr **bad_wr) |
1188 | { | 1225 | { |
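Factoring the segment stores into set_*_seg() helpers (used by the hunks that follow) turns the post_send cases into typed appends at a moving cursor. A trimmed sketch of how they compose for an RDMA write; the struct layouts are stand-ins and htobe*() models the kernel's cpu_to_be*():

    #include <endian.h>
    #include <stdint.h>

    /* trimmed stand-ins for the mlx4 wire-format segments */
    struct raddr_seg { uint64_t raddr; uint32_t rkey, reserved; };
    struct data_seg  { uint32_t byte_count, lkey; uint64_t addr; };
    struct sge       { uint64_t addr; uint32_t length, lkey; };

    static void set_raddr_seg(struct raddr_seg *r, uint64_t remote, uint32_t rkey)
    {
        r->raddr    = htobe64(remote);
        r->rkey     = htobe32(rkey);
        r->reserved = 0;
    }

    static void set_data_seg(struct data_seg *d, const struct sge *sg)
    {
        d->byte_count = htobe32(sg->length);
        d->lkey       = htobe32(sg->lkey);
        d->addr       = htobe64(sg->addr);
    }

    /* the post_send loop then just appends segments at a moving cursor */
    static void *append_rdma_write(void *wqe, uint64_t raddr, uint32_t rkey,
                                   const struct sge *sgl, int nsge)
    {
        set_raddr_seg(wqe, raddr, rkey);
        wqe = (char *)wqe + sizeof(struct raddr_seg);
        for (int i = 0; i < nsge; i++) {
            set_data_seg(wqe, &sgl[i]);
            wqe = (char *)wqe + sizeof(struct data_seg);
        }
        return wqe;
    }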
@@ -1238,26 +1275,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1238 | switch (wr->opcode) { | 1275 | switch (wr->opcode) { |
1239 | case IB_WR_ATOMIC_CMP_AND_SWP: | 1276 | case IB_WR_ATOMIC_CMP_AND_SWP: |
1240 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 1277 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
1241 | ((struct mlx4_wqe_raddr_seg *) wqe)->raddr = | 1278 | set_raddr_seg(wqe, wr->wr.atomic.remote_addr, |
1242 | cpu_to_be64(wr->wr.atomic.remote_addr); | 1279 | wr->wr.atomic.rkey); |
1243 | ((struct mlx4_wqe_raddr_seg *) wqe)->rkey = | ||
1244 | cpu_to_be32(wr->wr.atomic.rkey); | ||
1245 | ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0; | ||
1246 | |||
1247 | wqe += sizeof (struct mlx4_wqe_raddr_seg); | 1280 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
1248 | 1281 | ||
1249 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 1282 | set_atomic_seg(wqe, wr); |
1250 | ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add = | ||
1251 | cpu_to_be64(wr->wr.atomic.swap); | ||
1252 | ((struct mlx4_wqe_atomic_seg *) wqe)->compare = | ||
1253 | cpu_to_be64(wr->wr.atomic.compare_add); | ||
1254 | } else { | ||
1255 | ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add = | ||
1256 | cpu_to_be64(wr->wr.atomic.compare_add); | ||
1257 | ((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0; | ||
1258 | } | ||
1259 | |||
1260 | wqe += sizeof (struct mlx4_wqe_atomic_seg); | 1283 | wqe += sizeof (struct mlx4_wqe_atomic_seg); |
1284 | |||
1261 | size += (sizeof (struct mlx4_wqe_raddr_seg) + | 1285 | size += (sizeof (struct mlx4_wqe_raddr_seg) + |
1262 | sizeof (struct mlx4_wqe_atomic_seg)) / 16; | 1286 | sizeof (struct mlx4_wqe_atomic_seg)) / 16; |
1263 | 1287 | ||
@@ -1266,15 +1290,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1266 | case IB_WR_RDMA_READ: | 1290 | case IB_WR_RDMA_READ: |
1267 | case IB_WR_RDMA_WRITE: | 1291 | case IB_WR_RDMA_WRITE: |
1268 | case IB_WR_RDMA_WRITE_WITH_IMM: | 1292 | case IB_WR_RDMA_WRITE_WITH_IMM: |
1269 | ((struct mlx4_wqe_raddr_seg *) wqe)->raddr = | 1293 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, |
1270 | cpu_to_be64(wr->wr.rdma.remote_addr); | 1294 | wr->wr.rdma.rkey); |
1271 | ((struct mlx4_wqe_raddr_seg *) wqe)->rkey = | ||
1272 | cpu_to_be32(wr->wr.rdma.rkey); | ||
1273 | ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0; | ||
1274 | |||
1275 | wqe += sizeof (struct mlx4_wqe_raddr_seg); | 1295 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
1276 | size += sizeof (struct mlx4_wqe_raddr_seg) / 16; | 1296 | size += sizeof (struct mlx4_wqe_raddr_seg) / 16; |
1277 | |||
1278 | break; | 1297 | break; |
1279 | 1298 | ||
1280 | default: | 1299 | default: |
@@ -1284,13 +1303,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1284 | break; | 1303 | break; |
1285 | 1304 | ||
1286 | case IB_QPT_UD: | 1305 | case IB_QPT_UD: |
1287 | memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av, | 1306 | set_datagram_seg(wqe, wr); |
1288 | &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); | ||
1289 | ((struct mlx4_wqe_datagram_seg *) wqe)->dqpn = | ||
1290 | cpu_to_be32(wr->wr.ud.remote_qpn); | ||
1291 | ((struct mlx4_wqe_datagram_seg *) wqe)->qkey = | ||
1292 | cpu_to_be32(wr->wr.ud.remote_qkey); | ||
1293 | |||
1294 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 1307 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
1295 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 1308 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
1296 | break; | 1309 | break; |
@@ -1313,12 +1326,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1313 | } | 1326 | } |
1314 | 1327 | ||
1315 | for (i = 0; i < wr->num_sge; ++i) { | 1328 | for (i = 0; i < wr->num_sge; ++i) { |
1316 | ((struct mlx4_wqe_data_seg *) wqe)->byte_count = | 1329 | set_data_seg(wqe, wr->sg_list + i); |
1317 | cpu_to_be32(wr->sg_list[i].length); | ||
1318 | ((struct mlx4_wqe_data_seg *) wqe)->lkey = | ||
1319 | cpu_to_be32(wr->sg_list[i].lkey); | ||
1320 | ((struct mlx4_wqe_data_seg *) wqe)->addr = | ||
1321 | cpu_to_be64(wr->sg_list[i].addr); | ||
1322 | 1330 | ||
1323 | wqe += sizeof (struct mlx4_wqe_data_seg); | 1331 | wqe += sizeof (struct mlx4_wqe_data_seg); |
1324 | size += sizeof (struct mlx4_wqe_data_seg) / 16; | 1332 | size += sizeof (struct mlx4_wqe_data_seg) / 16; |
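Editor's note: each `size +=` line divides by 16 because the HCA counts WQE sizes in 16-byte units. Working from the field layouts the helpers imply (raddr: be64 + be32 + reserved be32; atomic: be64 + be64; data: be32 + be32 + be64 -- 16 bytes each), the per-segment costs are:

    /* sizeof (struct mlx4_wqe_raddr_seg)  / 16  == 16 / 16 == 1
     * sizeof (struct mlx4_wqe_atomic_seg) / 16  == 16 / 16 == 1
     * sizeof (struct mlx4_wqe_data_seg)   / 16  == 16 / 16 == 1
     *
     * so an atomic operation costs 1 + 1 = 2 units plus 1 per
     * scatter/gather entry, on top of the control segment. */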
@@ -1498,7 +1506,7 @@ static int to_ib_qp_access_flags(int mlx4_flags) | |||
1498 | static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr, | 1506 | static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr, |
1499 | struct mlx4_qp_path *path) | 1507 | struct mlx4_qp_path *path) |
1500 | { | 1508 | { |
1501 | memset(ib_ah_attr, 0, sizeof *path); | 1509 | memset(ib_ah_attr, 0, sizeof *ib_ah_attr); |
1502 | ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; | 1510 | ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; |
1503 | 1511 | ||
1504 | if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) | 1512 | if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) |
@@ -1515,7 +1523,7 @@ static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr, | |||
1515 | ib_ah_attr->grh.traffic_class = | 1523 | ib_ah_attr->grh.traffic_class = |
1516 | (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; | 1524 | (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; |
1517 | ib_ah_attr->grh.flow_label = | 1525 | ib_ah_attr->grh.flow_label = |
1518 | be32_to_cpu(path->tclass_flowlabel) & 0xffffff; | 1526 | be32_to_cpu(path->tclass_flowlabel) & 0xfffff; |
1519 | memcpy(ib_ah_attr->grh.dgid.raw, | 1527 | memcpy(ib_ah_attr->grh.dgid.raw, |
1520 | path->rgid, sizeof ib_ah_attr->grh.dgid.raw); | 1528 | path->rgid, sizeof ib_ah_attr->grh.dgid.raw); |
1521 | } | 1529 | } |
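Editor's note: the mask change at the end of this hunk is a genuine fix, not a cleanup. The GRH flow label is a 20-bit field, so the old 24-bit mask (0xffffff) would leak the low four bits of the traffic class into flow_label. The packing this code assumes is:

    u32 tf = be32_to_cpu(path->tclass_flowlabel);

    ib_ah_attr->grh.traffic_class = (tf >> 20) & 0xff;   /* bits 27:20 */
    ib_ah_attr->grh.flow_label    = tf & 0xfffff;        /* bits 19:0, 20 bits */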
@@ -1560,7 +1568,10 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr | |||
1560 | } | 1568 | } |
1561 | 1569 | ||
1562 | qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f; | 1570 | qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f; |
1563 | qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1; | 1571 | if (qp_attr->qp_state == IB_QPS_INIT) |
1572 | qp_attr->port_num = qp->port; | ||
1573 | else | ||
1574 | qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1; | ||
1564 | 1575 | ||
1565 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ | 1576 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ |
1566 | qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING; | 1577 | qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING; |
@@ -1578,17 +1589,25 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr | |||
1578 | 1589 | ||
1579 | done: | 1590 | done: |
1580 | qp_attr->cur_qp_state = qp_attr->qp_state; | 1591 | qp_attr->cur_qp_state = qp_attr->qp_state; |
1592 | qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; | ||
1593 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | ||
1594 | |||
1581 | if (!ibqp->uobject) { | 1595 | if (!ibqp->uobject) { |
1582 | qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; | 1596 | qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; |
1583 | qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; | 1597 | qp_attr->cap.max_send_sge = qp->sq.max_gs; |
1584 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | 1598 | } else { |
1585 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | 1599 | qp_attr->cap.max_send_wr = 0; |
1586 | qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) - | 1600 | qp_attr->cap.max_send_sge = 0; |
1587 | send_wqe_overhead(qp->ibqp.qp_type) - | ||
1588 | sizeof (struct mlx4_wqe_inline_seg); | ||
1589 | qp_init_attr->cap = qp_attr->cap; | ||
1590 | } | 1601 | } |
1591 | 1602 | ||
1603 | /* | ||
1604 | * We don't support inline sends for kernel QPs (yet), and we | ||
1605 | * don't know what userspace's value should be. | ||
1606 | */ | ||
1607 | qp_attr->cap.max_inline_data = 0; | ||
1608 | |||
1609 | qp_init_attr->cap = qp_attr->cap; | ||
1610 | |||
1592 | return 0; | 1611 | return 0; |
1593 | } | 1612 | } |
1594 | 1613 | ||
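Editor's note: after this change, receive-queue capabilities are reported for both kernel and userspace QPs, send-queue capabilities only for kernel QPs (userspace does its own SQ accounting), and max_inline_data is always zero. A hedged sketch of what a kernel-side consumer now sees:

    struct ib_qp_attr attr;
    struct ib_qp_init_attr init;

    if (!ib_query_qp(qp, &attr, IB_QP_CAP, &init))
            printk(KERN_INFO "recv_wr %u send_wr %u inline %u\n",
                   attr.cap.max_recv_wr,        /* valid for any QP */
                   attr.cap.max_send_wr,        /* 0 for userspace QPs */
                   attr.cap.max_inline_data);   /* always 0 here */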
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index aa563e61de65..76fed7545c53 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); | |||
67 | 67 | ||
68 | static int msi = 0; | 68 | static int msi = 0; |
69 | module_param(msi, int, 0444); | 69 | module_param(msi, int, 0444); |
70 | MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero"); | 70 | MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)"); |
71 | 71 | ||
72 | #else /* CONFIG_PCI_MSI */ | 72 | #else /* CONFIG_PCI_MSI */ |
73 | 73 | ||
@@ -1117,9 +1117,21 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) | |||
1117 | 1117 | ||
1118 | if (msi_x && !mthca_enable_msi_x(mdev)) | 1118 | if (msi_x && !mthca_enable_msi_x(mdev)) |
1119 | mdev->mthca_flags |= MTHCA_FLAG_MSI_X; | 1119 | mdev->mthca_flags |= MTHCA_FLAG_MSI_X; |
1120 | if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) && | 1120 | else if (msi) { |
1121 | !pci_enable_msi(pdev)) | 1121 | static int warned; |
1122 | mdev->mthca_flags |= MTHCA_FLAG_MSI; | 1122 | |
1123 | if (!warned) { | ||
1124 | printk(KERN_WARNING PFX "WARNING: MSI support will be " | ||
1125 | "removed from the ib_mthca driver in January 2008.\n"); | ||
1126 | printk(KERN_WARNING " If you are using MSI and cannot " | ||
1127 | "switch to MSI-X, please tell " | ||
1128 | "<general@lists.openfabrics.org>.\n"); | ||
1129 | ++warned; | ||
1130 | } | ||
1131 | |||
1132 | if (!pci_enable_msi(pdev)) | ||
1133 | mdev->mthca_flags |= MTHCA_FLAG_MSI; | ||
1134 | } | ||
1123 | 1135 | ||
1124 | if (mthca_cmd_init(mdev)) { | 1136 | if (mthca_cmd_init(mdev)) { |
1125 | mthca_err(mdev, "Failed to init command interface, aborting.\n"); | 1137 | mthca_err(mdev, "Failed to init command interface, aborting.\n"); |
@@ -1135,7 +1147,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) | |||
1135 | goto err_cmd; | 1147 | goto err_cmd; |
1136 | 1148 | ||
1137 | if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { | 1149 | if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { |
1138 | mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n", | 1150 | mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n", |
1139 | (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, | 1151 | (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, |
1140 | (int) (mdev->fw_ver & 0xffff), | 1152 | (int) (mdev->fw_ver & 0xffff), |
1141 | (int) (mthca_hca_table[hca_type].latest_fw >> 32), | 1153 | (int) (mthca_hca_table[hca_type].latest_fw >> 32), |
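Editor's note: the format-string change is subtle but real. "%3d" pads with spaces while "%03d" zero-pads, and firmware patch levels are conventionally written zero-padded. For example, with a patch level of 5:

    printk("%d.%d.%3d\n",  1, 2, 5);   /* prints "1.2.  5" */
    printk("%d.%d.%03d\n", 1, 2, 5);   /* prints "1.2.005" */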
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 11f1d99db40b..df01b2026a64 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -1578,6 +1578,45 @@ static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, | |||
1578 | return cur + nreq >= wq->max; | 1578 | return cur + nreq >= wq->max; |
1579 | } | 1579 | } |
1580 | 1580 | ||
1581 | static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, | ||
1582 | u64 remote_addr, u32 rkey) | ||
1583 | { | ||
1584 | rseg->raddr = cpu_to_be64(remote_addr); | ||
1585 | rseg->rkey = cpu_to_be32(rkey); | ||
1586 | rseg->reserved = 0; | ||
1587 | } | ||
1588 | |||
1589 | static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, | ||
1590 | struct ib_send_wr *wr) | ||
1591 | { | ||
1592 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | ||
1593 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); | ||
1594 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1595 | } else { | ||
1596 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1597 | aseg->compare = 0; | ||
1598 | } | ||
1599 | |||
1600 | } | ||
1601 | |||
1602 | static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, | ||
1603 | struct ib_send_wr *wr) | ||
1604 | { | ||
1605 | useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key); | ||
1606 | useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); | ||
1607 | useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | ||
1608 | useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | ||
1609 | |||
1610 | } | ||
1611 | |||
1612 | static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, | ||
1613 | struct ib_send_wr *wr) | ||
1614 | { | ||
1615 | memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); | ||
1616 | useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | ||
1617 | useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | ||
1618 | } | ||
1619 | |||
1581 | int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 1620 | int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
1582 | struct ib_send_wr **bad_wr) | 1621 | struct ib_send_wr **bad_wr) |
1583 | { | 1622 | { |
@@ -1590,8 +1629,15 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1590 | int nreq; | 1629 | int nreq; |
1591 | int i; | 1630 | int i; |
1592 | int size; | 1631 | int size; |
1593 | int size0 = 0; | 1632 | /* |
1594 | u32 f0 = 0; | 1633 | * f0 and size0 are only used if nreq != 0, and they will |
1634 | * always be initialized the first time through the main loop | ||
1635 | * before nreq is incremented. So nreq cannot become non-zero | ||
1636 | * without initializing f0 and size0, and they are in fact | ||
1637 | * never used uninitialized. | ||
1638 | */ | ||
1639 | int uninitialized_var(size0); | ||
1640 | u32 uninitialized_var(f0); | ||
1595 | int ind; | 1641 | int ind; |
1596 | u8 op0 = 0; | 1642 | u8 op0 = 0; |
1597 | 1643 | ||
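Editor's note: uninitialized_var() documents, and suppresses, a false-positive "may be used uninitialized" gcc warning without emitting any initialization code. In the compiler headers of this era it was defined roughly as:

    #define uninitialized_var(x) x = x

    /* so "int uninitialized_var(size0);" expands to "int size0 = size0;",
     * which gcc treats as initialized while generating no actual store */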
@@ -1636,25 +1682,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1636 | switch (wr->opcode) { | 1682 | switch (wr->opcode) { |
1637 | case IB_WR_ATOMIC_CMP_AND_SWP: | 1683 | case IB_WR_ATOMIC_CMP_AND_SWP: |
1638 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 1684 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
1639 | ((struct mthca_raddr_seg *) wqe)->raddr = | 1685 | set_raddr_seg(wqe, wr->wr.atomic.remote_addr, |
1640 | cpu_to_be64(wr->wr.atomic.remote_addr); | 1686 | wr->wr.atomic.rkey); |
1641 | ((struct mthca_raddr_seg *) wqe)->rkey = | ||
1642 | cpu_to_be32(wr->wr.atomic.rkey); | ||
1643 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | ||
1644 | |||
1645 | wqe += sizeof (struct mthca_raddr_seg); | 1687 | wqe += sizeof (struct mthca_raddr_seg); |
1646 | 1688 | ||
1647 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 1689 | set_atomic_seg(wqe, wr); |
1648 | ((struct mthca_atomic_seg *) wqe)->swap_add = | ||
1649 | cpu_to_be64(wr->wr.atomic.swap); | ||
1650 | ((struct mthca_atomic_seg *) wqe)->compare = | ||
1651 | cpu_to_be64(wr->wr.atomic.compare_add); | ||
1652 | } else { | ||
1653 | ((struct mthca_atomic_seg *) wqe)->swap_add = | ||
1654 | cpu_to_be64(wr->wr.atomic.compare_add); | ||
1655 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | ||
1656 | } | ||
1657 | |||
1658 | wqe += sizeof (struct mthca_atomic_seg); | 1690 | wqe += sizeof (struct mthca_atomic_seg); |
1659 | size += (sizeof (struct mthca_raddr_seg) + | 1691 | size += (sizeof (struct mthca_raddr_seg) + |
1660 | sizeof (struct mthca_atomic_seg)) / 16; | 1692 | sizeof (struct mthca_atomic_seg)) / 16; |
@@ -1663,12 +1695,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1663 | case IB_WR_RDMA_WRITE: | 1695 | case IB_WR_RDMA_WRITE: |
1664 | case IB_WR_RDMA_WRITE_WITH_IMM: | 1696 | case IB_WR_RDMA_WRITE_WITH_IMM: |
1665 | case IB_WR_RDMA_READ: | 1697 | case IB_WR_RDMA_READ: |
1666 | ((struct mthca_raddr_seg *) wqe)->raddr = | 1698 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, |
1667 | cpu_to_be64(wr->wr.rdma.remote_addr); | 1699 | wr->wr.rdma.rkey); |
1668 | ((struct mthca_raddr_seg *) wqe)->rkey = | 1700 | wqe += sizeof (struct mthca_raddr_seg); |
1669 | cpu_to_be32(wr->wr.rdma.rkey); | ||
1670 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | ||
1671 | wqe += sizeof (struct mthca_raddr_seg); | ||
1672 | size += sizeof (struct mthca_raddr_seg) / 16; | 1701 | size += sizeof (struct mthca_raddr_seg) / 16; |
1673 | break; | 1702 | break; |
1674 | 1703 | ||
@@ -1683,12 +1712,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1683 | switch (wr->opcode) { | 1712 | switch (wr->opcode) { |
1684 | case IB_WR_RDMA_WRITE: | 1713 | case IB_WR_RDMA_WRITE: |
1685 | case IB_WR_RDMA_WRITE_WITH_IMM: | 1714 | case IB_WR_RDMA_WRITE_WITH_IMM: |
1686 | ((struct mthca_raddr_seg *) wqe)->raddr = | 1715 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, |
1687 | cpu_to_be64(wr->wr.rdma.remote_addr); | 1716 | wr->wr.rdma.rkey); |
1688 | ((struct mthca_raddr_seg *) wqe)->rkey = | 1717 | wqe += sizeof (struct mthca_raddr_seg); |
1689 | cpu_to_be32(wr->wr.rdma.rkey); | ||
1690 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | ||
1691 | wqe += sizeof (struct mthca_raddr_seg); | ||
1692 | size += sizeof (struct mthca_raddr_seg) / 16; | 1718 | size += sizeof (struct mthca_raddr_seg) / 16; |
1693 | break; | 1719 | break; |
1694 | 1720 | ||
@@ -1700,16 +1726,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1700 | break; | 1726 | break; |
1701 | 1727 | ||
1702 | case UD: | 1728 | case UD: |
1703 | ((struct mthca_tavor_ud_seg *) wqe)->lkey = | 1729 | set_tavor_ud_seg(wqe, wr); |
1704 | cpu_to_be32(to_mah(wr->wr.ud.ah)->key); | 1730 | wqe += sizeof (struct mthca_tavor_ud_seg); |
1705 | ((struct mthca_tavor_ud_seg *) wqe)->av_addr = | ||
1706 | cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); | ||
1707 | ((struct mthca_tavor_ud_seg *) wqe)->dqpn = | ||
1708 | cpu_to_be32(wr->wr.ud.remote_qpn); | ||
1709 | ((struct mthca_tavor_ud_seg *) wqe)->qkey = | ||
1710 | cpu_to_be32(wr->wr.ud.remote_qkey); | ||
1711 | |||
1712 | wqe += sizeof (struct mthca_tavor_ud_seg); | ||
1713 | size += sizeof (struct mthca_tavor_ud_seg) / 16; | 1731 | size += sizeof (struct mthca_tavor_ud_seg) / 16; |
1714 | break; | 1732 | break; |
1715 | 1733 | ||
@@ -1734,13 +1752,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1734 | } | 1752 | } |
1735 | 1753 | ||
1736 | for (i = 0; i < wr->num_sge; ++i) { | 1754 | for (i = 0; i < wr->num_sge; ++i) { |
1737 | ((struct mthca_data_seg *) wqe)->byte_count = | 1755 | mthca_set_data_seg(wqe, wr->sg_list + i); |
1738 | cpu_to_be32(wr->sg_list[i].length); | 1756 | wqe += sizeof (struct mthca_data_seg); |
1739 | ((struct mthca_data_seg *) wqe)->lkey = | ||
1740 | cpu_to_be32(wr->sg_list[i].lkey); | ||
1741 | ((struct mthca_data_seg *) wqe)->addr = | ||
1742 | cpu_to_be64(wr->sg_list[i].addr); | ||
1743 | wqe += sizeof (struct mthca_data_seg); | ||
1744 | size += sizeof (struct mthca_data_seg) / 16; | 1757 | size += sizeof (struct mthca_data_seg) / 16; |
1745 | } | 1758 | } |
1746 | 1759 | ||
@@ -1768,11 +1781,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1768 | mthca_opcode[wr->opcode]); | 1781 | mthca_opcode[wr->opcode]); |
1769 | wmb(); | 1782 | wmb(); |
1770 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | 1783 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = |
1771 | cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size | | 1784 | cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | |
1772 | ((wr->send_flags & IB_SEND_FENCE) ? | 1785 | ((wr->send_flags & IB_SEND_FENCE) ? |
1773 | MTHCA_NEXT_FENCE : 0)); | 1786 | MTHCA_NEXT_FENCE : 0)); |
1774 | 1787 | ||
1775 | if (!size0) { | 1788 | if (!nreq) { |
1776 | size0 = size; | 1789 | size0 = size; |
1777 | op0 = mthca_opcode[wr->opcode]; | 1790 | op0 = mthca_opcode[wr->opcode]; |
1778 | f0 = wr->send_flags & IB_SEND_FENCE ? | 1791 | f0 = wr->send_flags & IB_SEND_FENCE ? |
@@ -1822,7 +1835,14 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1822 | int nreq; | 1835 | int nreq; |
1823 | int i; | 1836 | int i; |
1824 | int size; | 1837 | int size; |
1825 | int size0 = 0; | 1838 | /* |
1839 | * size0 is only used if nreq != 0, and it will always be | ||
1840 | * initialized the first time through the main loop before | ||
1841 | * nreq is incremented. So nreq cannot become non-zero | ||
1842 | * without initializing size0, and it is in fact never used | ||
1843 | * uninitialized. | ||
1844 | */ | ||
1845 | int uninitialized_var(size0); | ||
1826 | int ind; | 1846 | int ind; |
1827 | void *wqe; | 1847 | void *wqe; |
1828 | void *prev_wqe; | 1848 | void *prev_wqe; |
@@ -1863,13 +1883,8 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1863 | } | 1883 | } |
1864 | 1884 | ||
1865 | for (i = 0; i < wr->num_sge; ++i) { | 1885 | for (i = 0; i < wr->num_sge; ++i) { |
1866 | ((struct mthca_data_seg *) wqe)->byte_count = | 1886 | mthca_set_data_seg(wqe, wr->sg_list + i); |
1867 | cpu_to_be32(wr->sg_list[i].length); | 1887 | wqe += sizeof (struct mthca_data_seg); |
1868 | ((struct mthca_data_seg *) wqe)->lkey = | ||
1869 | cpu_to_be32(wr->sg_list[i].lkey); | ||
1870 | ((struct mthca_data_seg *) wqe)->addr = | ||
1871 | cpu_to_be64(wr->sg_list[i].addr); | ||
1872 | wqe += sizeof (struct mthca_data_seg); | ||
1873 | size += sizeof (struct mthca_data_seg) / 16; | 1888 | size += sizeof (struct mthca_data_seg) / 16; |
1874 | } | 1889 | } |
1875 | 1890 | ||
@@ -1881,7 +1896,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1881 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | 1896 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = |
1882 | cpu_to_be32(MTHCA_NEXT_DBD | size); | 1897 | cpu_to_be32(MTHCA_NEXT_DBD | size); |
1883 | 1898 | ||
1884 | if (!size0) | 1899 | if (!nreq) |
1885 | size0 = size; | 1900 | size0 = size; |
1886 | 1901 | ||
1887 | ++ind; | 1902 | ++ind; |
@@ -1903,7 +1918,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1903 | 1918 | ||
1904 | qp->rq.next_ind = ind; | 1919 | qp->rq.next_ind = ind; |
1905 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | 1920 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; |
1906 | size0 = 0; | ||
1907 | } | 1921 | } |
1908 | } | 1922 | } |
1909 | 1923 | ||
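Editor's note: the two changes in this function go together. size0 used to double as a "first WQE since the last doorbell" sentinel, which is why it had to be re-armed to 0 after every doorbell ring. Keying the first-iteration test off nreq, which already resets per doorbell batch, removes the sentinel role, so the explicit reset can go. Schematically:

    if (!nreq)               /* first WQE of this doorbell batch */
            size0 = size;    /* size0 is now purely an output ...      */
    ...
    if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
            nreq = 0;        /* ... and no longer needs re-arming here */
            ...
    }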
@@ -1945,8 +1959,15 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1945 | int nreq; | 1959 | int nreq; |
1946 | int i; | 1960 | int i; |
1947 | int size; | 1961 | int size; |
1948 | int size0 = 0; | 1962 | /* |
1949 | u32 f0 = 0; | 1963 | * f0 and size0 are only used if nreq != 0, and they will |
1964 | * always be initialized the first time through the main loop | ||
1965 | * before nreq is incremented. So nreq cannot become non-zero | ||
1966 | * without initializing f0 and size0, and they are in fact | ||
1967 | * never used uninitialized. | ||
1968 | */ | ||
1969 | int uninitialized_var(size0); | ||
1970 | u32 uninitialized_var(f0); | ||
1950 | int ind; | 1971 | int ind; |
1951 | u8 op0 = 0; | 1972 | u8 op0 = 0; |
1952 | 1973 | ||
@@ -1966,7 +1987,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1966 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | 1987 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); |
1967 | 1988 | ||
1968 | qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; | 1989 | qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; |
1969 | size0 = 0; | ||
1970 | 1990 | ||
1971 | /* | 1991 | /* |
1972 | * Make sure that descriptors are written before | 1992 | * Make sure that descriptors are written before |
@@ -2017,26 +2037,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2017 | switch (wr->opcode) { | 2037 | switch (wr->opcode) { |
2018 | case IB_WR_ATOMIC_CMP_AND_SWP: | 2038 | case IB_WR_ATOMIC_CMP_AND_SWP: |
2019 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 2039 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
2020 | ((struct mthca_raddr_seg *) wqe)->raddr = | 2040 | set_raddr_seg(wqe, wr->wr.atomic.remote_addr, |
2021 | cpu_to_be64(wr->wr.atomic.remote_addr); | 2041 | wr->wr.atomic.rkey); |
2022 | ((struct mthca_raddr_seg *) wqe)->rkey = | ||
2023 | cpu_to_be32(wr->wr.atomic.rkey); | ||
2024 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | ||
2025 | |||
2026 | wqe += sizeof (struct mthca_raddr_seg); | 2042 | wqe += sizeof (struct mthca_raddr_seg); |
2027 | 2043 | ||
2028 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 2044 | set_atomic_seg(wqe, wr); |
2029 | ((struct mthca_atomic_seg *) wqe)->swap_add = | 2045 | wqe += sizeof (struct mthca_atomic_seg); |
2030 | cpu_to_be64(wr->wr.atomic.swap); | ||
2031 | ((struct mthca_atomic_seg *) wqe)->compare = | ||
2032 | cpu_to_be64(wr->wr.atomic.compare_add); | ||
2033 | } else { | ||
2034 | ((struct mthca_atomic_seg *) wqe)->swap_add = | ||
2035 | cpu_to_be64(wr->wr.atomic.compare_add); | ||
2036 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | ||
2037 | } | ||
2038 | |||
2039 | wqe += sizeof (struct mthca_atomic_seg); | ||
2040 | size += (sizeof (struct mthca_raddr_seg) + | 2046 | size += (sizeof (struct mthca_raddr_seg) + |
2041 | sizeof (struct mthca_atomic_seg)) / 16; | 2047 | sizeof (struct mthca_atomic_seg)) / 16; |
2042 | break; | 2048 | break; |
@@ -2044,12 +2050,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2044 | case IB_WR_RDMA_READ: | 2050 | case IB_WR_RDMA_READ: |
2045 | case IB_WR_RDMA_WRITE: | 2051 | case IB_WR_RDMA_WRITE: |
2046 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2052 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2047 | ((struct mthca_raddr_seg *) wqe)->raddr = | 2053 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, |
2048 | cpu_to_be64(wr->wr.rdma.remote_addr); | 2054 | wr->wr.rdma.rkey); |
2049 | ((struct mthca_raddr_seg *) wqe)->rkey = | 2055 | wqe += sizeof (struct mthca_raddr_seg); |
2050 | cpu_to_be32(wr->wr.rdma.rkey); | ||
2051 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | ||
2052 | wqe += sizeof (struct mthca_raddr_seg); | ||
2053 | size += sizeof (struct mthca_raddr_seg) / 16; | 2056 | size += sizeof (struct mthca_raddr_seg) / 16; |
2054 | break; | 2057 | break; |
2055 | 2058 | ||
@@ -2064,12 +2067,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2064 | switch (wr->opcode) { | 2067 | switch (wr->opcode) { |
2065 | case IB_WR_RDMA_WRITE: | 2068 | case IB_WR_RDMA_WRITE: |
2066 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2069 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2067 | ((struct mthca_raddr_seg *) wqe)->raddr = | 2070 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, |
2068 | cpu_to_be64(wr->wr.rdma.remote_addr); | 2071 | wr->wr.rdma.rkey); |
2069 | ((struct mthca_raddr_seg *) wqe)->rkey = | 2072 | wqe += sizeof (struct mthca_raddr_seg); |
2070 | cpu_to_be32(wr->wr.rdma.rkey); | ||
2071 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | ||
2072 | wqe += sizeof (struct mthca_raddr_seg); | ||
2073 | size += sizeof (struct mthca_raddr_seg) / 16; | 2073 | size += sizeof (struct mthca_raddr_seg) / 16; |
2074 | break; | 2074 | break; |
2075 | 2075 | ||
@@ -2081,14 +2081,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2081 | break; | 2081 | break; |
2082 | 2082 | ||
2083 | case UD: | 2083 | case UD: |
2084 | memcpy(((struct mthca_arbel_ud_seg *) wqe)->av, | 2084 | set_arbel_ud_seg(wqe, wr); |
2085 | to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); | 2085 | wqe += sizeof (struct mthca_arbel_ud_seg); |
2086 | ((struct mthca_arbel_ud_seg *) wqe)->dqpn = | ||
2087 | cpu_to_be32(wr->wr.ud.remote_qpn); | ||
2088 | ((struct mthca_arbel_ud_seg *) wqe)->qkey = | ||
2089 | cpu_to_be32(wr->wr.ud.remote_qkey); | ||
2090 | |||
2091 | wqe += sizeof (struct mthca_arbel_ud_seg); | ||
2092 | size += sizeof (struct mthca_arbel_ud_seg) / 16; | 2086 | size += sizeof (struct mthca_arbel_ud_seg) / 16; |
2093 | break; | 2087 | break; |
2094 | 2088 | ||
@@ -2113,13 +2107,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2113 | } | 2107 | } |
2114 | 2108 | ||
2115 | for (i = 0; i < wr->num_sge; ++i) { | 2109 | for (i = 0; i < wr->num_sge; ++i) { |
2116 | ((struct mthca_data_seg *) wqe)->byte_count = | 2110 | mthca_set_data_seg(wqe, wr->sg_list + i); |
2117 | cpu_to_be32(wr->sg_list[i].length); | 2111 | wqe += sizeof (struct mthca_data_seg); |
2118 | ((struct mthca_data_seg *) wqe)->lkey = | ||
2119 | cpu_to_be32(wr->sg_list[i].lkey); | ||
2120 | ((struct mthca_data_seg *) wqe)->addr = | ||
2121 | cpu_to_be64(wr->sg_list[i].addr); | ||
2122 | wqe += sizeof (struct mthca_data_seg); | ||
2123 | size += sizeof (struct mthca_data_seg) / 16; | 2112 | size += sizeof (struct mthca_data_seg) / 16; |
2124 | } | 2113 | } |
2125 | 2114 | ||
@@ -2151,7 +2140,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2151 | ((wr->send_flags & IB_SEND_FENCE) ? | 2140 | ((wr->send_flags & IB_SEND_FENCE) ? |
2152 | MTHCA_NEXT_FENCE : 0)); | 2141 | MTHCA_NEXT_FENCE : 0)); |
2153 | 2142 | ||
2154 | if (!size0) { | 2143 | if (!nreq) { |
2155 | size0 = size; | 2144 | size0 = size; |
2156 | op0 = mthca_opcode[wr->opcode]; | 2145 | op0 = mthca_opcode[wr->opcode]; |
2157 | f0 = wr->send_flags & IB_SEND_FENCE ? | 2146 | f0 = wr->send_flags & IB_SEND_FENCE ? |
@@ -2241,20 +2230,12 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
2241 | } | 2230 | } |
2242 | 2231 | ||
2243 | for (i = 0; i < wr->num_sge; ++i) { | 2232 | for (i = 0; i < wr->num_sge; ++i) { |
2244 | ((struct mthca_data_seg *) wqe)->byte_count = | 2233 | mthca_set_data_seg(wqe, wr->sg_list + i); |
2245 | cpu_to_be32(wr->sg_list[i].length); | ||
2246 | ((struct mthca_data_seg *) wqe)->lkey = | ||
2247 | cpu_to_be32(wr->sg_list[i].lkey); | ||
2248 | ((struct mthca_data_seg *) wqe)->addr = | ||
2249 | cpu_to_be64(wr->sg_list[i].addr); | ||
2250 | wqe += sizeof (struct mthca_data_seg); | 2234 | wqe += sizeof (struct mthca_data_seg); |
2251 | } | 2235 | } |
2252 | 2236 | ||
2253 | if (i < qp->rq.max_gs) { | 2237 | if (i < qp->rq.max_gs) |
2254 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | 2238 | mthca_set_data_seg_inval(wqe); |
2255 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); | ||
2256 | ((struct mthca_data_seg *) wqe)->addr = 0; | ||
2257 | } | ||
2258 | 2239 | ||
2259 | qp->wrid[ind] = wr->wr_id; | 2240 | qp->wrid[ind] = wr->wr_id; |
2260 | 2241 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index b8f05a526673..88d219e730ad 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c | |||
@@ -543,20 +543,12 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
543 | } | 543 | } |
544 | 544 | ||
545 | for (i = 0; i < wr->num_sge; ++i) { | 545 | for (i = 0; i < wr->num_sge; ++i) { |
546 | ((struct mthca_data_seg *) wqe)->byte_count = | 546 | mthca_set_data_seg(wqe, wr->sg_list + i); |
547 | cpu_to_be32(wr->sg_list[i].length); | ||
548 | ((struct mthca_data_seg *) wqe)->lkey = | ||
549 | cpu_to_be32(wr->sg_list[i].lkey); | ||
550 | ((struct mthca_data_seg *) wqe)->addr = | ||
551 | cpu_to_be64(wr->sg_list[i].addr); | ||
552 | wqe += sizeof (struct mthca_data_seg); | 547 | wqe += sizeof (struct mthca_data_seg); |
553 | } | 548 | } |
554 | 549 | ||
555 | if (i < srq->max_gs) { | 550 | if (i < srq->max_gs) |
556 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | 551 | mthca_set_data_seg_inval(wqe); |
557 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); | ||
558 | ((struct mthca_data_seg *) wqe)->addr = 0; | ||
559 | } | ||
560 | 552 | ||
561 | ((struct mthca_next_seg *) prev_wqe)->nda_op = | 553 | ((struct mthca_next_seg *) prev_wqe)->nda_op = |
562 | cpu_to_be32((ind << srq->wqe_shift) | 1); | 554 | cpu_to_be32((ind << srq->wqe_shift) | 1); |
@@ -662,20 +654,12 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
662 | } | 654 | } |
663 | 655 | ||
664 | for (i = 0; i < wr->num_sge; ++i) { | 656 | for (i = 0; i < wr->num_sge; ++i) { |
665 | ((struct mthca_data_seg *) wqe)->byte_count = | 657 | mthca_set_data_seg(wqe, wr->sg_list + i); |
666 | cpu_to_be32(wr->sg_list[i].length); | ||
667 | ((struct mthca_data_seg *) wqe)->lkey = | ||
668 | cpu_to_be32(wr->sg_list[i].lkey); | ||
669 | ((struct mthca_data_seg *) wqe)->addr = | ||
670 | cpu_to_be64(wr->sg_list[i].addr); | ||
671 | wqe += sizeof (struct mthca_data_seg); | 658 | wqe += sizeof (struct mthca_data_seg); |
672 | } | 659 | } |
673 | 660 | ||
674 | if (i < srq->max_gs) { | 661 | if (i < srq->max_gs) |
675 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | 662 | mthca_set_data_seg_inval(wqe); |
676 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); | ||
677 | ((struct mthca_data_seg *) wqe)->addr = 0; | ||
678 | } | ||
679 | 663 | ||
680 | srq->wrid[ind] = wr->wr_id; | 664 | srq->wrid[ind] = wr->wr_id; |
681 | srq->first_free = next_ind; | 665 | srq->first_free = next_ind; |
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h index e7d2c1e86199..f6a66fe78e48 100644 --- a/drivers/infiniband/hw/mthca/mthca_wqe.h +++ b/drivers/infiniband/hw/mthca/mthca_wqe.h | |||
@@ -113,4 +113,19 @@ struct mthca_mlx_seg { | |||
113 | __be16 vcrc; | 113 | __be16 vcrc; |
114 | }; | 114 | }; |
115 | 115 | ||
116 | static __always_inline void mthca_set_data_seg(struct mthca_data_seg *dseg, | ||
117 | struct ib_sge *sg) | ||
118 | { | ||
119 | dseg->byte_count = cpu_to_be32(sg->length); | ||
120 | dseg->lkey = cpu_to_be32(sg->lkey); | ||
121 | dseg->addr = cpu_to_be64(sg->addr); | ||
122 | } | ||
123 | |||
124 | static __always_inline void mthca_set_data_seg_inval(struct mthca_data_seg *dseg) | ||
125 | { | ||
126 | dseg->byte_count = 0; | ||
127 | dseg->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); | ||
128 | dseg->addr = 0; | ||
129 | } | ||
130 | |||
116 | #endif /* MTHCA_WQE_H */ | 131 | #endif /* MTHCA_WQE_H */ |
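Editor's note: the new mthca_set_data_seg_inval() captures a receive-side idiom: when a posted work request uses fewer scatter entries than the queue allows, the next slot is stamped with an "invalid" lkey so the HCA stops scanning there. The posting loops above now read, schematically:

    for (i = 0; i < wr->num_sge; ++i) {
            mthca_set_data_seg(wqe, wr->sg_list + i);
            wqe += sizeof (struct mthca_data_seg);
    }
    if (i < qp->rq.max_gs)
            mthca_set_data_seg_inval(wqe);   /* MTHCA_INVAL_LKEY terminator */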
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index e2353701e8bb..1ee867b1b341 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -310,8 +310,6 @@ int iser_conn_init(struct iser_conn **ib_conn); | |||
310 | 310 | ||
311 | void iser_conn_terminate(struct iser_conn *ib_conn); | 311 | void iser_conn_terminate(struct iser_conn *ib_conn); |
312 | 312 | ||
313 | void iser_conn_release(struct iser_conn *ib_conn); | ||
314 | |||
315 | void iser_rcv_completion(struct iser_desc *desc, | 313 | void iser_rcv_completion(struct iser_desc *desc, |
316 | unsigned long dto_xfer_len); | 314 | unsigned long dto_xfer_len); |
317 | 315 | ||
@@ -329,9 +327,6 @@ void iser_reg_single(struct iser_device *device, | |||
329 | struct iser_regd_buf *regd_buf, | 327 | struct iser_regd_buf *regd_buf, |
330 | enum dma_data_direction direction); | 328 | enum dma_data_direction direction); |
331 | 329 | ||
332 | int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask, | ||
333 | enum iser_data_dir cmd_dir); | ||
334 | |||
335 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask, | 330 | void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask, |
336 | enum iser_data_dir cmd_dir); | 331 | enum iser_data_dir cmd_dir); |
337 | 332 | ||
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index fc9f1fd0ae54..36cdf77ae92a 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -103,8 +103,8 @@ void iser_reg_single(struct iser_device *device, | |||
103 | /** | 103 | /** |
104 | * iser_start_rdma_unaligned_sg | 104 | * iser_start_rdma_unaligned_sg |
105 | */ | 105 | */ |
106 | int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | 106 | static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, |
107 | enum iser_data_dir cmd_dir) | 107 | enum iser_data_dir cmd_dir) |
108 | { | 108 | { |
109 | int dma_nents; | 109 | int dma_nents; |
110 | struct ib_device *dev; | 110 | struct ib_device *dev; |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 2044de1164ac..d42ec0156eec 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -311,6 +311,29 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, | |||
311 | } | 311 | } |
312 | 312 | ||
313 | /** | 313 | /** |
314 | * Frees all conn objects and deallocates the conn descriptor | ||
315 | */ | ||
316 | static void iser_conn_release(struct iser_conn *ib_conn) | ||
317 | { | ||
318 | struct iser_device *device = ib_conn->device; | ||
319 | |||
320 | BUG_ON(ib_conn->state != ISER_CONN_DOWN); | ||
321 | |||
322 | mutex_lock(&ig.connlist_mutex); | ||
323 | list_del(&ib_conn->conn_list); | ||
324 | mutex_unlock(&ig.connlist_mutex); | ||
325 | |||
326 | iser_free_ib_conn_res(ib_conn); | ||
327 | ib_conn->device = NULL; | ||
328 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ | ||
329 | if (device != NULL) | ||
330 | iser_device_try_release(device); | ||
331 | if (ib_conn->iser_conn) | ||
332 | ib_conn->iser_conn->ib_conn = NULL; | ||
333 | kfree(ib_conn); | ||
334 | } | ||
335 | |||
336 | /** | ||
314 | * triggers start of the disconnect procedures and waits for them to be done | 337 | * triggers start of the disconnect procedures and waits for them to be done |
315 | */ | 338 | */ |
316 | void iser_conn_terminate(struct iser_conn *ib_conn) | 339 | void iser_conn_terminate(struct iser_conn *ib_conn) |
@@ -550,30 +573,6 @@ connect_failure: | |||
550 | } | 573 | } |
551 | 574 | ||
552 | /** | 575 | /** |
553 | * Frees all conn objects and deallocates the conn descriptor | ||
554 | */ | ||
555 | void iser_conn_release(struct iser_conn *ib_conn) | ||
556 | { | ||
557 | struct iser_device *device = ib_conn->device; | ||
558 | |||
559 | BUG_ON(ib_conn->state != ISER_CONN_DOWN); | ||
560 | |||
561 | mutex_lock(&ig.connlist_mutex); | ||
562 | list_del(&ib_conn->conn_list); | ||
563 | mutex_unlock(&ig.connlist_mutex); | ||
564 | |||
565 | iser_free_ib_conn_res(ib_conn); | ||
566 | ib_conn->device = NULL; | ||
567 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ | ||
568 | if (device != NULL) | ||
569 | iser_device_try_release(device); | ||
570 | if (ib_conn->iser_conn) | ||
571 | ib_conn->iser_conn->ib_conn = NULL; | ||
572 | kfree(ib_conn); | ||
573 | } | ||
574 | |||
575 | |||
576 | /** | ||
577 | * iser_reg_page_vec - Register physical memory | 576 | * iser_reg_page_vec - Register physical memory |
578 | * | 577 | * |
579 | * returns: 0 on success, errno code on failure | 578 | * returns: 0 on success, errno code on failure |
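Editor's note: all three iser hunks are the same cleanup: a function called from only one file loses its header prototype and becomes static. Because C requires a declaration before use within a translation unit, iser_conn_release() also moves above its first caller; the alternative would have been a file-local forward declaration:

    /* equivalent, had the definition stayed at the bottom of the file: */
    static void iser_conn_release(struct iser_conn *ib_conn);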
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c index 1bb088aeaf71..6b32ec94b3a8 100644 --- a/drivers/net/mlx4/catas.c +++ b/drivers/net/mlx4/catas.c | |||
@@ -30,41 +30,133 @@ | |||
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/workqueue.h> | ||
34 | |||
33 | #include "mlx4.h" | 35 | #include "mlx4.h" |
34 | 36 | ||
35 | void mlx4_handle_catas_err(struct mlx4_dev *dev) | 37 | enum { |
38 | MLX4_CATAS_POLL_INTERVAL = 5 * HZ, | ||
39 | }; | ||
40 | |||
41 | static DEFINE_SPINLOCK(catas_lock); | ||
42 | |||
43 | static LIST_HEAD(catas_list); | ||
44 | static struct workqueue_struct *catas_wq; | ||
45 | static struct work_struct catas_work; | ||
46 | |||
47 | static int internal_err_reset = 1; | ||
48 | module_param(internal_err_reset, int, 0644); | ||
49 | MODULE_PARM_DESC(internal_err_reset, | ||
50 | "Reset device on internal errors if non-zero (default 1)"); | ||
51 | |||
52 | static void dump_err_buf(struct mlx4_dev *dev) | ||
36 | { | 53 | { |
37 | struct mlx4_priv *priv = mlx4_priv(dev); | 54 | struct mlx4_priv *priv = mlx4_priv(dev); |
38 | 55 | ||
39 | int i; | 56 | int i; |
40 | 57 | ||
41 | mlx4_err(dev, "Catastrophic error detected:\n"); | 58 | mlx4_err(dev, "Internal error detected:\n"); |
42 | for (i = 0; i < priv->fw.catas_size; ++i) | 59 | for (i = 0; i < priv->fw.catas_size; ++i) |
43 | mlx4_err(dev, " buf[%02x]: %08x\n", | 60 | mlx4_err(dev, " buf[%02x]: %08x\n", |
44 | i, swab32(readl(priv->catas_err.map + i))); | 61 | i, swab32(readl(priv->catas_err.map + i))); |
62 | } | ||
45 | 63 | ||
46 | mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); | 64 | static void poll_catas(unsigned long dev_ptr) |
65 | { | ||
66 | struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; | ||
67 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
68 | |||
69 | if (readl(priv->catas_err.map)) { | ||
70 | dump_err_buf(dev); | ||
71 | |||
72 | mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); | ||
73 | |||
74 | if (internal_err_reset) { | ||
75 | spin_lock(&catas_lock); | ||
76 | list_add(&priv->catas_err.list, &catas_list); | ||
77 | spin_unlock(&catas_lock); | ||
78 | |||
79 | queue_work(catas_wq, &catas_work); | ||
80 | } | ||
81 | } else | ||
82 | mod_timer(&priv->catas_err.timer, | ||
83 | round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); | ||
47 | } | 84 | } |
48 | 85 | ||
49 | void mlx4_map_catas_buf(struct mlx4_dev *dev) | 86 | static void catas_reset(struct work_struct *work) |
87 | { | ||
88 | struct mlx4_priv *priv, *tmppriv; | ||
89 | struct mlx4_dev *dev; | ||
90 | |||
91 | LIST_HEAD(tlist); | ||
92 | int ret; | ||
93 | |||
94 | spin_lock_irq(&catas_lock); | ||
95 | list_splice_init(&catas_list, &tlist); | ||
96 | spin_unlock_irq(&catas_lock); | ||
97 | |||
98 | list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { | ||
99 | ret = mlx4_restart_one(priv->dev.pdev); | ||
100 | dev = &priv->dev; | ||
101 | if (ret) | ||
102 | mlx4_err(dev, "Reset failed (%d)\n", ret); | ||
103 | else | ||
104 | mlx4_dbg(dev, "Reset succeeded\n"); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | void mlx4_start_catas_poll(struct mlx4_dev *dev) | ||
50 | { | 109 | { |
51 | struct mlx4_priv *priv = mlx4_priv(dev); | 110 | struct mlx4_priv *priv = mlx4_priv(dev); |
52 | unsigned long addr; | 111 | unsigned long addr; |
53 | 112 | ||
113 | INIT_LIST_HEAD(&priv->catas_err.list); | ||
114 | init_timer(&priv->catas_err.timer); | ||
115 | priv->catas_err.map = NULL; | ||
116 | |||
54 | addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + | 117 | addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + |
55 | priv->fw.catas_offset; | 118 | priv->fw.catas_offset; |
56 | 119 | ||
57 | priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); | 120 | priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); |
58 | if (!priv->catas_err.map) | 121 | if (!priv->catas_err.map) { |
59 | mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n", | 122 | mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n", |
60 | addr); | 123 | addr); |
124 | return; | ||
125 | } | ||
61 | 126 | ||
127 | priv->catas_err.timer.data = (unsigned long) dev; | ||
128 | priv->catas_err.timer.function = poll_catas; | ||
129 | priv->catas_err.timer.expires = | ||
130 | round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); | ||
131 | add_timer(&priv->catas_err.timer); | ||
62 | } | 132 | } |
63 | 133 | ||
64 | void mlx4_unmap_catas_buf(struct mlx4_dev *dev) | 134 | void mlx4_stop_catas_poll(struct mlx4_dev *dev) |
65 | { | 135 | { |
66 | struct mlx4_priv *priv = mlx4_priv(dev); | 136 | struct mlx4_priv *priv = mlx4_priv(dev); |
67 | 137 | ||
138 | del_timer_sync(&priv->catas_err.timer); | ||
139 | |||
68 | if (priv->catas_err.map) | 140 | if (priv->catas_err.map) |
69 | iounmap(priv->catas_err.map); | 141 | iounmap(priv->catas_err.map); |
142 | |||
143 | spin_lock_irq(&catas_lock); | ||
144 | list_del(&priv->catas_err.list); | ||
145 | spin_unlock_irq(&catas_lock); | ||
146 | } | ||
147 | |||
148 | int __init mlx4_catas_init(void) | ||
149 | { | ||
150 | INIT_WORK(&catas_work, catas_reset); | ||
151 | |||
152 | catas_wq = create_singlethread_workqueue("mlx4_err"); | ||
153 | if (!catas_wq) | ||
154 | return -ENOMEM; | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | void mlx4_catas_cleanup(void) | ||
160 | { | ||
161 | destroy_workqueue(catas_wq); | ||
70 | } | 162 | } |
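Editor's note: the new scheme trades a dedicated interrupt for software polling. A timer, which runs in atomic context, only reads the error buffer; the sleeping work of tearing down and re-probing the device is deferred to a single-threaded workqueue running in process context. round_jiffies() aligns each 5-second expiry to a whole-second tick so an otherwise idle CPU is not woken at arbitrary offsets; roughly:

    /* with HZ == 1000 and jiffies == 12345: */
    unsigned long t = jiffies + MLX4_CATAS_POLL_INTERVAL;   /* 17345 */
    t = round_jiffies(t);   /* rounded to a whole-second tick, e.g. ~17000,
                             * so many such timers across devices coalesce */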
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 27a82cecd693..2095c843fa15 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -89,14 +89,12 @@ struct mlx4_eq_context { | |||
89 | (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ | 89 | (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ |
90 | (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ | 90 | (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ |
91 | (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ | 91 | (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ |
92 | (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ | ||
93 | (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ | 92 | (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ |
94 | (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ | 93 | (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ |
95 | (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ | 94 | (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ |
96 | (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ | 95 | (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ |
97 | (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ | 96 | (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ |
98 | (1ull << MLX4_EVENT_TYPE_CMD)) | 97 | (1ull << MLX4_EVENT_TYPE_CMD)) |
99 | #define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | ||
100 | 98 | ||
101 | struct mlx4_eqe { | 99 | struct mlx4_eqe { |
102 | u8 reserved1; | 100 | u8 reserved1; |
@@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) | |||
264 | 262 | ||
265 | writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); | 263 | writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); |
266 | 264 | ||
267 | for (i = 0; i < MLX4_EQ_CATAS; ++i) | 265 | for (i = 0; i < MLX4_NUM_EQ; ++i) |
268 | work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); | 266 | work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); |
269 | 267 | ||
270 | return IRQ_RETVAL(work); | 268 | return IRQ_RETVAL(work); |
@@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) | |||
281 | return IRQ_HANDLED; | 279 | return IRQ_HANDLED; |
282 | } | 280 | } |
283 | 281 | ||
284 | static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr) | ||
285 | { | ||
286 | mlx4_handle_catas_err(dev_ptr); | ||
287 | |||
288 | /* MSI-X vectors always belong to us */ | ||
289 | return IRQ_HANDLED; | ||
290 | } | ||
291 | |||
292 | static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, | 282 | static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, |
293 | int eq_num) | 283 | int eq_num) |
294 | { | 284 | { |
@@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) | |||
490 | 480 | ||
491 | if (eq_table->have_irq) | 481 | if (eq_table->have_irq) |
492 | free_irq(dev->pdev->irq, dev); | 482 | free_irq(dev->pdev->irq, dev); |
493 | for (i = 0; i < MLX4_EQ_CATAS; ++i) | 483 | for (i = 0; i < MLX4_NUM_EQ; ++i) |
494 | if (eq_table->eq[i].have_irq) | 484 | if (eq_table->eq[i].have_irq) |
495 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); | 485 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); |
496 | if (eq_table->eq[MLX4_EQ_CATAS].have_irq) | ||
497 | free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev); | ||
498 | } | 486 | } |
499 | 487 | ||
500 | static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev) | 488 | static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev) |
@@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) | |||
598 | if (dev->flags & MLX4_FLAG_MSI_X) { | 586 | if (dev->flags & MLX4_FLAG_MSI_X) { |
599 | static const char *eq_name[] = { | 587 | static const char *eq_name[] = { |
600 | [MLX4_EQ_COMP] = DRV_NAME " (comp)", | 588 | [MLX4_EQ_COMP] = DRV_NAME " (comp)", |
601 | [MLX4_EQ_ASYNC] = DRV_NAME " (async)", | 589 | [MLX4_EQ_ASYNC] = DRV_NAME " (async)" |
602 | [MLX4_EQ_CATAS] = DRV_NAME " (catas)" | ||
603 | }; | 590 | }; |
604 | 591 | ||
605 | err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS, | 592 | for (i = 0; i < MLX4_NUM_EQ; ++i) { |
606 | &priv->eq_table.eq[MLX4_EQ_CATAS]); | ||
607 | if (err) | ||
608 | goto err_out_async; | ||
609 | |||
610 | for (i = 0; i < MLX4_EQ_CATAS; ++i) { | ||
611 | err = request_irq(priv->eq_table.eq[i].irq, | 593 | err = request_irq(priv->eq_table.eq[i].irq, |
612 | mlx4_msi_x_interrupt, | 594 | mlx4_msi_x_interrupt, |
613 | 0, eq_name[i], priv->eq_table.eq + i); | 595 | 0, eq_name[i], priv->eq_table.eq + i); |
614 | if (err) | 596 | if (err) |
615 | goto err_out_catas; | 597 | goto err_out_async; |
616 | 598 | ||
617 | priv->eq_table.eq[i].have_irq = 1; | 599 | priv->eq_table.eq[i].have_irq = 1; |
618 | } | 600 | } |
619 | 601 | ||
620 | err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq, | ||
621 | mlx4_catas_interrupt, 0, | ||
622 | eq_name[MLX4_EQ_CATAS], dev); | ||
623 | if (err) | ||
624 | goto err_out_catas; | ||
625 | |||
626 | priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1; | ||
627 | } else { | 602 | } else { |
628 | err = request_irq(dev->pdev->irq, mlx4_interrupt, | 603 | err = request_irq(dev->pdev->irq, mlx4_interrupt, |
629 | IRQF_SHARED, DRV_NAME, dev); | 604 | IRQF_SHARED, DRV_NAME, dev); |
@@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) | |||
639 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", | 614 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", |
640 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); | 615 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); |
641 | 616 | ||
642 | for (i = 0; i < MLX4_EQ_CATAS; ++i) | 617 | for (i = 0; i < MLX4_NUM_EQ; ++i) |
643 | eq_set_ci(&priv->eq_table.eq[i], 1); | 618 | eq_set_ci(&priv->eq_table.eq[i], 1); |
644 | 619 | ||
645 | if (dev->flags & MLX4_FLAG_MSI_X) { | ||
646 | err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0, | ||
647 | priv->eq_table.eq[MLX4_EQ_CATAS].eqn); | ||
648 | if (err) | ||
649 | mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n", | ||
650 | priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err); | ||
651 | } | ||
652 | |||
653 | return 0; | 620 | return 0; |
654 | 621 | ||
655 | err_out_catas: | ||
656 | mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); | ||
657 | |||
658 | err_out_async: | 622 | err_out_async: |
659 | mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]); | 623 | mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]); |
660 | 624 | ||
@@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | |||
675 | struct mlx4_priv *priv = mlx4_priv(dev); | 639 | struct mlx4_priv *priv = mlx4_priv(dev); |
676 | int i; | 640 | int i; |
677 | 641 | ||
678 | if (dev->flags & MLX4_FLAG_MSI_X) | ||
679 | mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1, | ||
680 | priv->eq_table.eq[MLX4_EQ_CATAS].eqn); | ||
681 | |||
682 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, | 642 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, |
683 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); | 643 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); |
684 | 644 | ||
685 | mlx4_free_irqs(dev); | 645 | mlx4_free_irqs(dev); |
686 | 646 | ||
687 | for (i = 0; i < MLX4_EQ_CATAS; ++i) | 647 | for (i = 0; i < MLX4_NUM_EQ; ++i) |
688 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); | 648 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); |
689 | if (dev->flags & MLX4_FLAG_MSI_X) | ||
690 | mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); | ||
691 | 649 | ||
692 | mlx4_unmap_clr_int(dev); | 650 | mlx4_unmap_clr_int(dev); |
693 | 651 | ||
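Editor's note: with the catas EQ dropped from the MLX4_EQ_* enum (see the mlx4.h hunk later in this patch), the trailing enumerator MLX4_NUM_EQ shrinks automatically, and every loop here can use it directly instead of stopping early at MLX4_EQ_CATAS. This is the standard count-sentinel idiom:

    enum {
            MLX4_EQ_ASYNC,
            MLX4_EQ_COMP,
            MLX4_NUM_EQ      /* == 2: always the number of real entries */
    };

    for (i = 0; i < MLX4_NUM_EQ; ++i)
            mlx4_free_eq(dev, &priv->eq_table.eq[i]);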
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c index 9ae951bf6aa6..be5d9e90ccf2 100644 --- a/drivers/net/mlx4/intf.c +++ b/drivers/net/mlx4/intf.c | |||
@@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev) | |||
142 | mlx4_add_device(intf, priv); | 142 | mlx4_add_device(intf, priv); |
143 | 143 | ||
144 | mutex_unlock(&intf_mutex); | 144 | mutex_unlock(&intf_mutex); |
145 | mlx4_start_catas_poll(dev); | ||
145 | 146 | ||
146 | return 0; | 147 | return 0; |
147 | } | 148 | } |
@@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev) | |||
151 | struct mlx4_priv *priv = mlx4_priv(dev); | 152 | struct mlx4_priv *priv = mlx4_priv(dev); |
152 | struct mlx4_interface *intf; | 153 | struct mlx4_interface *intf; |
153 | 154 | ||
155 | mlx4_stop_catas_poll(dev); | ||
154 | mutex_lock(&intf_mutex); | 156 | mutex_lock(&intf_mutex); |
155 | 157 | ||
156 | list_for_each_entry(intf, &intf_list, list) | 158 | list_for_each_entry(intf, &intf_list, list) |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index a4f2e0475a71..4dc9dc19b716 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -78,7 +78,7 @@ static const char mlx4_version[] __devinitdata = | |||
78 | static struct mlx4_profile default_profile = { | 78 | static struct mlx4_profile default_profile = { |
79 | .num_qp = 1 << 16, | 79 | .num_qp = 1 << 16, |
80 | .num_srq = 1 << 16, | 80 | .num_srq = 1 << 16, |
81 | .rdmarc_per_qp = 4, | 81 | .rdmarc_per_qp = 1 << 4, |
82 | .num_cq = 1 << 16, | 82 | .num_cq = 1 << 16, |
83 | .num_mcg = 1 << 13, | 83 | .num_mcg = 1 << 13, |
84 | .num_mpt = 1 << 17, | 84 | .num_mpt = 1 << 17, |
@@ -583,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) | |||
583 | goto err_pd_table_free; | 583 | goto err_pd_table_free; |
584 | } | 584 | } |
585 | 585 | ||
586 | mlx4_map_catas_buf(dev); | ||
587 | |||
588 | err = mlx4_init_eq_table(dev); | 586 | err = mlx4_init_eq_table(dev); |
589 | if (err) { | 587 | if (err) { |
590 | mlx4_err(dev, "Failed to initialize " | 588 | mlx4_err(dev, "Failed to initialize " |
591 | "event queue table, aborting.\n"); | 589 | "event queue table, aborting.\n"); |
592 | goto err_catas_buf; | 590 | goto err_mr_table_free; |
593 | } | 591 | } |
594 | 592 | ||
595 | err = mlx4_cmd_use_events(dev); | 593 | err = mlx4_cmd_use_events(dev); |
@@ -659,8 +657,7 @@ err_cmd_poll: | |||
659 | err_eq_table_free: | 657 | err_eq_table_free: |
660 | mlx4_cleanup_eq_table(dev); | 658 | mlx4_cleanup_eq_table(dev); |
661 | 659 | ||
662 | err_catas_buf: | 660 | err_mr_table_free: |
663 | mlx4_unmap_catas_buf(dev); | ||
664 | mlx4_cleanup_mr_table(dev); | 661 | mlx4_cleanup_mr_table(dev); |
665 | 662 | ||
666 | err_pd_table_free: | 663 | err_pd_table_free: |
@@ -836,9 +833,6 @@ err_cleanup: | |||
836 | mlx4_cleanup_cq_table(dev); | 833 | mlx4_cleanup_cq_table(dev); |
837 | mlx4_cmd_use_polling(dev); | 834 | mlx4_cmd_use_polling(dev); |
838 | mlx4_cleanup_eq_table(dev); | 835 | mlx4_cleanup_eq_table(dev); |
839 | |||
840 | mlx4_unmap_catas_buf(dev); | ||
841 | |||
842 | mlx4_cleanup_mr_table(dev); | 836 | mlx4_cleanup_mr_table(dev); |
843 | mlx4_cleanup_pd_table(dev); | 837 | mlx4_cleanup_pd_table(dev); |
844 | mlx4_cleanup_uar_table(dev); | 838 | mlx4_cleanup_uar_table(dev); |
@@ -885,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev) | |||
885 | mlx4_cleanup_cq_table(dev); | 879 | mlx4_cleanup_cq_table(dev); |
886 | mlx4_cmd_use_polling(dev); | 880 | mlx4_cmd_use_polling(dev); |
887 | mlx4_cleanup_eq_table(dev); | 881 | mlx4_cleanup_eq_table(dev); |
888 | |||
889 | mlx4_unmap_catas_buf(dev); | ||
890 | |||
891 | mlx4_cleanup_mr_table(dev); | 882 | mlx4_cleanup_mr_table(dev); |
892 | mlx4_cleanup_pd_table(dev); | 883 | mlx4_cleanup_pd_table(dev); |
893 | 884 | ||
@@ -908,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev) | |||
908 | } | 899 | } |
909 | } | 900 | } |
910 | 901 | ||
902 | int mlx4_restart_one(struct pci_dev *pdev) | ||
903 | { | ||
904 | mlx4_remove_one(pdev); | ||
905 | return mlx4_init_one(pdev, NULL); | ||
906 | } | ||
907 | |||
911 | static struct pci_device_id mlx4_pci_table[] = { | 908 | static struct pci_device_id mlx4_pci_table[] = { |
912 | { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ | 909 | { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ |
913 | { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ | 910 | { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ |
@@ -930,6 +927,10 @@ static int __init mlx4_init(void) | |||
930 | { | 927 | { |
931 | int ret; | 928 | int ret; |
932 | 929 | ||
930 | ret = mlx4_catas_init(); | ||
931 | if (ret) | ||
932 | return ret; | ||
933 | |||
933 | ret = pci_register_driver(&mlx4_driver); | 934 | ret = pci_register_driver(&mlx4_driver); |
934 | return ret < 0 ? ret : 0; | 935 | return ret < 0 ? ret : 0; |
935 | } | 936 | } |
@@ -937,6 +938,7 @@ static int __init mlx4_init(void) | |||
937 | static void __exit mlx4_cleanup(void) | 938 | static void __exit mlx4_cleanup(void) |
938 | { | 939 | { |
939 | pci_unregister_driver(&mlx4_driver); | 940 | pci_unregister_driver(&mlx4_driver); |
941 | mlx4_catas_cleanup(); | ||
940 | } | 942 | } |
941 | 943 | ||
942 | module_init(mlx4_init); | 944 | module_init(mlx4_init); |
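Editor's note: module init and exit are mirror images here: the shared error-handling workqueue is created before the PCI driver can probe any device and destroyed only after it is unregistered. One thing worth flagging in the hunk as shown: if pci_register_driver() fails, the workqueue is not destroyed on the error path. A fuller sketch (the cleanup call on failure is an addition, not part of the patch):

    static int __init mlx4_init(void)
    {
            int ret;

            ret = mlx4_catas_init();
            if (ret)
                    return ret;

            ret = pci_register_driver(&mlx4_driver);
            if (ret < 0)
                    mlx4_catas_cleanup();   /* undo on failure (not in the patch) */

            return ret < 0 ? ret : 0;
    }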
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index d9c91a71fc87..be304a7c2c91 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -39,6 +39,7 @@ | |||
39 | 39 | ||
40 | #include <linux/mutex.h> | 40 | #include <linux/mutex.h> |
41 | #include <linux/radix-tree.h> | 41 | #include <linux/radix-tree.h> |
42 | #include <linux/timer.h> | ||
42 | 43 | ||
43 | #include <linux/mlx4/device.h> | 44 | #include <linux/mlx4/device.h> |
44 | #include <linux/mlx4/doorbell.h> | 45 | #include <linux/mlx4/doorbell.h> |
@@ -67,7 +68,6 @@ enum { | |||
67 | enum { | 68 | enum { |
68 | MLX4_EQ_ASYNC, | 69 | MLX4_EQ_ASYNC, |
69 | MLX4_EQ_COMP, | 70 | MLX4_EQ_COMP, |
70 | MLX4_EQ_CATAS, | ||
71 | MLX4_NUM_EQ | 71 | MLX4_NUM_EQ |
72 | }; | 72 | }; |
73 | 73 | ||
@@ -248,7 +248,8 @@ struct mlx4_mcg_table { | |||
248 | 248 | ||
249 | struct mlx4_catas_err { | 249 | struct mlx4_catas_err { |
250 | u32 __iomem *map; | 250 | u32 __iomem *map; |
251 | int size; | 251 | struct timer_list timer; |
252 | struct list_head list; | ||
252 | }; | 253 | }; |
253 | 254 | ||
254 | struct mlx4_priv { | 255 | struct mlx4_priv { |
@@ -311,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev); | |||
311 | void mlx4_cleanup_srq_table(struct mlx4_dev *dev); | 312 | void mlx4_cleanup_srq_table(struct mlx4_dev *dev); |
312 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); | 313 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); |
313 | 314 | ||
314 | void mlx4_map_catas_buf(struct mlx4_dev *dev); | 315 | void mlx4_start_catas_poll(struct mlx4_dev *dev); |
315 | void mlx4_unmap_catas_buf(struct mlx4_dev *dev); | 316 | void mlx4_stop_catas_poll(struct mlx4_dev *dev); |
316 | 317 | int mlx4_catas_init(void); | |
318 | void mlx4_catas_cleanup(void); | ||
319 | int mlx4_restart_one(struct pci_dev *pdev); | ||
317 | int mlx4_register_device(struct mlx4_dev *dev); | 320 | int mlx4_register_device(struct mlx4_dev *dev); |
318 | void mlx4_unregister_device(struct mlx4_dev *dev); | 321 | void mlx4_unregister_device(struct mlx4_dev *dev); |
319 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, | 322 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, |
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig new file mode 100644 index 000000000000..b778ed71f636 --- /dev/null +++ b/drivers/uio/Kconfig | |||
@@ -0,0 +1,29 @@ | |||
1 | menu "Userspace I/O" | ||
2 | depends on !S390 | ||
3 | |||
4 | config UIO | ||
5 | tristate "Userspace I/O drivers" | ||
6 | default n | ||
7 | help | ||
8 | Enable this to allow the userspace driver core code to be | ||
9 | built. This code allows userspace programs easy access to | ||
10 | kernel interrupts and memory locations, allowing some drivers | ||
11 | to be written in userspace. Note that a small kernel driver | ||
12 | is also required for interrupt handling to work properly. | ||
13 | |||
14 | If you don't know what to do here, say N. | ||
15 | |||
16 | config UIO_CIF | ||
17 | tristate "generic Hilscher CIF Card driver" | ||
18 | depends on UIO && PCI | ||
19 | default n | ||
20 | help | ||
21 | Driver for Hilscher CIF DeviceNet and Profibus cards. This | ||
22 | driver requires a userspace component that handles all of the | ||
23 | heavy lifting and can be found at: | ||
24 | http://www.osadl.org/projects/downloads/UIO/user/cif-* | ||
25 | |||
26 | To compile this driver as a module, choose M here: the module | ||
27 | will be called uio_cif. | ||
28 | |||
29 | endmenu | ||
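The help text above is the whole model in miniature: the kernel half (the UIO core plus a small stub driver) fields the interrupt and exports the device's memory regions, while the device logic lives in an ordinary user program. A minimal sketch of such a program follows; it is an illustration, not part of this patch, and assumes the stub has registered a device that appears as /dev/uio0 with a single page-sized mapping (map 0). As the uio.c code below shows, the blocking read() returns the 32-bit event counter and must be exactly sizeof(s32) bytes, the mmap() page offset selects the map index, and each mapping's size is exported at /sys/class/uio/uio0/maps/map0/size.

/* Hypothetical userspace half of a UIO driver; assumes /dev/uio0
 * exists and exports one page-sized mapping as map 0. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int32_t event_count;
	int fd = open("/dev/uio0", O_RDWR);
	if (fd < 0)
		return 1;

	/* The page offset selects the map index: map 0 at offset 0,
	 * map 1 at 1 * pagesize, and so on. */
	void *regs = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (regs == MAP_FAILED)
		return 1;

	for (;;) {
		/* uio_read() accepts only reads of exactly sizeof(s32). */
		if (read(fd, &event_count, sizeof(event_count)) !=
		    sizeof(event_count))
			break;
		printf("interrupt #%d\n", (int)event_count);
		/* Device-specific: re-enable the interrupt via regs here. */
	}
	return 0;
}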
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile new file mode 100644 index 000000000000..7fecfb459da5 --- /dev/null +++ b/drivers/uio/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_UIO) += uio.o | ||
2 | obj-$(CONFIG_UIO_CIF) += uio_cif.o | ||
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c new file mode 100644 index 000000000000..865f32b63b5c --- /dev/null +++ b/drivers/uio/uio.c | |||
@@ -0,0 +1,701 @@ | |||
1 | /* | ||
2 | * drivers/uio/uio.c | ||
3 | * | ||
4 | * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> | ||
5 | * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> | ||
6 | * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de> | ||
7 | * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> | ||
8 | * | ||
9 | * Userspace IO | ||
10 | * | ||
11 | * Base Functions | ||
12 | * | ||
13 | * Licensed under the GPLv2 only. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/poll.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/idr.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/kobject.h> | ||
24 | #include <linux/uio_driver.h> | ||
25 | |||
26 | #define UIO_MAX_DEVICES 255 | ||
27 | |||
28 | struct uio_device { | ||
29 | struct module *owner; | ||
30 | struct device *dev; | ||
31 | int minor; | ||
32 | atomic_t event; | ||
33 | struct fasync_struct *async_queue; | ||
34 | wait_queue_head_t wait; | ||
35 | int vma_count; | ||
36 | struct uio_info *info; | ||
37 | struct kset map_attr_kset; | ||
38 | }; | ||
39 | |||
40 | static int uio_major; | ||
41 | static DEFINE_IDR(uio_idr); | ||
42 | static struct file_operations uio_fops; | ||
43 | |||
44 | /* UIO class infrastructure */ | ||
45 | static struct uio_class { | ||
46 | struct kref kref; | ||
47 | struct class *class; | ||
48 | } *uio_class; | ||
49 | |||
50 | /* | ||
51 | * attributes | ||
52 | */ | ||
53 | |||
54 | static struct attribute attr_addr = { | ||
55 | .name = "addr", | ||
56 | .mode = S_IRUGO, | ||
57 | }; | ||
58 | |||
59 | static struct attribute attr_size = { | ||
60 | .name = "size", | ||
61 | .mode = S_IRUGO, | ||
62 | }; | ||
63 | |||
64 | static struct attribute *map_attrs[] = { | ||
65 | &attr_addr, &attr_size, NULL | ||
66 | }; | ||
67 | |||
68 | static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr, | ||
69 | char *buf) | ||
70 | { | ||
71 | struct uio_mem *mem = container_of(kobj, struct uio_mem, kobj); | ||
72 | |||
73 | if (strncmp(attr->name, "addr", 4) == 0) | ||
74 | return sprintf(buf, "0x%lx\n", mem->addr); | ||
75 | |||
76 | if (strncmp(attr->name, "size", 4) == 0) | ||
77 | return sprintf(buf, "0x%lx\n", mem->size); | ||
78 | |||
79 | return -ENODEV; | ||
80 | } | ||
81 | |||
82 | static void map_attr_release(struct kobject *kobj) | ||
83 | { | ||
84 | /* TODO ??? */ | ||
85 | } | ||
86 | |||
87 | static struct sysfs_ops map_attr_ops = { | ||
88 | .show = map_attr_show, | ||
89 | }; | ||
90 | |||
91 | static struct kobj_type map_attr_type = { | ||
92 | .release = map_attr_release, | ||
93 | .sysfs_ops = &map_attr_ops, | ||
94 | .default_attrs = map_attrs, | ||
95 | }; | ||
96 | |||
97 | static ssize_t show_name(struct device *dev, | ||
98 | struct device_attribute *attr, char *buf) | ||
99 | { | ||
100 | struct uio_device *idev = dev_get_drvdata(dev); | ||
101 | if (idev) | ||
102 | return sprintf(buf, "%s\n", idev->info->name); | ||
103 | else | ||
104 | return -ENODEV; | ||
105 | } | ||
106 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); | ||
107 | |||
108 | static ssize_t show_version(struct device *dev, | ||
109 | struct device_attribute *attr, char *buf) | ||
110 | { | ||
111 | struct uio_device *idev = dev_get_drvdata(dev); | ||
112 | if (idev) | ||
113 | return sprintf(buf, "%s\n", idev->info->version); | ||
114 | else | ||
115 | return -ENODEV; | ||
116 | } | ||
117 | static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); | ||
118 | |||
119 | static ssize_t show_event(struct device *dev, | ||
120 | struct device_attribute *attr, char *buf) | ||
121 | { | ||
122 | struct uio_device *idev = dev_get_drvdata(dev); | ||
123 | if (idev) | ||
124 | return sprintf(buf, "%u\n", | ||
125 | (unsigned int)atomic_read(&idev->event)); | ||
126 | else | ||
127 | return -ENODEV; | ||
128 | } | ||
129 | static DEVICE_ATTR(event, S_IRUGO, show_event, NULL); | ||
130 | |||
131 | static struct attribute *uio_attrs[] = { | ||
132 | &dev_attr_name.attr, | ||
133 | &dev_attr_version.attr, | ||
134 | &dev_attr_event.attr, | ||
135 | NULL, | ||
136 | }; | ||
137 | |||
138 | static struct attribute_group uio_attr_grp = { | ||
139 | .attrs = uio_attrs, | ||
140 | }; | ||
141 | |||
142 | /* | ||
143 | * device functions | ||
144 | */ | ||
145 | static int uio_dev_add_attributes(struct uio_device *idev) | ||
146 | { | ||
147 | int ret; | ||
148 | int mi; | ||
149 | int map_found = 0; | ||
150 | struct uio_mem *mem; | ||
151 | |||
152 | ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp); | ||
153 | if (ret) | ||
154 | goto err_group; | ||
155 | |||
156 | for (mi = 0; mi < MAX_UIO_MAPS; mi++) { | ||
157 | mem = &idev->info->mem[mi]; | ||
158 | if (mem->size == 0) | ||
159 | break; | ||
160 | if (!map_found) { | ||
161 | map_found = 1; | ||
162 | kobject_set_name(&idev->map_attr_kset.kobj, "maps"); | ||
163 | idev->map_attr_kset.ktype = &map_attr_type; | ||
164 | idev->map_attr_kset.kobj.parent = &idev->dev->kobj; | ||
165 | ret = kset_register(&idev->map_attr_kset); | ||
166 | if (ret) | ||
167 | goto err_remove_group; | ||
168 | } | ||
169 | kobject_init(&mem->kobj); | ||
170 | kobject_set_name(&mem->kobj, "map%d", mi); | ||
171 | mem->kobj.parent = &idev->map_attr_kset.kobj; | ||
172 | mem->kobj.kset = &idev->map_attr_kset; | ||
173 | ret = kobject_add(&mem->kobj); | ||
174 | if (ret) | ||
175 | goto err_remove_maps; | ||
176 | } | ||
177 | |||
178 | return 0; | ||
179 | |||
180 | err_remove_maps: | ||
181 | for (mi--; mi >= 0; mi--) { | ||
182 | mem = &idev->info->mem[mi]; | ||
183 | kobject_unregister(&mem->kobj); | ||
184 | } | ||
185 | kset_unregister(&idev->map_attr_kset); /* Needed ? */ | ||
186 | err_remove_group: | ||
187 | sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp); | ||
188 | err_group: | ||
189 | dev_err(idev->dev, "error creating sysfs files (%d)\n", ret); | ||
190 | return ret; | ||
191 | } | ||
192 | |||
193 | static void uio_dev_del_attributes(struct uio_device *idev) | ||
194 | { | ||
195 | int mi; | ||
196 | struct uio_mem *mem; | ||
197 | for (mi = 0; mi < MAX_UIO_MAPS; mi++) { | ||
198 | mem = &idev->info->mem[mi]; | ||
199 | if (mem->size == 0) | ||
200 | break; | ||
201 | kobject_unregister(&mem->kobj); | ||
202 | } | ||
203 | kset_unregister(&idev->map_attr_kset); | ||
204 | sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp); | ||
205 | } | ||
206 | |||
207 | static int uio_get_minor(struct uio_device *idev) | ||
208 | { | ||
209 | static DEFINE_MUTEX(minor_lock); | ||
210 | int retval = -ENOMEM; | ||
211 | int id; | ||
212 | |||
213 | mutex_lock(&minor_lock); | ||
214 | if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0) | ||
215 | goto exit; | ||
216 | |||
217 | retval = idr_get_new(&uio_idr, idev, &id); | ||
218 | if (retval < 0) { | ||
219 | if (retval == -EAGAIN) | ||
220 | retval = -ENOMEM; | ||
221 | goto exit; | ||
222 | } | ||
223 | idev->minor = id & MAX_ID_MASK; | ||
224 | exit: | ||
225 | mutex_unlock(&minor_lock); | ||
226 | return retval; | ||
227 | } | ||
228 | |||
229 | static void uio_free_minor(struct uio_device *idev) | ||
230 | { | ||
231 | idr_remove(&uio_idr, idev->minor); | ||
232 | } | ||
233 | |||
234 | /** | ||
235 | * uio_event_notify - trigger an interrupt event | ||
236 | * @info: UIO device capabilities | ||
237 | */ | ||
238 | void uio_event_notify(struct uio_info *info) | ||
239 | { | ||
240 | struct uio_device *idev = info->uio_dev; | ||
241 | |||
242 | atomic_inc(&idev->event); | ||
243 | wake_up_interruptible(&idev->wait); | ||
244 | kill_fasync(&idev->async_queue, SIGIO, POLL_IN); | ||
245 | } | ||
246 | EXPORT_SYMBOL_GPL(uio_event_notify); | ||
247 | |||
248 | /** | ||
249 | * uio_interrupt - hardware interrupt handler | ||
250 | * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer | ||
251 | * @dev_id: Pointer to the device's uio_device structure | ||
252 | */ | ||
253 | static irqreturn_t uio_interrupt(int irq, void *dev_id) | ||
254 | { | ||
255 | struct uio_device *idev = (struct uio_device *)dev_id; | ||
256 | irqreturn_t ret = idev->info->handler(irq, idev->info); | ||
257 | |||
258 | if (ret == IRQ_HANDLED) | ||
259 | uio_event_notify(idev->info); | ||
260 | |||
261 | return ret; | ||
262 | } | ||
263 | |||
264 | struct uio_listener { | ||
265 | struct uio_device *dev; | ||
266 | s32 event_count; | ||
267 | }; | ||
268 | |||
269 | static int uio_open(struct inode *inode, struct file *filep) | ||
270 | { | ||
271 | struct uio_device *idev; | ||
272 | struct uio_listener *listener; | ||
273 | int ret = 0; | ||
274 | |||
275 | idev = idr_find(&uio_idr, iminor(inode)); | ||
276 | if (!idev) | ||
277 | return -ENODEV; | ||
278 | |||
279 | listener = kmalloc(sizeof(*listener), GFP_KERNEL); | ||
280 | if (!listener) | ||
281 | return -ENOMEM; | ||
282 | |||
283 | listener->dev = idev; | ||
284 | listener->event_count = atomic_read(&idev->event); | ||
285 | filep->private_data = listener; | ||
286 | |||
287 | if (idev->info->open) { | ||
288 | if (!try_module_get(idev->owner)) { | ||
289 | ret = -ENODEV; /* listener is freed by the error path below */ | ||
290 | } else { | ||
291 | ret = idev->info->open(idev->info, inode); | ||
292 | module_put(idev->owner); | ||
293 | } | ||
294 | } | ||
293 | |||
294 | if (ret) | ||
295 | kfree(listener); | ||
296 | |||
297 | return ret; | ||
298 | } | ||
299 | |||
300 | static int uio_fasync(int fd, struct file *filep, int on) | ||
301 | { | ||
302 | struct uio_listener *listener = filep->private_data; | ||
303 | struct uio_device *idev = listener->dev; | ||
304 | |||
305 | return fasync_helper(fd, filep, on, &idev->async_queue); | ||
306 | } | ||
307 | |||
308 | static int uio_release(struct inode *inode, struct file *filep) | ||
309 | { | ||
310 | int ret = 0; | ||
311 | struct uio_listener *listener = filep->private_data; | ||
312 | struct uio_device *idev = listener->dev; | ||
313 | |||
314 | if (idev->info->release) { | ||
315 | if (!try_module_get(idev->owner)) | ||
316 | return -ENODEV; | ||
317 | ret = idev->info->release(idev->info, inode); | ||
318 | module_put(idev->owner); | ||
319 | } | ||
320 | if (filep->f_flags & FASYNC) | ||
321 | ret = uio_fasync(-1, filep, 0); | ||
322 | kfree(listener); | ||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | static unsigned int uio_poll(struct file *filep, poll_table *wait) | ||
327 | { | ||
328 | struct uio_listener *listener = filep->private_data; | ||
329 | struct uio_device *idev = listener->dev; | ||
330 | |||
331 | if (idev->info->irq == UIO_IRQ_NONE) | ||
332 | return -EIO; | ||
333 | |||
334 | poll_wait(filep, &idev->wait, wait); | ||
335 | if (listener->event_count != atomic_read(&idev->event)) | ||
336 | return POLLIN | POLLRDNORM; | ||
337 | return 0; | ||
338 | } | ||
339 | |||
340 | static ssize_t uio_read(struct file *filep, char __user *buf, | ||
341 | size_t count, loff_t *ppos) | ||
342 | { | ||
343 | struct uio_listener *listener = filep->private_data; | ||
344 | struct uio_device *idev = listener->dev; | ||
345 | DECLARE_WAITQUEUE(wait, current); | ||
346 | ssize_t retval; | ||
347 | s32 event_count; | ||
348 | |||
349 | if (idev->info->irq == UIO_IRQ_NONE) | ||
350 | return -EIO; | ||
351 | |||
352 | if (count != sizeof(s32)) | ||
353 | return -EINVAL; | ||
354 | |||
355 | add_wait_queue(&idev->wait, &wait); | ||
356 | |||
357 | do { | ||
358 | set_current_state(TASK_INTERRUPTIBLE); | ||
359 | |||
360 | event_count = atomic_read(&idev->event); | ||
361 | if (event_count != listener->event_count) { | ||
362 | if (copy_to_user(buf, &event_count, count)) | ||
363 | retval = -EFAULT; | ||
364 | else { | ||
365 | listener->event_count = event_count; | ||
366 | retval = count; | ||
367 | } | ||
368 | break; | ||
369 | } | ||
370 | |||
371 | if (filep->f_flags & O_NONBLOCK) { | ||
372 | retval = -EAGAIN; | ||
373 | break; | ||
374 | } | ||
375 | |||
376 | if (signal_pending(current)) { | ||
377 | retval = -ERESTARTSYS; | ||
378 | break; | ||
379 | } | ||
380 | schedule(); | ||
381 | } while (1); | ||
382 | |||
383 | __set_current_state(TASK_RUNNING); | ||
384 | remove_wait_queue(&idev->wait, &wait); | ||
385 | |||
386 | return retval; | ||
387 | } | ||
388 | |||
389 | static int uio_find_mem_index(struct vm_area_struct *vma) | ||
390 | { | ||
391 | int mi; | ||
392 | struct uio_device *idev = vma->vm_private_data; | ||
393 | |||
394 | for (mi = 0; mi < MAX_UIO_MAPS; mi++) { | ||
395 | if (idev->info->mem[mi].size == 0) | ||
396 | return -1; | ||
397 | if (vma->vm_pgoff == mi) | ||
398 | return mi; | ||
399 | } | ||
400 | return -1; | ||
401 | } | ||
402 | |||
403 | static void uio_vma_open(struct vm_area_struct *vma) | ||
404 | { | ||
405 | struct uio_device *idev = vma->vm_private_data; | ||
406 | idev->vma_count++; | ||
407 | } | ||
408 | |||
409 | static void uio_vma_close(struct vm_area_struct *vma) | ||
410 | { | ||
411 | struct uio_device *idev = vma->vm_private_data; | ||
412 | idev->vma_count--; | ||
413 | } | ||
414 | |||
415 | static struct page *uio_vma_nopage(struct vm_area_struct *vma, | ||
416 | unsigned long address, int *type) | ||
417 | { | ||
418 | struct uio_device *idev = vma->vm_private_data; | ||
419 | struct page *page = NOPAGE_SIGBUS; | ||
420 | |||
421 | int mi = uio_find_mem_index(vma); | ||
422 | if (mi < 0) | ||
423 | return page; | ||
424 | |||
425 | if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL) | ||
426 | page = virt_to_page(idev->info->mem[mi].addr); | ||
427 | else | ||
428 | page = vmalloc_to_page((void *)idev->info->mem[mi].addr); | ||
429 | get_page(page); | ||
430 | if (type) | ||
431 | *type = VM_FAULT_MINOR; | ||
432 | return page; | ||
433 | } | ||
434 | |||
435 | static struct vm_operations_struct uio_vm_ops = { | ||
436 | .open = uio_vma_open, | ||
437 | .close = uio_vma_close, | ||
438 | .nopage = uio_vma_nopage, | ||
439 | }; | ||
440 | |||
441 | static int uio_mmap_physical(struct vm_area_struct *vma) | ||
442 | { | ||
443 | struct uio_device *idev = vma->vm_private_data; | ||
444 | int mi = uio_find_mem_index(vma); | ||
445 | if (mi < 0) | ||
446 | return -EINVAL; | ||
447 | |||
448 | vma->vm_flags |= VM_IO | VM_RESERVED; | ||
449 | |||
450 | return remap_pfn_range(vma, | ||
451 | vma->vm_start, | ||
452 | idev->info->mem[mi].addr >> PAGE_SHIFT, | ||
453 | vma->vm_end - vma->vm_start, | ||
454 | vma->vm_page_prot); | ||
455 | } | ||
456 | |||
457 | static int uio_mmap_logical(struct vm_area_struct *vma) | ||
458 | { | ||
459 | vma->vm_flags |= VM_RESERVED; | ||
460 | vma->vm_ops = &uio_vm_ops; | ||
461 | uio_vma_open(vma); | ||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | static int uio_mmap(struct file *filep, struct vm_area_struct *vma) | ||
466 | { | ||
467 | struct uio_listener *listener = filep->private_data; | ||
468 | struct uio_device *idev = listener->dev; | ||
469 | int mi; | ||
470 | unsigned long requested_pages, actual_pages; | ||
471 | int ret = 0; | ||
472 | |||
473 | if (vma->vm_end < vma->vm_start) | ||
474 | return -EINVAL; | ||
475 | |||
476 | vma->vm_private_data = idev; | ||
477 | |||
478 | mi = uio_find_mem_index(vma); | ||
479 | if (mi < 0) | ||
480 | return -EINVAL; | ||
481 | |||
482 | requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | ||
483 | actual_pages = (idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
484 | if (requested_pages > actual_pages) | ||
485 | return -EINVAL; | ||
486 | |||
487 | if (idev->info->mmap) { | ||
488 | if (!try_module_get(idev->owner)) | ||
489 | return -ENODEV; | ||
490 | ret = idev->info->mmap(idev->info, vma); | ||
491 | module_put(idev->owner); | ||
492 | return ret; | ||
493 | } | ||
494 | |||
495 | switch (idev->info->mem[mi].memtype) { | ||
496 | case UIO_MEM_PHYS: | ||
497 | return uio_mmap_physical(vma); | ||
498 | case UIO_MEM_LOGICAL: | ||
499 | case UIO_MEM_VIRTUAL: | ||
500 | return uio_mmap_logical(vma); | ||
501 | default: | ||
502 | return -EINVAL; | ||
503 | } | ||
504 | } | ||
505 | |||
506 | static struct file_operations uio_fops = { | ||
507 | .owner = THIS_MODULE, | ||
508 | .open = uio_open, | ||
509 | .release = uio_release, | ||
510 | .read = uio_read, | ||
511 | .mmap = uio_mmap, | ||
512 | .poll = uio_poll, | ||
513 | .fasync = uio_fasync, | ||
514 | }; | ||
515 | |||
516 | static int uio_major_init(void) | ||
517 | { | ||
518 | uio_major = register_chrdev(0, "uio", &uio_fops); | ||
519 | if (uio_major < 0) | ||
520 | return uio_major; | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | static void uio_major_cleanup(void) | ||
525 | { | ||
526 | unregister_chrdev(uio_major, "uio"); | ||
527 | } | ||
528 | |||
529 | static int init_uio_class(void) | ||
530 | { | ||
531 | int ret = 0; | ||
532 | |||
533 | if (uio_class != NULL) { | ||
534 | kref_get(&uio_class->kref); | ||
535 | goto exit; | ||
536 | } | ||
537 | |||
538 | /* This is the first time in here, set everything up properly */ | ||
539 | ret = uio_major_init(); | ||
540 | if (ret) | ||
541 | goto exit; | ||
542 | |||
543 | uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL); | ||
544 | if (!uio_class) { | ||
545 | ret = -ENOMEM; | ||
546 | goto err_kzalloc; | ||
547 | } | ||
548 | |||
549 | kref_init(&uio_class->kref); | ||
550 | uio_class->class = class_create(THIS_MODULE, "uio"); | ||
551 | if (IS_ERR(uio_class->class)) { | ||
552 | ret = PTR_ERR(uio_class->class); | ||
553 | printk(KERN_ERR "class_create failed for uio\n"); | ||
554 | goto err_class_create; | ||
555 | } | ||
556 | return 0; | ||
557 | |||
558 | err_class_create: | ||
559 | kfree(uio_class); | ||
560 | uio_class = NULL; | ||
561 | err_kzalloc: | ||
562 | uio_major_cleanup(); | ||
563 | exit: | ||
564 | return ret; | ||
565 | } | ||
566 | |||
567 | static void release_uio_class(struct kref *kref) | ||
568 | { | ||
569 | /* Ok, we cheat as we know we only have one uio_class */ | ||
570 | class_destroy(uio_class->class); | ||
571 | kfree(uio_class); | ||
572 | uio_major_cleanup(); | ||
573 | uio_class = NULL; | ||
574 | } | ||
575 | |||
576 | static void uio_class_destroy(void) | ||
577 | { | ||
578 | if (uio_class) | ||
579 | kref_put(&uio_class->kref, release_uio_class); | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * uio_register_device - register a new userspace IO device | ||
584 | * @owner: module that creates the new device | ||
585 | * @parent: parent device | ||
586 | * @info: UIO device capabilities | ||
587 | * | ||
588 | * returns zero on success or a negative error code. | ||
589 | */ | ||
590 | int __uio_register_device(struct module *owner, | ||
591 | struct device *parent, | ||
592 | struct uio_info *info) | ||
593 | { | ||
594 | struct uio_device *idev; | ||
595 | int ret = 0; | ||
596 | |||
597 | if (!parent || !info || !info->name || !info->version) | ||
598 | return -EINVAL; | ||
599 | |||
600 | info->uio_dev = NULL; | ||
601 | |||
602 | ret = init_uio_class(); | ||
603 | if (ret) | ||
604 | return ret; | ||
605 | |||
606 | idev = kzalloc(sizeof(*idev), GFP_KERNEL); | ||
607 | if (!idev) { | ||
608 | ret = -ENOMEM; | ||
609 | goto err_kzalloc; | ||
610 | } | ||
611 | |||
612 | idev->owner = owner; | ||
613 | idev->info = info; | ||
614 | init_waitqueue_head(&idev->wait); | ||
615 | atomic_set(&idev->event, 0); | ||
616 | |||
617 | ret = uio_get_minor(idev); | ||
618 | if (ret) | ||
619 | goto err_get_minor; | ||
620 | |||
621 | idev->dev = device_create(uio_class->class, parent, | ||
622 | MKDEV(uio_major, idev->minor), | ||
623 | "uio%d", idev->minor); | ||
624 | if (IS_ERR(idev->dev)) { | ||
625 | printk(KERN_ERR "UIO: device register failed\n"); | ||
626 | ret = PTR_ERR(idev->dev); | ||
627 | goto err_device_create; | ||
628 | } | ||
629 | dev_set_drvdata(idev->dev, idev); | ||
630 | |||
631 | ret = uio_dev_add_attributes(idev); | ||
632 | if (ret) | ||
633 | goto err_uio_dev_add_attributes; | ||
634 | |||
635 | info->uio_dev = idev; | ||
636 | |||
637 | if (idev->info->irq >= 0) { | ||
638 | ret = request_irq(idev->info->irq, uio_interrupt, | ||
639 | idev->info->irq_flags, idev->info->name, idev); | ||
640 | if (ret) | ||
641 | goto err_request_irq; | ||
642 | } | ||
643 | |||
644 | return 0; | ||
645 | |||
646 | err_request_irq: | ||
647 | uio_dev_del_attributes(idev); | ||
648 | err_uio_dev_add_attributes: | ||
649 | device_destroy(uio_class->class, MKDEV(uio_major, idev->minor)); | ||
650 | err_device_create: | ||
651 | uio_free_minor(idev); | ||
652 | err_get_minor: | ||
653 | kfree(idev); | ||
654 | err_kzalloc: | ||
655 | uio_class_destroy(); | ||
656 | return ret; | ||
657 | } | ||
658 | EXPORT_SYMBOL_GPL(__uio_register_device); | ||
659 | |||
660 | /** | ||
661 | * uio_unregister_device - unregister a userspace IO device | ||
662 | * @info: UIO device capabilities | ||
663 | * | ||
664 | */ | ||
665 | void uio_unregister_device(struct uio_info *info) | ||
666 | { | ||
667 | struct uio_device *idev; | ||
668 | |||
669 | if (!info || !info->uio_dev) | ||
670 | return; | ||
671 | |||
672 | idev = info->uio_dev; | ||
673 | |||
674 | uio_free_minor(idev); | ||
675 | |||
676 | if (info->irq >= 0) | ||
677 | free_irq(info->irq, idev); | ||
678 | |||
679 | uio_dev_del_attributes(idev); | ||
680 | |||
681 | dev_set_drvdata(idev->dev, NULL); | ||
682 | device_destroy(uio_class->class, MKDEV(uio_major, idev->minor)); | ||
683 | kfree(idev); | ||
684 | uio_class_destroy(); | ||
685 | |||
686 | return; | ||
687 | } | ||
688 | EXPORT_SYMBOL_GPL(uio_unregister_device); | ||
689 | |||
690 | static int __init uio_init(void) | ||
691 | { | ||
692 | return 0; | ||
693 | } | ||
694 | |||
695 | static void __exit uio_exit(void) | ||
696 | { | ||
697 | } | ||
698 | |||
699 | module_init(uio_init); | ||
700 | module_exit(uio_exit); | ||
701 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c new file mode 100644 index 000000000000..838bae460831 --- /dev/null +++ b/drivers/uio/uio_cif.c | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * UIO Hilscher CIF card driver | ||
3 | * | ||
4 | * (C) 2007 Hans J. Koch <hjk@linutronix.de> | ||
5 | * Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de> | ||
6 | * | ||
7 | * Licensed under GPL version 2 only. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/device.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/uio_driver.h> | ||
15 | |||
16 | #include <asm/io.h> | ||
17 | |||
18 | #ifndef PCI_DEVICE_ID_PLX_9030 | ||
19 | #define PCI_DEVICE_ID_PLX_9030 0x9030 | ||
20 | #endif | ||
21 | |||
22 | #define PLX9030_INTCSR 0x4C | ||
23 | #define INTSCR_INT1_ENABLE 0x01 | ||
24 | #define INTSCR_INT1_STATUS 0x04 | ||
25 | #define INT1_ENABLED_AND_ACTIVE (INTSCR_INT1_ENABLE | INTSCR_INT1_STATUS) | ||
26 | |||
27 | #define PCI_SUBVENDOR_ID_PEP 0x1518 | ||
28 | #define CIF_SUBDEVICE_PROFIBUS 0x430 | ||
29 | #define CIF_SUBDEVICE_DEVICENET 0x432 | ||
30 | |||
31 | |||
32 | static irqreturn_t hilscher_handler(int irq, struct uio_info *dev_info) | ||
33 | { | ||
34 | void __iomem *plx_intscr = dev_info->mem[0].internal_addr | ||
35 | + PLX9030_INTCSR; | ||
36 | |||
37 | if ((ioread8(plx_intscr) & INT1_ENABLED_AND_ACTIVE) | ||
38 | != INT1_ENABLED_AND_ACTIVE) | ||
39 | return IRQ_NONE; | ||
40 | |||
41 | /* Disable interrupt */ | ||
42 | iowrite8(ioread8(plx_intscr) & ~INTSCR_INT1_ENABLE, plx_intscr); | ||
43 | return IRQ_HANDLED; | ||
44 | } | ||
45 | |||
46 | static int __devinit hilscher_pci_probe(struct pci_dev *dev, | ||
47 | const struct pci_device_id *id) | ||
48 | { | ||
49 | struct uio_info *info; | ||
50 | |||
51 | info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); | ||
52 | if (!info) | ||
53 | return -ENOMEM; | ||
54 | |||
55 | if (pci_enable_device(dev)) | ||
56 | goto out_free; | ||
57 | |||
58 | if (pci_request_regions(dev, "hilscher")) | ||
59 | goto out_disable; | ||
60 | |||
61 | info->mem[0].addr = pci_resource_start(dev, 0); | ||
62 | if (!info->mem[0].addr) | ||
63 | goto out_release; | ||
64 | info->mem[0].internal_addr = ioremap(pci_resource_start(dev, 0), | ||
65 | pci_resource_len(dev, 0)); | ||
66 | if (!info->mem[0].internal_addr) | ||
67 | goto out_release; | ||
68 | |||
69 | info->mem[0].size = pci_resource_len(dev, 0); | ||
70 | info->mem[0].memtype = UIO_MEM_PHYS; | ||
71 | info->mem[1].addr = pci_resource_start(dev, 2); | ||
72 | info->mem[1].size = pci_resource_len(dev, 2); | ||
73 | info->mem[1].memtype = UIO_MEM_PHYS; | ||
74 | switch (id->subdevice) { | ||
75 | case CIF_SUBDEVICE_PROFIBUS: | ||
76 | info->name = "CIF_Profibus"; | ||
77 | break; | ||
78 | case CIF_SUBDEVICE_DEVICENET: | ||
79 | info->name = "CIF_Devicenet"; | ||
80 | break; | ||
81 | default: | ||
82 | info->name = "CIF_???"; | ||
83 | } | ||
84 | info->version = "0.0.1"; | ||
85 | info->irq = dev->irq; | ||
86 | info->irq_flags = IRQF_DISABLED | IRQF_SHARED; | ||
87 | info->handler = hilscher_handler; | ||
88 | |||
89 | if (uio_register_device(&dev->dev, info)) | ||
90 | goto out_unmap; | ||
91 | |||
92 | pci_set_drvdata(dev, info); | ||
93 | |||
94 | return 0; | ||
95 | out_unmap: | ||
96 | iounmap(info->mem[0].internal_addr); | ||
97 | out_release: | ||
98 | pci_release_regions(dev); | ||
99 | out_disable: | ||
100 | pci_disable_device(dev); | ||
101 | out_free: | ||
102 | kfree(info); | ||
103 | return -ENODEV; | ||
104 | } | ||
105 | |||
106 | static void hilscher_pci_remove(struct pci_dev *dev) | ||
107 | { | ||
108 | struct uio_info *info = pci_get_drvdata(dev); | ||
109 | |||
110 | uio_unregister_device(info); | ||
111 | pci_release_regions(dev); | ||
112 | pci_disable_device(dev); | ||
113 | pci_set_drvdata(dev, NULL); | ||
114 | iounmap(info->mem[0].internal_addr); | ||
115 | |||
116 | kfree(info); | ||
117 | } | ||
118 | |||
119 | static struct pci_device_id hilscher_pci_ids[] = { | ||
120 | { | ||
121 | .vendor = PCI_VENDOR_ID_PLX, | ||
122 | .device = PCI_DEVICE_ID_PLX_9030, | ||
123 | .subvendor = PCI_SUBVENDOR_ID_PEP, | ||
124 | .subdevice = CIF_SUBDEVICE_PROFIBUS, | ||
125 | }, | ||
126 | { | ||
127 | .vendor = PCI_VENDOR_ID_PLX, | ||
128 | .device = PCI_DEVICE_ID_PLX_9030, | ||
129 | .subvendor = PCI_SUBVENDOR_ID_PEP, | ||
130 | .subdevice = CIF_SUBDEVICE_DEVICENET, | ||
131 | }, | ||
132 | { 0, } | ||
133 | }; | ||
134 | |||
135 | static struct pci_driver hilscher_pci_driver = { | ||
136 | .name = "hilscher", | ||
137 | .id_table = hilscher_pci_ids, | ||
138 | .probe = hilscher_pci_probe, | ||
139 | .remove = hilscher_pci_remove, | ||
140 | }; | ||
141 | |||
142 | static int __init hilscher_init_module(void) | ||
143 | { | ||
144 | return pci_register_driver(&hilscher_pci_driver); | ||
145 | } | ||
146 | |||
147 | static void __exit hilscher_exit_module(void) | ||
148 | { | ||
149 | pci_unregister_driver(&hilscher_pci_driver); | ||
150 | } | ||
151 | |||
152 | module_init(hilscher_init_module); | ||
153 | module_exit(hilscher_exit_module); | ||
154 | |||
155 | MODULE_LICENSE("GPL v2"); | ||
156 | MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger"); | ||