author    Linus Torvalds <torvalds@g5.osdl.org>  2006-09-29 18:18:22 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-29 18:18:22 -0400
commit    9a69d1aeccf169d9a1e442c07d3a6e87f06a7b49 (patch)
tree      5597011c3595867bf0e073b8f4bdffefe9238a10 /drivers/infiniband
parent    c0341b0f47722fbe5ab45f436fc6ddc1c58c0a6f (diff)
parent    3d27b00457167103fb9f7e23fc2454c801a6b8f0 (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (33 commits)
IB/ipath: Fix lockdep error upon "ifconfig ibN down"
IB/ipath: Fix races with ib_resize_cq()
IB/ipath: Support new PCIE device, QLE7142
IB/ipath: Set CPU affinity early
IB/ipath: Fix EEPROM read when driver is compiled with -Os
IB/ipath: Fix and recover TXE piobuf and PBC parity errors
IB/ipath: Change HT CRC message to indicate how to resolve problem
IB/ipath: Clean up module exit code
IB/ipath: Call mtrr_del with correct arguments
IB/ipath: Flush RWQEs if access error or invalid error seen
IB/ipath: Improved support for PowerPC
IB/ipath: Drop unnecessary "(void *)" casts
IB/ipath: Support multiple simultaneous devices of different types
IB/ipath: Fix mismatch in shifts and masks for printing debug info
IB/ipath: Fix compiler warnings and errors on non-x86_64 systems
IB/ipath: Print more informative parity error messages
IB/ipath: Ensure that PD of MR matches PD of QP checking the Rkey
IB/ipath: RC and UC should validate SLID and DLID
IB/ipath: Only allow complete writes to flash
IB/ipath: Count SRQs properly
...
Diffstat (limited to 'drivers/infiniband')
38 files changed, 1973 insertions, 884 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index 08f46c83a3a4..3aae4978e1cb 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -197,7 +197,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 		 "resource=%x, qp_state=%s\n",
 		 __FUNCTION__,
 		 to_event_str(event_id),
-		 be64_to_cpu(wr->ae.ae_generic.user_context),
+		 (unsigned long long) be64_to_cpu(wr->ae.ae_generic.user_context),
 		 be32_to_cpu(wr->ae.ae_generic.resource_type),
 		 be32_to_cpu(wr->ae.ae_generic.resource),
 		 to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
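The cast added above is the recurring portability fix in this merge: depending on the architecture, u64 and dma_addr_t may be unsigned long rather than unsigned long long, so passing them straight to a %llx conversion draws compiler warnings. A minimal userspace sketch of the same pattern, with a made-up value rather than the driver's:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma_addr = 0xdeadbeefcafeULL;	/* stand-in for a DMA handle */

	/* uint64_t may be unsigned long or unsigned long long underneath,
	 * so cast explicitly to match %llx on every platform. */
	printf("dma_addr %llx\n", (unsigned long long) dma_addr);
	return 0;
}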
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 1d2529992c0c..028a60bbfca9 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -115,7 +115,7 @@ u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
 			((unsigned long) &(head->shared_ptr[mqsp]) -
 			 (unsigned long) head);
 		pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
-			 &(head->shared_ptr[mqsp]), (u64)*dma_addr);
+			 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
 		return &(head->shared_ptr[mqsp]);
 	}
 	return NULL;
diff --git a/drivers/infiniband/hw/amso1100/c2_cm.c b/drivers/infiniband/hw/amso1100/c2_cm.c
index 485254efdd1e..75b93e9b8810 100644
--- a/drivers/infiniband/hw/amso1100/c2_cm.c
+++ b/drivers/infiniband/hw/amso1100/c2_cm.c
@@ -302,7 +302,7 @@ int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	vq_req = vq_req_alloc(c2dev);
 	if (!vq_req) {
 		err = -ENOMEM;
-		goto bail1;
+		goto bail0;
 	}
 	vq_req->qp = qp;
 	vq_req->cm_id = cm_id;
@@ -311,7 +311,7 @@ int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
 	if (!wr) {
 		err = -ENOMEM;
-		goto bail2;
+		goto bail1;
 	}
 
 	/* Build the WR */
@@ -331,7 +331,7 @@ int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	/* Validate private_data length */
 	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
 		err = -EINVAL;
-		goto bail2;
+		goto bail1;
 	}
 
 	if (iw_param->private_data) {
@@ -348,19 +348,19 @@ int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	err = vq_send_wr(c2dev, (union c2wr *) wr);
 	if (err) {
 		vq_req_put(c2dev, vq_req);
-		goto bail2;
+		goto bail1;
 	}
 
 	/* Wait for reply from adapter */
 	err = vq_wait_for_reply(c2dev, vq_req);
 	if (err)
-		goto bail2;
+		goto bail1;
 
 	/* Check that reply is present */
 	reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
 	if (!reply) {
 		err = -ENOMEM;
-		goto bail2;
+		goto bail1;
 	}
 
 	err = c2_errno(reply);
@@ -368,9 +368,8 @@ int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 
 	if (!err)
 		c2_set_qp_state(qp, C2_QP_STATE_RTS);
-bail2:
-	kfree(wr);
 bail1:
+	kfree(wr);
 	vq_req_free(c2dev, vq_req);
 bail0:
 	if (err) {
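The label shuffle above restores the usual kernel unwind idiom: each bailN label releases exactly what was allocated before the failing step, and kfree(wr) now also runs on the success path. A standalone sketch of the idiom with plain malloc stand-ins, not the c2 verbs-queue API:

#include <stdlib.h>

/* One label per allocation stage; each releases only what exists
 * at that stage. Mirrors the structure c2_llp_accept() now has. */
static int accept_conn(void)
{
	int err = 0;
	char *vq_req, *wr = NULL;

	vq_req = malloc(64);		/* stand-in for vq_req_alloc() */
	if (!vq_req) {
		err = -1;
		goto bail0;		/* nothing allocated yet */
	}

	wr = malloc(128);		/* stand-in for kmalloc of the WR */
	if (!wr) {
		err = -1;
		goto bail1;		/* free(NULL) below is harmless */
	}

	/* ... build and send the work request ... */

bail1:
	free(wr);			/* runs on success and on failure */
	free(vq_req);			/* stand-in for vq_req_free() */
bail0:
	return err;
}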
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index dd6af551108b..da98d9f71429 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -390,14 +390,18 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
 	}
 
 	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
-	if (!mr)
+	if (!mr) {
+		vfree(page_list);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	mr->pd = to_c2pd(ib_pd);
 	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
 		 "*iova_start %llx, first pa %llx, last pa %llx\n",
 		 __FUNCTION__, page_shift, pbl_depth, total_len,
-		 *iova_start, page_list[0], page_list[pbl_depth-1]);
+		 (unsigned long long) *iova_start,
+		 (unsigned long long) page_list[0],
+		 (unsigned long long) page_list[pbl_depth-1]);
 	err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
 					 (1 << page_shift), pbl_depth,
 					 total_len, 0, iova_start,
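The c2_reg_phys_mr() fix above is the complementary leak case: when a later allocation fails, the page_list array allocated earlier must be released before returning. A tiny stand-in sketch, using plain malloc/free rather than the kernel's vmalloc/kmalloc, with invented names:

#include <stdlib.h>

struct mr { int placeholder; };

/* If the second allocation fails, release the first before bailing,
 * otherwise page_list leaks on every failed registration. */
static struct mr *reg_phys_mr(size_t npages)
{
	unsigned long long *page_list = malloc(npages * sizeof(*page_list));
	struct mr *mr;

	if (!page_list)
		return NULL;

	mr = malloc(sizeof(*mr));
	if (!mr) {
		free(page_list);	/* the leak the patch plugs */
		return NULL;
	}
	/* ... register page_list with the adapter, then free it ... */
	free(page_list);
	return mr;
}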
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index f49a32b7a8f6..e37c5688c214 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -527,7 +527,7 @@ int c2_rnic_init(struct c2_dev *c2dev)
 			     DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
-		 (u64)c2dev->rep_vq.host_dma);
+		 (unsigned long long) c2dev->rep_vq.host_dma);
 	c2_mq_rep_init(&c2dev->rep_vq,
 		       1,
 		       qsize,
@@ -550,7 +550,7 @@ int c2_rnic_init(struct c2_dev *c2dev)
 			     DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
-		 (u64)c2dev->rep_vq.host_dma);
+		 (unsigned long long) c2dev->rep_vq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
 		       2,
 		       qsize,
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index f577905e3aca..54139d398181 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -141,8 +141,9 @@ struct infinipath_stats {
 	 * packets if ipath not configured, etc.)
 	 */
 	__u64 sps_krdrops;
+	__u64 sps_txeparity; /* PIO buffer parity error, recovered */
 	/* pad for future growth */
-	__u64 __sps_pad[46];
+	__u64 __sps_pad[45];
 };
 
 /*
@@ -185,6 +186,9 @@ typedef enum _ipath_ureg {
 #define IPATH_RUNTIME_PCIE	0x2
 #define IPATH_RUNTIME_FORCE_WC_ORDER	0x4
 #define IPATH_RUNTIME_RCVHDR_COPY	0x8
+#define IPATH_RUNTIME_MASTER	0x10
+#define IPATH_RUNTIME_PBC_REWRITE	0x20
+#define IPATH_RUNTIME_LOOSE_DMA_ALIGN	0x40
 
 /*
  * This structure is returned by ipath_userinit() immediately after
@@ -202,7 +206,8 @@ struct ipath_base_info {
 	/* version of software, for feature checking. */
 	__u32 spi_sw_version;
 	/* InfiniPath port assigned, goes into sent packets */
-	__u32 spi_port;
+	__u16 spi_port;
+	__u16 spi_subport;
 	/*
 	 * IB MTU, packets IB data must be less than this.
 	 * The MTU is in bytes, and will be a multiple of 4 bytes.
@@ -218,7 +223,7 @@ struct ipath_base_info {
 	__u32 spi_tidcnt;
 	/* size of the TID Eager list in infinipath, in entries */
 	__u32 spi_tidegrcnt;
-	/* size of a single receive header queue entry. */
+	/* size of a single receive header queue entry in words. */
 	__u32 spi_rcvhdrent_size;
 	/*
 	 * Count of receive header queue entries allocated.
@@ -310,6 +315,12 @@ struct ipath_base_info {
 	__u32 spi_filler_for_align;
 	/* address of readonly memory copy of the rcvhdrq tail register. */
 	__u64 spi_rcvhdr_tailaddr;
+
+	/* shared memory pages for subports if IPATH_RUNTIME_MASTER is set */
+	__u64 spi_subport_uregbase;
+	__u64 spi_subport_rcvegrbuf;
+	__u64 spi_subport_rcvhdr_base;
+
 } __attribute__ ((aligned(8)));
 
 
@@ -328,12 +339,12 @@ struct ipath_base_info {
 
 /*
  * Minor version differences are always compatible
- * a within a major version, however if if user software is larger
+ * a within a major version, however if user software is larger
  * than driver software, some new features and/or structure fields
  * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference
+ * cares, or it must abort after initialization reports the difference.
  */
-#define IPATH_USER_SWMINOR 2
+#define IPATH_USER_SWMINOR 3
 
 #define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
 
@@ -379,7 +390,16 @@ struct ipath_user_info {
 	 */
 	__u32 spu_rcvhdrsize;
 
-	__u64 spu_unused; /* kept for compatible layout */
+	/*
+	 * If two or more processes wish to share a port, each process
+	 * must set the spu_subport_cnt and spu_subport_id to the same
+	 * values.  The only restriction on the spu_subport_id is that
+	 * it be unique for a given node.
+	 */
+	__u16 spu_subport_cnt;
+	__u16 spu_subport_id;
+
+	__u32 spu_unused; /* kept for compatible layout */
 
 	/*
 	 * address of struct base_info to write to
@@ -392,19 +412,25 @@ struct ipath_user_info {
 
 #define IPATH_CMD_MIN		16
 
-#define IPATH_CMD_USER_INIT	16	/* set up userspace */
+#define __IPATH_CMD_USER_INIT	16	/* old set up userspace (for old user code) */
 #define IPATH_CMD_PORT_INFO	17	/* find out what resources we got */
 #define IPATH_CMD_RECV_CTRL	18	/* control receipt of packets */
 #define IPATH_CMD_TID_UPDATE	19	/* update expected TID entries */
 #define IPATH_CMD_TID_FREE	20	/* free expected TID entries */
 #define IPATH_CMD_SET_PART_KEY	21	/* add partition key */
+#define IPATH_CMD_SLAVE_INFO	22	/* return info on slave processes */
+#define IPATH_CMD_ASSIGN_PORT	23	/* allocate HCA and port */
+#define IPATH_CMD_USER_INIT	24	/* set up userspace */
 
-#define IPATH_CMD_MAX		21
+#define IPATH_CMD_MAX		24
 
 struct ipath_port_info {
 	__u32 num_active;	/* number of active units */
 	__u32 unit;		/* unit (chip) assigned to caller */
-	__u32 port;		/* port on unit assigned to caller */
+	__u16 port;		/* port on unit assigned to caller */
+	__u16 subport;		/* subport on unit assigned to caller */
+	__u16 num_ports;	/* number of ports available on unit */
+	__u16 num_subports;	/* number of subport slaves opened on port */
 };
 
 struct ipath_tid_info {
@@ -435,6 +461,8 @@ struct ipath_cmd {
 		__u32 recv_ctrl;
 		/* partition key to set */
 		__u16 part_key;
+		/* user address of __u32 bitmask of active slaves */
+		__u64 slave_mask_addr;
 	} cmd;
 };
 
@@ -596,6 +624,10 @@ struct infinipath_counters {
 
 /* K_PktFlags bits */
 #define INFINIPATH_KPF_INTR 0x1
+#define INFINIPATH_KPF_SUBPORT_MASK 0x3
+#define INFINIPATH_KPF_SUBPORT_SHIFT 1
+
+#define INFINIPATH_MAX_SUBPORT 4
 
 /* SendPIO per-buffer control */
 #define INFINIPATH_SP_TEST 0x40
@@ -610,7 +642,7 @@ struct ipath_header {
 	/*
 	 * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
 	 * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
-	 * Port 3, TID 11, offset 14.
+	 * Port 4, TID 11, offset 13.
 	 */
 	__le32 ver_port_tid_offset;
 	__le16 chksum;
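The IPATH_USER_SWVERSION word above packs the major version in the high 16 bits and the minor in the low 16, which is why the subport ABI additions only bump IPATH_USER_SWMINOR from 2 to 3: minors are compatible within a major. A quick standalone sketch of that check (the helper name is ours, not the driver's):

#include <stdio.h>

#define IPATH_USER_SWMAJOR 1
#define IPATH_USER_SWMINOR 3
#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR << 16) | IPATH_USER_SWMINOR)

/* Hypothetical compatibility rule: minors are compatible within a
 * major, but a newer user minor may find driver features missing. */
static int swversion_compatible(unsigned int user, unsigned int driver)
{
	return (user >> 16) == (driver >> 16);
}

int main(void)
{
	unsigned int driver = (1 << 16) | 2;	/* e.g. a driver still at 1.2 */

	printf("user 0x%x driver 0x%x compatible=%d\n",
	       IPATH_USER_SWVERSION, driver,
	       swversion_compatible(IPATH_USER_SWVERSION, driver));
	return 0;
}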
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 049221bc590e..87462e0cb4d2 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -46,7 +46,7 @@
  */
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 {
-	struct ipath_cq_wc *wc = cq->queue;
+	struct ipath_cq_wc *wc;
 	unsigned long flags;
 	u32 head;
 	u32 next;
@@ -57,6 +57,7 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 	 * Note that the head pointer might be writable by user processes.
 	 * Take care to verify it is a sane value.
 	 */
+	wc = cq->queue;
 	head = wc->head;
 	if (head >= (unsigned) cq->ibcq.cqe) {
 		head = cq->ibcq.cqe;
@@ -109,21 +110,27 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 {
 	struct ipath_cq *cq = to_icq(ibcq);
-	struct ipath_cq_wc *wc = cq->queue;
+	struct ipath_cq_wc *wc;
 	unsigned long flags;
 	int npolled;
+	u32 tail;
 
 	spin_lock_irqsave(&cq->lock, flags);
 
+	wc = cq->queue;
+	tail = wc->tail;
+	if (tail > (u32) cq->ibcq.cqe)
+		tail = (u32) cq->ibcq.cqe;
 	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
-		if (wc->tail == wc->head)
+		if (tail == wc->head)
 			break;
-		*entry = wc->queue[wc->tail];
-		if (wc->tail >= cq->ibcq.cqe)
-			wc->tail = 0;
+		*entry = wc->queue[tail];
+		if (tail >= cq->ibcq.cqe)
+			tail = 0;
 		else
-			wc->tail++;
+			tail++;
 	}
+	wc->tail = tail;
 
 	spin_unlock_irqrestore(&cq->lock, flags);
 
@@ -177,11 +184,6 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 		goto done;
 	}
 
-	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
-		ret = ERR_PTR(-ENOMEM);
-		goto done;
-	}
-
 	/* Allocate the completion queue structure. */
 	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
 	if (!cq) {
@@ -237,6 +239,16 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 	} else
 		cq->ip = NULL;
 
+	spin_lock(&dev->n_cqs_lock);
+	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+		spin_unlock(&dev->n_cqs_lock);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_wc;
+	}
+
+	dev->n_cqs_allocated++;
+	spin_unlock(&dev->n_cqs_lock);
+
 	/*
 	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
 	 * The number of entries should be >= the number requested or return
@@ -253,7 +265,6 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
 	ret = &cq->ibcq;
 
-	dev->n_cqs_allocated++;
 	goto done;
 
 bail_wc:
@@ -280,7 +291,9 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
 	struct ipath_cq *cq = to_icq(ibcq);
 
 	tasklet_kill(&cq->comptask);
+	spin_lock(&dev->n_cqs_lock);
 	dev->n_cqs_allocated--;
+	spin_unlock(&dev->n_cqs_lock);
 	if (cq->ip)
 		kref_put(&cq->ip->ref, ipath_release_mmap_info);
 	else
@@ -316,10 +329,16 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 	return 0;
 }
 
+/**
+ * ipath_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ *
+ * Returns 0 for success.
+ */
 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 {
 	struct ipath_cq *cq = to_icq(ibcq);
-	struct ipath_cq_wc *old_wc = cq->queue;
+	struct ipath_cq_wc *old_wc;
 	struct ipath_cq_wc *wc;
 	u32 head, tail, n;
 	int ret;
@@ -355,6 +374,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	 * Make sure head and tail are sane since they
 	 * might be user writable.
 	 */
+	old_wc = cq->queue;
 	head = old_wc->head;
 	if (head > (u32) cq->ibcq.cqe)
 		head = (u32) cq->ibcq.cqe;
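The poll path above ("Fix races with ib_resize_cq()" in the shortlog) copies the user-writable tail into a local, clamps it, works on the copy, and writes it back once. A standalone sketch of that defensive pattern for shared, user-writable ring indices (simplified types, not the ipath structures):

#include <stdint.h>

#define RING_ENTRIES 8

/* Ring whose head/tail may be mapped writable into userspace. */
struct ring {
	uint32_t head;
	uint32_t tail;
	int entries[RING_ENTRIES];
};

/* Read the untrusted tail once, clamp it to a valid index, iterate on
 * the local copy, and publish it at the end, so a hostile writer
 * cannot push the loop out of bounds midway. */
static int ring_drain(struct ring *r, int *out, int max)
{
	uint32_t tail = r->tail;
	int n = 0;

	if (tail >= RING_ENTRIES)
		tail = RING_ENTRIES - 1;	/* sanitize untrusted value */

	while (n < max && tail != r->head) {
		out[n++] = r->entries[tail];
		tail = (tail + 1) % RING_ENTRIES;
	}
	r->tail = tail;
	return n;
}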
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2108466c7e33..12cefa658f3b 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -95,16 +95,6 @@ const char *ipath_ibcstatus_str[] = {
 	"RecovIdle",
 };
 
-/*
- * These variables are initialized in the chip-specific files
- * but are defined here.
- */
-u16 ipath_gpio_sda_num, ipath_gpio_scl_num;
-u64 ipath_gpio_sda, ipath_gpio_scl;
-u64 infinipath_i_bitsextant;
-ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
-u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
-
 static void __devexit ipath_remove_one(struct pci_dev *);
 static int __devinit ipath_init_one(struct pci_dev *,
 				    const struct pci_device_id *);
@@ -527,28 +517,146 @@ bail:
 	return ret;
 }
 
+static void __devexit cleanup_device(struct ipath_devdata *dd)
+{
+	int port;
+
+	ipath_shutdown_device(dd);
+
+	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
+		/* can't do anything more with chip; needs re-init */
+		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
+		if (dd->ipath_kregbase) {
+			/*
+			 * if we haven't already cleaned up before these are
+			 * to ensure any register reads/writes "fail" until
+			 * re-init
+			 */
+			dd->ipath_kregbase = NULL;
+			dd->ipath_uregbase = 0;
+			dd->ipath_sregbase = 0;
+			dd->ipath_cregbase = 0;
+			dd->ipath_kregsize = 0;
+		}
+		ipath_disable_wc(dd);
+	}
+
+	if (dd->ipath_pioavailregs_dma) {
+		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+				  (void *) dd->ipath_pioavailregs_dma,
+				  dd->ipath_pioavailregs_phys);
+		dd->ipath_pioavailregs_dma = NULL;
+	}
+	if (dd->ipath_dummy_hdrq) {
+		dma_free_coherent(&dd->pcidev->dev,
+				  dd->ipath_pd[0]->port_rcvhdrq_size,
+				  dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
+		dd->ipath_dummy_hdrq = NULL;
+	}
+
+	if (dd->ipath_pageshadow) {
+		struct page **tmpp = dd->ipath_pageshadow;
+		dma_addr_t *tmpd = dd->ipath_physshadow;
+		int i, cnt = 0;
+
+		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
+			   "locked\n");
+		for (port = 0; port < dd->ipath_cfgports; port++) {
+			int port_tidbase = port * dd->ipath_rcvtidcnt;
+			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
+			for (i = port_tidbase; i < maxtid; i++) {
+				if (!tmpp[i])
+					continue;
+				pci_unmap_page(dd->pcidev, tmpd[i],
+					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				ipath_release_user_pages(&tmpp[i], 1);
+				tmpp[i] = NULL;
+				cnt++;
+			}
+		}
+		if (cnt) {
+			ipath_stats.sps_pageunlocks += cnt;
+			ipath_cdbg(VERBOSE, "There were still %u expTID "
+				   "entries locked\n", cnt);
+		}
+		if (ipath_stats.sps_pagelocks ||
+		    ipath_stats.sps_pageunlocks)
+			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
+				   "unlocked via ipath_m{un}lock\n",
+				   (unsigned long long)
+				   ipath_stats.sps_pagelocks,
+				   (unsigned long long)
+				   ipath_stats.sps_pageunlocks);
+
+		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
+			   dd->ipath_pageshadow);
+		vfree(dd->ipath_pageshadow);
+		dd->ipath_pageshadow = NULL;
+	}
+
+	/*
+	 * free any resources still in use (usually just kernel ports)
+	 * at unload; we do for portcnt, not cfgports, because cfgports
+	 * could have changed while we were loaded.
+	 */
+	for (port = 0; port < dd->ipath_portcnt; port++) {
+		struct ipath_portdata *pd = dd->ipath_pd[port];
+		dd->ipath_pd[port] = NULL;
+		ipath_free_pddata(dd, pd);
+	}
+	kfree(dd->ipath_pd);
+	/*
+	 * debuggability, in case some cleanup path tries to use it
+	 * after this
+	 */
+	dd->ipath_pd = NULL;
+}
+
 static void __devexit ipath_remove_one(struct pci_dev *pdev)
 {
-	struct ipath_devdata *dd;
+	struct ipath_devdata *dd = pci_get_drvdata(pdev);
 
-	ipath_cdbg(VERBOSE, "removing, pdev=%p\n", pdev);
-	if (!pdev)
-		return;
+	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
+
+	if (dd->verbs_dev)
+		ipath_unregister_ib_device(dd->verbs_dev);
 
-	dd = pci_get_drvdata(pdev);
-	ipath_unregister_ib_device(dd->verbs_dev);
 	ipath_diag_remove(dd);
 	ipath_user_remove(dd);
 	ipathfs_remove_device(dd);
 	ipath_device_remove_group(&pdev->dev, dd);
+
 	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
 		   "unit %u\n", dd, (u32) dd->ipath_unit);
-	if (dd->ipath_kregbase) {
-		ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n",
-			   dd->ipath_kregbase);
-		iounmap((volatile void __iomem *) dd->ipath_kregbase);
-		dd->ipath_kregbase = NULL;
-	}
+
+	cleanup_device(dd);
+
+	/*
+	 * turn off rcv, send, and interrupts for all ports, all drivers
+	 * should also hard reset the chip here?
+	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
+	 * for all versions of the driver, if they were allocated
+	 */
+	if (pdev->irq) {
+		ipath_cdbg(VERBOSE,
+			   "unit %u free_irq of irq %x\n",
+			   dd->ipath_unit, pdev->irq);
+		free_irq(pdev->irq, dd);
+	} else
+		ipath_dbg("irq is 0, not doing free_irq "
+			  "for unit %u\n", dd->ipath_unit);
+	/*
+	 * we check for NULL here, because it's outside
+	 * the kregbase check, and we need to call it
+	 * after the free_irq.  Thus it's possible that
+	 * the function pointers were never initialized.
+	 */
+	if (dd->ipath_f_cleanup)
+		/* clean up chip-specific stuff */
+		dd->ipath_f_cleanup(dd);
+
+	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
+	iounmap((volatile void __iomem *) dd->ipath_kregbase);
 	pci_release_regions(pdev);
 	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
 	pci_disable_device(pdev);
@@ -760,8 +868,8 @@ static void get_rhf_errstring(u32 err, char *msg, size_t len)
 static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
 				     int err)
 {
-	return dd->ipath_port0_skbs ?
-		(void *)dd->ipath_port0_skbs[bufnum]->data : NULL;
+	return dd->ipath_port0_skbinfo ?
+		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
 }
 
 /**
@@ -783,31 +891,34 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
 	 */
 
 	/*
-	 * We need 4 extra bytes for unaligned transfer copying
+	 * We need 2 extra bytes for ipath_ether data sent in the
+	 * key header.  In order to keep everything dword aligned,
+	 * we'll reserve 4 bytes.
 	 */
+	len = dd->ipath_ibmaxlen + 4;
+
 	if (dd->ipath_flags & IPATH_4BYTE_TID) {
-		/* we need a 4KB multiple alignment, and there is no way
+		/* We need a 2KB multiple alignment, and there is no way
 		 * to do it except to allocate extra and then skb_reserve
 		 * enough to bring it up to the right alignment.
 		 */
-		len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1;
+		len += 2047;
 	}
-	else
-		len = dd->ipath_ibmaxlen + 4;
+
 	skb = __dev_alloc_skb(len, gfp_mask);
 	if (!skb) {
 		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
 			      len);
 		goto bail;
 	}
+
+	skb_reserve(skb, 4);
+
 	if (dd->ipath_flags & IPATH_4BYTE_TID) {
-		u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4);
+		u32 una = (unsigned long)skb->data & 2047;
 		if (una)
-			skb_reserve(skb, 4 + (1 << 11) - una);
-		else
-			skb_reserve(skb, 4);
-	} else
-		skb_reserve(skb, 4);
+			skb_reserve(skb, 2048 - una);
+	}
 
 bail:
 	return skb;
@@ -1326,6 +1437,9 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 				 "for port %u rcvhdrqtailaddr failed\n",
 				 pd->port_port);
 			ret = -ENOMEM;
+			dma_free_coherent(&dd->pcidev->dev, amt,
+					  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
+			pd->port_rcvhdrq = NULL;
 			goto bail;
 		}
 		pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
@@ -1347,12 +1461,13 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 	ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
 		   "hdrtailaddr@%p %llx physical\n",
 		   pd->port_port, pd->port_rcvhdrq,
-		   pd->port_rcvhdrq_phys, pd->port_rcvhdrtail_kvaddr,
-		   (unsigned long long)pd->port_rcvhdrqtailaddr_phys);
+		   (unsigned long long) pd->port_rcvhdrq_phys,
+		   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
+		   pd->port_rcvhdrqtailaddr_phys);
 
 	/* clear for security and sanity on each use */
 	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
-	memset((void *)pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
+	memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
 
 	/*
 	 * tell chip each time we init it, even if we are re-using previous
@@ -1805,7 +1920,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
 	pd->port_rcvhdrq = NULL;
 	if (pd->port_rcvhdrtail_kvaddr) {
 		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-				  (void *)pd->port_rcvhdrtail_kvaddr,
+				  pd->port_rcvhdrtail_kvaddr,
 				  pd->port_rcvhdrqtailaddr_phys);
 		pd->port_rcvhdrtail_kvaddr = NULL;
 	}
@@ -1824,24 +1939,32 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
 			dma_free_coherent(&dd->pcidev->dev, size,
 					  base, pd->port_rcvegrbuf_phys[e]);
 		}
-		vfree(pd->port_rcvegrbuf);
+		kfree(pd->port_rcvegrbuf);
 		pd->port_rcvegrbuf = NULL;
-		vfree(pd->port_rcvegrbuf_phys);
+		kfree(pd->port_rcvegrbuf_phys);
 		pd->port_rcvegrbuf_phys = NULL;
 		pd->port_rcvegrbuf_chunks = 0;
-	} else if (pd->port_port == 0 && dd->ipath_port0_skbs) {
+	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
 		unsigned e;
-		struct sk_buff **skbs = dd->ipath_port0_skbs;
+		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
 
-		dd->ipath_port0_skbs = NULL;
-		ipath_cdbg(VERBOSE, "free closed port %d ipath_port0_skbs "
-			   "@ %p\n", pd->port_port, skbs);
+		dd->ipath_port0_skbinfo = NULL;
+		ipath_cdbg(VERBOSE, "free closed port %d "
+			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
+			   skbinfo);
 		for (e = 0; e < dd->ipath_rcvegrcnt; e++)
-			if (skbs[e])
-				dev_kfree_skb(skbs[e]);
-		vfree(skbs);
+			if (skbinfo[e].skb) {
+				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
+						 dd->ipath_ibmaxlen,
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb(skbinfo[e].skb);
+			}
+		vfree(skbinfo);
 	}
 	kfree(pd->port_tid_pg_list);
+	vfree(pd->subport_uregbase);
+	vfree(pd->subport_rcvegrbuf);
+	vfree(pd->subport_rcvhdr_base);
 	kfree(pd);
 }
 
@@ -1907,150 +2030,12 @@ bail:
 	return ret;
 }
 
-static void cleanup_device(struct ipath_devdata *dd)
-{
-	int port;
-
-	ipath_shutdown_device(dd);
-
-	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
-		/* can't do anything more with chip; needs re-init */
-		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
-		if (dd->ipath_kregbase) {
-			/*
-			 * if we haven't already cleaned up before these are
-			 * to ensure any register reads/writes "fail" until
-			 * re-init
-			 */
-			dd->ipath_kregbase = NULL;
-			dd->ipath_uregbase = 0;
-			dd->ipath_sregbase = 0;
-			dd->ipath_cregbase = 0;
-			dd->ipath_kregsize = 0;
-		}
-		ipath_disable_wc(dd);
-	}
-
-	if (dd->ipath_pioavailregs_dma) {
-		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-				  (void *) dd->ipath_pioavailregs_dma,
-				  dd->ipath_pioavailregs_phys);
-		dd->ipath_pioavailregs_dma = NULL;
-	}
-	if (dd->ipath_dummy_hdrq) {
-		dma_free_coherent(&dd->pcidev->dev,
-				  dd->ipath_pd[0]->port_rcvhdrq_size,
-				  dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
-		dd->ipath_dummy_hdrq = NULL;
-	}
-
-	if (dd->ipath_pageshadow) {
-		struct page **tmpp = dd->ipath_pageshadow;
-		int i, cnt = 0;
-
-		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
-			   "locked\n");
-		for (port = 0; port < dd->ipath_cfgports; port++) {
-			int port_tidbase = port * dd->ipath_rcvtidcnt;
-			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-			for (i = port_tidbase; i < maxtid; i++) {
-				if (!tmpp[i])
-					continue;
-				ipath_release_user_pages(&tmpp[i], 1);
-				tmpp[i] = NULL;
-				cnt++;
-			}
-		}
-		if (cnt) {
-			ipath_stats.sps_pageunlocks += cnt;
-			ipath_cdbg(VERBOSE, "There were still %u expTID "
-				   "entries locked\n", cnt);
-		}
-		if (ipath_stats.sps_pagelocks ||
-		    ipath_stats.sps_pageunlocks)
-			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
-				   "unlocked via ipath_m{un}lock\n",
-				   (unsigned long long)
-				   ipath_stats.sps_pagelocks,
-				   (unsigned long long)
-				   ipath_stats.sps_pageunlocks);
-
-		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
-			   dd->ipath_pageshadow);
-		vfree(dd->ipath_pageshadow);
-		dd->ipath_pageshadow = NULL;
-	}
-
-	/*
-	 * free any resources still in use (usually just kernel ports)
-	 * at unload; we do for portcnt, not cfgports, because cfgports
-	 * could have changed while we were loaded.
-	 */
-	for (port = 0; port < dd->ipath_portcnt; port++) {
-		struct ipath_portdata *pd = dd->ipath_pd[port];
-		dd->ipath_pd[port] = NULL;
-		ipath_free_pddata(dd, pd);
-	}
-	kfree(dd->ipath_pd);
-	/*
-	 * debuggability, in case some cleanup path tries to use it
-	 * after this
-	 */
-	dd->ipath_pd = NULL;
-}
-
 static void __exit infinipath_cleanup(void)
 {
-	struct ipath_devdata *dd, *tmp;
-	unsigned long flags;
-
-	ipath_diagpkt_remove();
-
 	ipath_exit_ipathfs();
 
 	ipath_driver_remove_group(&ipath_driver.driver);
 
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	/*
-	 * turn off rcv, send, and interrupts for all ports, all drivers
-	 * should also hard reset the chip here?
-	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
-	 * for all versions of the driver, if they were allocated
-	 */
-	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-		spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-		if (dd->ipath_kregbase)
-			cleanup_device(dd);
-
-		if (dd->pcidev) {
-			if (dd->pcidev->irq) {
-				ipath_cdbg(VERBOSE,
-					   "unit %u free_irq of irq %x\n",
-					   dd->ipath_unit, dd->pcidev->irq);
-				free_irq(dd->pcidev->irq, dd);
-			} else
-				ipath_dbg("irq is 0, not doing free_irq "
-					  "for unit %u\n", dd->ipath_unit);
-
-			/*
-			 * we check for NULL here, because it's outside
-			 * the kregbase check, and we need to call it
-			 * after the free_irq.  Thus it's possible that
-			 * the function pointers were never initialized.
-			 */
-			if (dd->ipath_f_cleanup)
-				/* clean up chip-specific stuff */
-				dd->ipath_f_cleanup(dd);
-
-			dd->pcidev = NULL;
-		}
-		spin_lock_irqsave(&ipath_devs_lock, flags);
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
 	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
 	pci_unregister_driver(&ipath_driver);
 
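The ipath_alloc_skb() rework above replaces the old 4KB-multiple reserve logic with 2KB alignment: over-allocate by 2047 bytes, then skb_reserve() the distance to the next 2048-byte boundary. A standalone sketch of the same alignment arithmetic, using a plain buffer instead of an sk_buff:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN 2048

int main(void)
{
	size_t payload = 4096;
	unsigned char *buf, *data;
	uintptr_t una;

	/* Over-allocate by ALIGN-1 so an aligned start always fits. */
	buf = malloc(payload + ALIGN - 1);
	if (!buf)
		return 1;

	data = buf;
	una = (uintptr_t) buf & (ALIGN - 1);	/* bytes past a boundary */
	if (una)
		data += ALIGN - una;	/* mirrors skb_reserve(skb, 2048 - una) */

	printf("buf %p, aligned data %p, skipped %zu bytes\n",
	       (void *) buf, (void *) data, (size_t) (data - buf));
	free(buf);
	return 0;
}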
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index 3313356ab93a..a4019a6b7560 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -100,9 +100,9 @@ static int i2c_gpio_set(struct ipath_devdata *dd,
 	gpioval = &dd->ipath_gpio_out;
 	read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
 	if (line == i2c_line_scl)
-		mask = ipath_gpio_scl;
+		mask = dd->ipath_gpio_scl;
 	else
-		mask = ipath_gpio_sda;
+		mask = dd->ipath_gpio_sda;
 
 	if (new_line_state == i2c_line_high)
 		/* tri-state the output rather than force high */
@@ -119,12 +119,12 @@ static int i2c_gpio_set(struct ipath_devdata *dd,
 		write_val = 0x0UL;
 
 	if (line == i2c_line_scl) {
-		write_val <<= ipath_gpio_scl_num;
-		*gpioval = *gpioval & ~(1UL << ipath_gpio_scl_num);
+		write_val <<= dd->ipath_gpio_scl_num;
+		*gpioval = *gpioval & ~(1UL << dd->ipath_gpio_scl_num);
 		*gpioval |= write_val;
 	} else {
-		write_val <<= ipath_gpio_sda_num;
-		*gpioval = *gpioval & ~(1UL << ipath_gpio_sda_num);
+		write_val <<= dd->ipath_gpio_sda_num;
+		*gpioval = *gpioval & ~(1UL << dd->ipath_gpio_sda_num);
 		*gpioval |= write_val;
 	}
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
@@ -157,9 +157,9 @@ static int i2c_gpio_get(struct ipath_devdata *dd,
 	read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
 	/* config line to be an input */
 	if (line == i2c_line_scl)
-		mask = ipath_gpio_scl;
+		mask = dd->ipath_gpio_scl;
 	else
-		mask = ipath_gpio_sda;
+		mask = dd->ipath_gpio_sda;
 	write_val = read_val & ~mask;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
 	read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
@@ -187,6 +187,7 @@ bail:
 static void i2c_wait_for_writes(struct ipath_devdata *dd)
 {
 	(void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+	rmb();
 }
 
 static void scl_out(struct ipath_devdata *dd, u8 bit)
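Moving the I2C GPIO numbers and masks from file-scope globals into struct ipath_devdata is part of what enables "Support multiple simultaneous devices of different types" in the shortlog: with globals, the second chip probed would clobber the first one's pin assignments. A minimal sketch of the refactor's shape, with simplified fields and hypothetical pin numbers rather than the real layout:

#include <stdint.h>
#include <stdio.h>

/* Per-device I2C wiring; different chip types use different pins. */
struct dev {
	const char *name;
	unsigned gpio_scl_num;
	unsigned gpio_sda_num;
};

static uint64_t scl_mask(const struct dev *dd)
{
	return 1ULL << dd->gpio_scl_num;	/* per-device, not global */
}

int main(void)
{
	struct dev ht = { "ht-chip", 8, 9 };	/* hypothetical pins */
	struct dev pcie = { "pcie-chip", 10, 11 };

	printf("%s scl mask %llx, %s scl mask %llx\n",
	       ht.name, (unsigned long long) scl_mask(&ht),
	       pcie.name, (unsigned long long) scl_mask(&pcie));
	return 0;
}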
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 29930e22318e..a9ddc6911f66 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -41,6 +41,12 @@
 #include "ipath_kernel.h"
 #include "ipath_common.h"
 
+/*
+ * mmap64 doesn't allow all 64 bits for 32-bit applications
+ * so only use the low 43 bits.
+ */
+#define MMAP64_MASK	0x7FFFFFFFFFFUL
+
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
 static ssize_t ipath_write(struct file *, const char __user *, size_t,
@@ -57,18 +63,35 @@ static struct file_operations ipath_file_ops = {
 	.mmap = ipath_mmap
 };
 
-static int ipath_get_base_info(struct ipath_portdata *pd,
+static int ipath_get_base_info(struct file *fp,
 			       void __user *ubase, size_t ubase_size)
 {
+	struct ipath_portdata *pd = port_fp(fp);
 	int ret = 0;
 	struct ipath_base_info *kinfo = NULL;
 	struct ipath_devdata *dd = pd->port_dd;
+	unsigned subport_cnt;
+	int shared, master;
+	size_t sz;
+
+	subport_cnt = pd->port_subport_cnt;
+	if (!subport_cnt) {
+		shared = 0;
+		master = 0;
+		subport_cnt = 1;
+	} else {
+		shared = 1;
+		master = !subport_fp(fp);
+	}
 
-	if (ubase_size < sizeof(*kinfo)) {
+	sz = sizeof(*kinfo);
+	/* If port sharing is not requested, allow the old size structure */
+	if (!shared)
+		sz -= 3 * sizeof(u64);
+	if (ubase_size < sz) {
 		ipath_cdbg(PROC,
-			   "Base size %lu, need %lu (version mismatch?)\n",
-			   (unsigned long) ubase_size,
-			   (unsigned long) sizeof(*kinfo));
+			   "Base size %zu, need %zu (version mismatch?)\n",
+			   ubase_size, sz);
 		ret = -EINVAL;
 		goto bail;
 	}
@@ -95,7 +118,9 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
 	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
 	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
 		pd->port_rcvegrbuf_chunks;
-	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
+	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
+	if (master)
+		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
 	/*
 	 * for this use, may be ipath_cfgports summed over all chips that
 	 * are are configured and present
@@ -118,31 +143,75 @@ static int ipath_get_base_info(struct ipath_portdata *pd, | |||
118 | * page_address() macro worked, but in 2.6.11, even that returns the | 143 | * page_address() macro worked, but in 2.6.11, even that returns the |
119 | * full 64 bit address (upper bits all 1's). So far, using the | 144 | * full 64 bit address (upper bits all 1's). So far, using the |
120 | * physical addresses (or chip offsets, for chip mapping) works, but | 145 | * physical addresses (or chip offsets, for chip mapping) works, but |
121 | * no doubt some future kernel release will chang that, and we'll be | 146 | * no doubt some future kernel release will change that, and we'll be |
122 | * on to yet another method of dealing with this | 147 | * on to yet another method of dealing with this. |
123 | */ | 148 | */ |
124 | kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; | 149 | kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; |
125 | kinfo->spi_rcvhdr_tailaddr = (u64)pd->port_rcvhdrqtailaddr_phys; | 150 | kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys; |
126 | kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; | 151 | kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; |
127 | kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys; | 152 | kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys; |
128 | kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + | 153 | kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + |
129 | (void *) dd->ipath_statusp - | 154 | (void *) dd->ipath_statusp - |
130 | (void *) dd->ipath_pioavailregs_dma; | 155 | (void *) dd->ipath_pioavailregs_dma; |
131 | kinfo->spi_piobufbase = (u64) pd->port_piobufs; | 156 | if (!shared) { |
132 | kinfo->__spi_uregbase = | 157 | kinfo->spi_piocnt = dd->ipath_pbufsport; |
133 | dd->ipath_uregbase + dd->ipath_palign * pd->port_port; | 158 | kinfo->spi_piobufbase = (u64) pd->port_piobufs; |
159 | kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + | ||
160 | dd->ipath_palign * pd->port_port; | ||
161 | } else if (master) { | ||
162 | kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) + | ||
163 | (dd->ipath_pbufsport % subport_cnt); | ||
164 | /* Master's PIO buffers are after all the slave's */ | ||
165 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + | ||
166 | dd->ipath_palign * | ||
167 | (dd->ipath_pbufsport - kinfo->spi_piocnt); | ||
168 | kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + | ||
169 | dd->ipath_palign * pd->port_port; | ||
170 | } else { | ||
171 | unsigned slave = subport_fp(fp) - 1; | ||
172 | |||
173 | kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; | ||
174 | kinfo->spi_piobufbase = (u64) pd->port_piobufs + | ||
175 | dd->ipath_palign * kinfo->spi_piocnt * slave; | ||
176 | kinfo->__spi_uregbase = ((u64) pd->subport_uregbase + | ||
177 | PAGE_SIZE * slave) & MMAP64_MASK; | ||
134 | 178 | ||
135 | kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1); | 179 | kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base + |
136 | kinfo->spi_piocnt = dd->ipath_pbufsport; | 180 | pd->port_rcvhdrq_size * slave) & MMAP64_MASK; |
181 | kinfo->spi_rcvhdr_tailaddr = | ||
182 | (u64) pd->port_rcvhdrqtailaddr_phys & MMAP64_MASK; | ||
183 | kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf + | ||
184 | dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave) & | ||
185 | MMAP64_MASK; | ||
186 | } | ||
187 | |||
188 | kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / | ||
189 | dd->ipath_palign; | ||
137 | kinfo->spi_pioalign = dd->ipath_palign; | 190 | kinfo->spi_pioalign = dd->ipath_palign; |
138 | 191 | ||
139 | kinfo->spi_qpair = IPATH_KD_QP; | 192 | kinfo->spi_qpair = IPATH_KD_QP; |
140 | kinfo->spi_piosize = dd->ipath_ibmaxlen; | 193 | kinfo->spi_piosize = dd->ipath_ibmaxlen; |
141 | kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ | 194 | kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ |
142 | kinfo->spi_port = pd->port_port; | 195 | kinfo->spi_port = pd->port_port; |
196 | kinfo->spi_subport = subport_fp(fp); | ||
143 | kinfo->spi_sw_version = IPATH_KERN_SWVERSION; | 197 | kinfo->spi_sw_version = IPATH_KERN_SWVERSION; |
144 | kinfo->spi_hw_version = dd->ipath_revision; | 198 | kinfo->spi_hw_version = dd->ipath_revision; |
145 | 199 | ||
200 | if (master) { | ||
201 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; | ||
202 | kinfo->spi_subport_uregbase = | ||
203 | (u64) pd->subport_uregbase & MMAP64_MASK; | ||
204 | kinfo->spi_subport_rcvegrbuf = | ||
205 | (u64) pd->subport_rcvegrbuf & MMAP64_MASK; | ||
206 | kinfo->spi_subport_rcvhdr_base = | ||
207 | (u64) pd->subport_rcvhdr_base & MMAP64_MASK; | ||
208 | ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n", | ||
209 | kinfo->spi_port, kinfo->spi_runtime_flags, | ||
210 | (unsigned long long) kinfo->spi_subport_uregbase, | ||
211 | (unsigned long long) kinfo->spi_subport_rcvegrbuf, | ||
212 | (unsigned long long) kinfo->spi_subport_rcvhdr_base); | ||
213 | } | ||
214 | |||
146 | if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) | 215 | if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) |
147 | ret = -EFAULT; | 216 | ret = -EFAULT; |
148 | 217 | ||
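The master/slave PIO buffer split in the hunk above is easier to follow outside the diff: each slave gets pbufsport / subport_cnt buffers, and the master gets that share plus any remainder, placed after all the slaves' buffers. A standalone sketch with made-up values (pbufsport, subport_cnt, palign, and the piobufs base are assumptions, not numbers from a real device):

#include <stdio.h>

int main(void)
{
        unsigned pbufsport = 42, subport_cnt = 4, palign = 2048;
        unsigned piobufs = 0x10000;     /* hypothetical port_piobufs */
        unsigned slave_cnt = pbufsport / subport_cnt;              /* 10 */
        unsigned master_cnt = slave_cnt + pbufsport % subport_cnt; /* 12 */
        unsigned slave;

        /* slaves occupy the front of the port's buffers... */
        for (slave = 0; slave < subport_cnt - 1; slave++)
                printf("slave %u: %u bufs at 0x%x\n", slave + 1, slave_cnt,
                       piobufs + palign * slave_cnt * slave);
        /* ...and the master's buffers come after all the slaves' */
        printf("master: %u bufs at 0x%x\n", master_cnt,
               piobufs + palign * (pbufsport - master_cnt));
        return 0;
}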
@@ -154,6 +223,7 @@ bail: | |||
154 | /** | 223 | /** |
155 | * ipath_tid_update - update a port TID | 224 | * ipath_tid_update - update a port TID |
156 | * @pd: the port | 225 | * @pd: the port |
226 | * @fp: the ipath device file | ||
157 | * @ti: the TID information | 227 | * @ti: the TID information |
158 | * | 228 | * |
159 | * The new implementation as of Oct 2004 is that the driver assigns | 229 | * The new implementation as of Oct 2004 is that the driver assigns |
@@ -176,11 +246,11 @@ bail: | |||
176 | * virtually contiguous pages, that should change to improve | 246 | * virtually contiguous pages, that should change to improve |
177 | * performance. | 247 | * performance. |
178 | */ | 248 | */ |
179 | static int ipath_tid_update(struct ipath_portdata *pd, | 249 | static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp, |
180 | const struct ipath_tid_info *ti) | 250 | const struct ipath_tid_info *ti) |
181 | { | 251 | { |
182 | int ret = 0, ntids; | 252 | int ret = 0, ntids; |
183 | u32 tid, porttid, cnt, i, tidcnt; | 253 | u32 tid, porttid, cnt, i, tidcnt, tidoff; |
184 | u16 *tidlist; | 254 | u16 *tidlist; |
185 | struct ipath_devdata *dd = pd->port_dd; | 255 | struct ipath_devdata *dd = pd->port_dd; |
186 | u64 physaddr; | 256 | u64 physaddr; |
@@ -188,6 +258,7 @@ static int ipath_tid_update(struct ipath_portdata *pd, | |||
188 | u64 __iomem *tidbase; | 258 | u64 __iomem *tidbase; |
189 | unsigned long tidmap[8]; | 259 | unsigned long tidmap[8]; |
190 | struct page **pagep = NULL; | 260 | struct page **pagep = NULL; |
261 | unsigned subport = subport_fp(fp); | ||
191 | 262 | ||
192 | if (!dd->ipath_pageshadow) { | 263 | if (!dd->ipath_pageshadow) { |
193 | ret = -ENOMEM; | 264 | ret = -ENOMEM; |
@@ -204,20 +275,34 @@ static int ipath_tid_update(struct ipath_portdata *pd, | |||
204 | ret = -EFAULT; | 275 | ret = -EFAULT; |
205 | goto done; | 276 | goto done; |
206 | } | 277 | } |
207 | tidcnt = dd->ipath_rcvtidcnt; | 278 | porttid = pd->port_port * dd->ipath_rcvtidcnt; |
208 | if (cnt >= tidcnt) { | 279 | if (!pd->port_subport_cnt) { |
280 | tidcnt = dd->ipath_rcvtidcnt; | ||
281 | tid = pd->port_tidcursor; | ||
282 | tidoff = 0; | ||
283 | } else if (!subport) { | ||
284 | tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + | ||
285 | (dd->ipath_rcvtidcnt % pd->port_subport_cnt); | ||
286 | tidoff = dd->ipath_rcvtidcnt - tidcnt; | ||
287 | porttid += tidoff; | ||
288 | tid = tidcursor_fp(fp); | ||
289 | } else { | ||
290 | tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; | ||
291 | tidoff = tidcnt * (subport - 1); | ||
292 | porttid += tidoff; | ||
293 | tid = tidcursor_fp(fp); | ||
294 | } | ||
295 | if (cnt > tidcnt) { | ||
209 | /* make sure it all fits in port_tid_pg_list */ | 296 | /* make sure it all fits in port_tid_pg_list */ |
210 | dev_info(&dd->pcidev->dev, "Process tried to allocate %u " | 297 | dev_info(&dd->pcidev->dev, "Process tried to allocate %u " |
211 | "TIDs, only trying max (%u)\n", cnt, tidcnt); | 298 | "TIDs, only trying max (%u)\n", cnt, tidcnt); |
212 | cnt = tidcnt; | 299 | cnt = tidcnt; |
213 | } | 300 | } |
214 | pagep = (struct page **)pd->port_tid_pg_list; | 301 | pagep = &((struct page **) pd->port_tid_pg_list)[tidoff]; |
215 | tidlist = (u16 *) (&pagep[cnt]); | 302 | tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff]; |
216 | 303 | ||
217 | memset(tidmap, 0, sizeof(tidmap)); | 304 | memset(tidmap, 0, sizeof(tidmap)); |
218 | tid = pd->port_tidcursor; | ||
219 | /* before decrement; chip actual # */ | 305 | /* before decrement; chip actual # */ |
220 | porttid = pd->port_port * tidcnt; | ||
221 | ntids = tidcnt; | 306 | ntids = tidcnt; |
222 | tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) + | 307 | tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) + |
223 | dd->ipath_rcvtidbase + | 308 | dd->ipath_rcvtidbase + |
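The pagep and tidlist pointers set up above carve two arrays out of the single port_tid_pg_list allocation: rcvtidcnt page pointers followed by rcvtidcnt 16-bit TIDs, with each subport working at its own tidoff into both. A minimal layout sketch (rcvtidcnt and tidoff are invented, and struct page is left opaque):

#include <stdlib.h>

struct page;                            /* opaque, as in the kernel */

int main(void)
{
        unsigned rcvtidcnt = 512, tidoff = 128;         /* hypothetical */
        /* one buffer: rcvtidcnt page pointers, then rcvtidcnt u16 TIDs */
        void *tid_pg_list = malloc(rcvtidcnt *
                        (sizeof(struct page *) + sizeof(unsigned short)));
        struct page **pagep = (struct page **) tid_pg_list + tidoff;
        unsigned short *tidlist =
                (unsigned short *) ((struct page **) tid_pg_list + rcvtidcnt)
                + tidoff;

        (void) pagep;           /* a subport indexes its slice of both */
        (void) tidlist;
        free(tid_pg_list);
        return 0;
}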
@@ -274,16 +359,19 @@ static int ipath_tid_update(struct ipath_portdata *pd, | |||
274 | ret = -ENOMEM; | 359 | ret = -ENOMEM; |
275 | break; | 360 | break; |
276 | } | 361 | } |
277 | tidlist[i] = tid; | 362 | tidlist[i] = tid + tidoff; |
278 | ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, " | 363 | ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, " |
279 | "vaddr %lx\n", i, tid, vaddr); | 364 | "vaddr %lx\n", i, tid + tidoff, vaddr); |
280 | /* we "know" system pages and TID pages are same size */ | 365 | /* we "know" system pages and TID pages are same size */ |
281 | dd->ipath_pageshadow[porttid + tid] = pagep[i]; | 366 | dd->ipath_pageshadow[porttid + tid] = pagep[i]; |
367 | dd->ipath_physshadow[porttid + tid] = ipath_map_page( | ||
368 | dd->pcidev, pagep[i], 0, PAGE_SIZE, | ||
369 | PCI_DMA_FROMDEVICE); | ||
282 | /* | 370 | /* |
283 | * don't need atomic or its overhead | 371 | * don't need atomic or its overhead |
284 | */ | 372 | */ |
285 | __set_bit(tid, tidmap); | 373 | __set_bit(tid, tidmap); |
286 | physaddr = page_to_phys(pagep[i]); | 374 | physaddr = dd->ipath_physshadow[porttid + tid]; |
287 | ipath_stats.sps_pagelocks++; | 375 | ipath_stats.sps_pagelocks++; |
288 | ipath_cdbg(VERBOSE, | 376 | ipath_cdbg(VERBOSE, |
289 | "TID %u, vaddr %lx, physaddr %llx pgp %p\n", | 377 | "TID %u, vaddr %lx, physaddr %llx pgp %p\n", |
@@ -317,6 +405,9 @@ static int ipath_tid_update(struct ipath_portdata *pd, | |||
317 | tid); | 405 | tid); |
318 | dd->ipath_f_put_tid(dd, &tidbase[tid], 1, | 406 | dd->ipath_f_put_tid(dd, &tidbase[tid], 1, |
319 | dd->ipath_tidinvalid); | 407 | dd->ipath_tidinvalid); |
408 | pci_unmap_page(dd->pcidev, | ||
409 | dd->ipath_physshadow[porttid + tid], | ||
410 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
320 | dd->ipath_pageshadow[porttid + tid] = NULL; | 411 | dd->ipath_pageshadow[porttid + tid] = NULL; |
321 | ipath_stats.sps_pageunlocks++; | 412 | ipath_stats.sps_pageunlocks++; |
322 | } | 413 | } |
@@ -341,7 +432,10 @@ static int ipath_tid_update(struct ipath_portdata *pd, | |||
341 | } | 432 | } |
342 | if (tid == tidcnt) | 433 | if (tid == tidcnt) |
343 | tid = 0; | 434 | tid = 0; |
344 | pd->port_tidcursor = tid; | 435 | if (!pd->port_subport_cnt) |
436 | pd->port_tidcursor = tid; | ||
437 | else | ||
438 | tidcursor_fp(fp) = tid; | ||
345 | } | 439 | } |
346 | 440 | ||
347 | done: | 441 | done: |
@@ -354,6 +448,7 @@ done: | |||
354 | /** | 448 | /** |
355 | * ipath_tid_free - free a port TID | 449 | * ipath_tid_free - free a port TID |
356 | * @pd: the port | 450 | * @pd: the port |
451 | * @subport: the subport | ||
357 | * @ti: the TID info | 452 | * @ti: the TID info |
358 | * | 453 | * |
359 | * right now we are unlocking one page at a time, but since | 454 | * right now we are unlocking one page at a time, but since |
@@ -367,7 +462,7 @@ done: | |||
367 | * they pass in to us. | 462 | * they pass in to us. |
368 | */ | 463 | */ |
369 | 464 | ||
370 | static int ipath_tid_free(struct ipath_portdata *pd, | 465 | static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport, |
371 | const struct ipath_tid_info *ti) | 466 | const struct ipath_tid_info *ti) |
372 | { | 467 | { |
373 | int ret = 0; | 468 | int ret = 0; |
@@ -388,11 +483,20 @@ static int ipath_tid_free(struct ipath_portdata *pd, | |||
388 | } | 483 | } |
389 | 484 | ||
390 | porttid = pd->port_port * dd->ipath_rcvtidcnt; | 485 | porttid = pd->port_port * dd->ipath_rcvtidcnt; |
486 | if (!pd->port_subport_cnt) | ||
487 | tidcnt = dd->ipath_rcvtidcnt; | ||
488 | else if (!subport) { | ||
489 | tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + | ||
490 | (dd->ipath_rcvtidcnt % pd->port_subport_cnt); | ||
491 | porttid += dd->ipath_rcvtidcnt - tidcnt; | ||
492 | } else { | ||
493 | tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; | ||
494 | porttid += tidcnt * (subport - 1); | ||
495 | } | ||
391 | tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + | 496 | tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + |
392 | dd->ipath_rcvtidbase + | 497 | dd->ipath_rcvtidbase + |
393 | porttid * sizeof(*tidbase)); | 498 | porttid * sizeof(*tidbase)); |
394 | 499 | ||
395 | tidcnt = dd->ipath_rcvtidcnt; | ||
396 | limit = sizeof(tidmap) * BITS_PER_BYTE; | 500 | limit = sizeof(tidmap) * BITS_PER_BYTE; |
397 | if (limit > tidcnt) | 501 | if (limit > tidcnt) |
398 | /* just in case size changes in future */ | 502 | /* just in case size changes in future */ |
@@ -417,6 +521,9 @@ static int ipath_tid_free(struct ipath_portdata *pd, | |||
417 | pd->port_pid, tid); | 521 | pd->port_pid, tid); |
418 | dd->ipath_f_put_tid(dd, &tidbase[tid], 1, | 522 | dd->ipath_f_put_tid(dd, &tidbase[tid], 1, |
419 | dd->ipath_tidinvalid); | 523 | dd->ipath_tidinvalid); |
524 | pci_unmap_page(dd->pcidev, | ||
525 | dd->ipath_physshadow[porttid + tid], | ||
526 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
420 | ipath_release_user_pages( | 527 | ipath_release_user_pages( |
421 | &dd->ipath_pageshadow[porttid + tid], 1); | 528 | &dd->ipath_pageshadow[porttid + tid], 1); |
422 | dd->ipath_pageshadow[porttid + tid] = NULL; | 529 | dd->ipath_pageshadow[porttid + tid] = NULL; |
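The same share-plus-remainder rule governs the TID ranges computed above: an unshared port keeps every TID, the master takes its share plus the remainder at the end of the range, and slave n takes the n-th share from the front. A compact restatement of that three-way split (the numbers in main() are invented):

#include <stdio.h>

static unsigned tid_range(unsigned rcvtidcnt, unsigned subport_cnt,
                          unsigned subport, unsigned *tidcnt)
{
        unsigned tidoff;

        if (!subport_cnt) {             /* port not shared */
                *tidcnt = rcvtidcnt;
                tidoff = 0;
        } else if (!subport) {          /* master: share plus remainder */
                *tidcnt = rcvtidcnt / subport_cnt + rcvtidcnt % subport_cnt;
                tidoff = rcvtidcnt - *tidcnt;
        } else {                        /* slave n gets the n-th share */
                *tidcnt = rcvtidcnt / subport_cnt;
                tidoff = *tidcnt * (subport - 1);
        }
        return tidoff;
}

int main(void)
{
        unsigned cnt, off = tid_range(510, 4, 0, &cnt);

        printf("master: %u TIDs at offset %u\n", cnt, off); /* 129 at 381 */
        return 0;
}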
@@ -581,20 +688,24 @@ bail: | |||
581 | /** | 688 | /** |
582 | * ipath_manage_rcvq - manage a port's receive queue | 689 | * ipath_manage_rcvq - manage a port's receive queue |
583 | * @pd: the port | 690 | * @pd: the port |
691 | * @subport: the subport | ||
584 | * @start_stop: action to carry out | 692 | * @start_stop: action to carry out |
585 | * | 693 | * |
586 | * start_stop == 0 disables receive on the port, for use in queue | 694 | * start_stop == 0 disables receive on the port, for use in queue |
587 | * overflow conditions. start_stop == 1 re-enables, to be used to | 695 | * overflow conditions. start_stop == 1 re-enables, to be used to |
588 | * re-init the software copy of the head register | 696 | * re-init the software copy of the head register |
589 | */ | 697 | */ |
590 | static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop) | 698 | static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, |
699 | int start_stop) | ||
591 | { | 700 | { |
592 | struct ipath_devdata *dd = pd->port_dd; | 701 | struct ipath_devdata *dd = pd->port_dd; |
593 | u64 tval; | 702 | u64 tval; |
594 | 703 | ||
595 | ipath_cdbg(PROC, "%sabling rcv for unit %u port %u\n", | 704 | ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", |
596 | start_stop ? "en" : "dis", dd->ipath_unit, | 705 | start_stop ? "en" : "dis", dd->ipath_unit, |
597 | pd->port_port); | 706 | pd->port_port, subport); |
707 | if (subport) | ||
708 | goto bail; | ||
598 | /* atomically clear receive enable port. */ | 709 | /* atomically clear receive enable port. */ |
599 | if (start_stop) { | 710 | if (start_stop) { |
600 | /* | 711 | /* |
@@ -609,7 +720,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop) | |||
609 | * updated and correct itself, even in the face of software | 720 | * updated and correct itself, even in the face of software |
610 | * bugs. | 721 | * bugs. |
611 | */ | 722 | */ |
612 | *pd->port_rcvhdrtail_kvaddr = 0; | 723 | *(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0; |
613 | set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, | 724 | set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, |
614 | &dd->ipath_rcvctrl); | 725 | &dd->ipath_rcvctrl); |
615 | } else | 726 | } else |
@@ -630,6 +741,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop) | |||
630 | tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); | 741 | tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); |
631 | } | 742 | } |
632 | /* always; new head should be equal to new tail; see above */ | 743 | /* always; new head should be equal to new tail; see above */ |
744 | bail: | ||
633 | return 0; | 745 | return 0; |
634 | } | 746 | } |
635 | 747 | ||
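The volatile cast added above is load-bearing: port_rcvhdrtail_kvaddr points at a DMA-visible copy of the tail register, and the cast keeps the compiler from caching or eliding the store. The pattern in isolation (a sketch, not the driver's own helper):

#include <stdint.h>

/* Force the zeroing store to actually reach the shared tail copy. */
static void reset_rcvhdrtail(uint64_t *kvaddr)
{
        *(volatile uint64_t *) kvaddr = 0;
}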
@@ -687,6 +799,36 @@ static void ipath_clean_part_key(struct ipath_portdata *pd, | |||
687 | } | 799 | } |
688 | } | 800 | } |
689 | 801 | ||
802 | /* | ||
803 | * Initialize the port data with the receive buffer sizes | ||
804 | * so this can be done while the master port is locked. | ||
805 | * Otherwise, there is a race with a slave opening the port | ||
806 | * and seeing these fields uninitialized. | ||
807 | */ | ||
808 | static void init_user_egr_sizes(struct ipath_portdata *pd) | ||
809 | { | ||
810 | struct ipath_devdata *dd = pd->port_dd; | ||
811 | unsigned egrperchunk, egrcnt, size; | ||
812 | |||
813 | /* | ||
814 | * to avoid wasting a lot of memory, we allocate 32KB chunks of | ||
815 | * physically contiguous memory, advance through it until used up | ||
816 | * and then allocate more. Of course, we need memory to store those | ||
817 | * extra pointers, now. Started out with 256KB, but under heavy | ||
818 | * memory pressure (creating large files and then copying them over | ||
819 | * NFS while doing lots of MPI jobs), we hit some allocation | ||
820 | * failures, even though we can sleep... (2.6.10) Still get | ||
821 | * failures at 64K. 32K is the lowest we can go without wasting | ||
822 | * additional memory. | ||
823 | */ | ||
824 | size = 0x8000; | ||
825 | egrperchunk = size / dd->ipath_rcvegrbufsize; | ||
826 | egrcnt = dd->ipath_rcvegrcnt; | ||
827 | pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk; | ||
828 | pd->port_rcvegrbufs_perchunk = egrperchunk; | ||
829 | pd->port_rcvegrbuf_size = size; | ||
830 | } | ||
831 | |||
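The sizing above amounts to a ceiling division: how many 32KB chunks are needed to hold egrcnt buffers of rcvegrbufsize bytes each. With invented sizes (these are assumptions, not real ipath values):

#include <stdio.h>

int main(void)
{
        unsigned size = 0x8000;                         /* 32KB chunk */
        unsigned egrbufsize = 2048, egrcnt = 100;       /* hypothetical */
        unsigned perchunk = size / egrbufsize;          /* 16 per chunk */
        unsigned chunks = (egrcnt + perchunk - 1) / perchunk;   /* 7 */

        printf("%u chunks of up to %u buffers each\n", chunks, perchunk);
        return 0;
}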
690 | /** | 832 | /** |
691 | * ipath_create_user_egr - allocate eager TID buffers | 833 | * ipath_create_user_egr - allocate eager TID buffers |
692 | * @pd: the port to allocate TID buffers for | 834 | * @pd: the port to allocate TID buffers for |
@@ -702,7 +844,7 @@ static void ipath_clean_part_key(struct ipath_portdata *pd, | |||
702 | static int ipath_create_user_egr(struct ipath_portdata *pd) | 844 | static int ipath_create_user_egr(struct ipath_portdata *pd) |
703 | { | 845 | { |
704 | struct ipath_devdata *dd = pd->port_dd; | 846 | struct ipath_devdata *dd = pd->port_dd; |
705 | unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff; | 847 | unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; |
706 | size_t size; | 848 | size_t size; |
707 | int ret; | 849 | int ret; |
708 | gfp_t gfp_flags; | 850 | gfp_t gfp_flags; |
@@ -722,31 +864,18 @@ static int ipath_create_user_egr(struct ipath_portdata *pd) | |||
722 | ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " | 864 | ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " |
723 | "offset %x, egrsize %u\n", egrcnt, egroff, egrsize); | 865 | "offset %x, egrsize %u\n", egrcnt, egroff, egrsize); |
724 | 866 | ||
725 | /* | 867 | chunk = pd->port_rcvegrbuf_chunks; |
726 | * to avoid wasting a lot of memory, we allocate 32KB chunks of | 868 | egrperchunk = pd->port_rcvegrbufs_perchunk; |
727 | * physically contiguous memory, advance through it until used up | 869 | size = pd->port_rcvegrbuf_size; |
728 | * and then allocate more. Of course, we need memory to store those | 870 | pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]), |
729 | * extra pointers, now. Started out with 256KB, but under heavy | 871 | GFP_KERNEL); |
730 | * memory pressure (creating large files and then copying them over | ||
731 | * NFS while doing lots of MPI jobs), we hit some allocation | ||
732 | * failures, even though we can sleep... (2.6.10) Still get | ||
733 | * failures at 64K. 32K is the lowest we can go without wasting | ||
734 | * additional memory. | ||
735 | */ | ||
736 | size = 0x8000; | ||
737 | alloced = ALIGN(egrsize * egrcnt, size); | ||
738 | egrperchunk = size / egrsize; | ||
739 | chunk = (egrcnt + egrperchunk - 1) / egrperchunk; | ||
740 | pd->port_rcvegrbuf_chunks = chunk; | ||
741 | pd->port_rcvegrbufs_perchunk = egrperchunk; | ||
742 | pd->port_rcvegrbuf_size = size; | ||
743 | pd->port_rcvegrbuf = vmalloc(chunk * sizeof(pd->port_rcvegrbuf[0])); | ||
744 | if (!pd->port_rcvegrbuf) { | 872 | if (!pd->port_rcvegrbuf) { |
745 | ret = -ENOMEM; | 873 | ret = -ENOMEM; |
746 | goto bail; | 874 | goto bail; |
747 | } | 875 | } |
748 | pd->port_rcvegrbuf_phys = | 876 | pd->port_rcvegrbuf_phys = |
749 | vmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0])); | 877 | kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]), |
878 | GFP_KERNEL); | ||
750 | if (!pd->port_rcvegrbuf_phys) { | 879 | if (!pd->port_rcvegrbuf_phys) { |
751 | ret = -ENOMEM; | 880 | ret = -ENOMEM; |
752 | goto bail_rcvegrbuf; | 881 | goto bail_rcvegrbuf; |
@@ -791,105 +920,23 @@ bail_rcvegrbuf_phys: | |||
791 | pd->port_rcvegrbuf_phys[e]); | 920 | pd->port_rcvegrbuf_phys[e]); |
792 | 921 | ||
793 | } | 922 | } |
794 | vfree(pd->port_rcvegrbuf_phys); | 923 | kfree(pd->port_rcvegrbuf_phys); |
795 | pd->port_rcvegrbuf_phys = NULL; | 924 | pd->port_rcvegrbuf_phys = NULL; |
796 | bail_rcvegrbuf: | 925 | bail_rcvegrbuf: |
797 | vfree(pd->port_rcvegrbuf); | 926 | kfree(pd->port_rcvegrbuf); |
798 | pd->port_rcvegrbuf = NULL; | 927 | pd->port_rcvegrbuf = NULL; |
799 | bail: | 928 | bail: |
800 | return ret; | 929 | return ret; |
801 | } | 930 | } |
802 | 931 | ||
803 | static int ipath_do_user_init(struct ipath_portdata *pd, | ||
804 | const struct ipath_user_info *uinfo) | ||
805 | { | ||
806 | int ret = 0; | ||
807 | struct ipath_devdata *dd = pd->port_dd; | ||
808 | u32 head32; | ||
809 | |||
810 | /* for now, if major version is different, bail */ | ||
811 | if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) { | ||
812 | dev_info(&dd->pcidev->dev, | ||
813 | "User major version %d not same as driver " | ||
814 | "major %d\n", uinfo->spu_userversion >> 16, | ||
815 | IPATH_USER_SWMAJOR); | ||
816 | ret = -ENODEV; | ||
817 | goto done; | ||
818 | } | ||
819 | |||
820 | if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) | ||
821 | ipath_dbg("User minor version %d not same as driver " | ||
822 | "minor %d\n", uinfo->spu_userversion & 0xffff, | ||
823 | IPATH_USER_SWMINOR); | ||
824 | |||
825 | if (uinfo->spu_rcvhdrsize) { | ||
826 | ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize); | ||
827 | if (ret) | ||
828 | goto done; | ||
829 | } | ||
830 | |||
831 | /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ | ||
832 | |||
833 | /* for right now, kernel piobufs are at end, so port 1 is at 0 */ | ||
834 | pd->port_piobufs = dd->ipath_piobufbase + | ||
835 | dd->ipath_pbufsport * (pd->port_port - | ||
836 | 1) * dd->ipath_palign; | ||
837 | ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n", | ||
838 | pd->port_port, pd->port_piobufs); | ||
839 | |||
840 | /* | ||
841 | * Now allocate the rcvhdr Q and eager TIDs; skip the TID | ||
842 | * array for the time being. If pd->port_port > chip-supported, | ||
843 | * we need to do extra stuff here to handle it by overflowing | ||
844 | * through port 0, someday | ||
845 | */ | ||
846 | ret = ipath_create_rcvhdrq(dd, pd); | ||
847 | if (!ret) | ||
848 | ret = ipath_create_user_egr(pd); | ||
849 | if (ret) | ||
850 | goto done; | ||
851 | |||
852 | /* | ||
853 | * set the eager head register for this port to the current values | ||
854 | * of the tail pointers, since we don't know if they were | ||
855 | * updated on last use of the port. | ||
856 | */ | ||
857 | head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); | ||
858 | ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); | ||
859 | dd->ipath_lastegrheads[pd->port_port] = -1; | ||
860 | dd->ipath_lastrcvhdrqtails[pd->port_port] = -1; | ||
861 | ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", | ||
862 | pd->port_port, head32); | ||
863 | pd->port_tidcursor = 0; /* start at beginning after open */ | ||
864 | /* | ||
865 | * now enable the port; the tail registers will be written to memory | ||
866 | * by the chip as soon as it sees the write to | ||
867 | * dd->ipath_kregs->kr_rcvctrl. The update only happens on | ||
868 | * transition from 0 to 1, so clear it first, then set it as part of | ||
869 | * enabling the port. This will (very briefly) affect any other | ||
870 | * open ports, but it shouldn't be long enough to be an issue. | ||
871 | * We explicitly set the in-memory copy to 0 beforehand, so we don't | ||
872 | * have to wait to be sure the DMA update has happened. | ||
873 | */ | ||
874 | *pd->port_rcvhdrtail_kvaddr = 0ULL; | ||
875 | set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, | ||
876 | &dd->ipath_rcvctrl); | ||
877 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | ||
878 | dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); | ||
879 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | ||
880 | dd->ipath_rcvctrl); | ||
881 | done: | ||
882 | return ret; | ||
883 | } | ||
884 | |||
885 | 932 | ||
886 | /* common code for the mappings on dma_alloc_coherent mem */ | 933 | /* common code for the mappings on dma_alloc_coherent mem */ |
887 | static int ipath_mmap_mem(struct vm_area_struct *vma, | 934 | static int ipath_mmap_mem(struct vm_area_struct *vma, |
888 | struct ipath_portdata *pd, unsigned len, | 935 | struct ipath_portdata *pd, unsigned len, int write_ok, |
889 | int write_ok, dma_addr_t addr, char *what) | 936 | void *kvaddr, char *what) |
890 | { | 937 | { |
891 | struct ipath_devdata *dd = pd->port_dd; | 938 | struct ipath_devdata *dd = pd->port_dd; |
892 | unsigned pfn = (unsigned long)addr >> PAGE_SHIFT; | 939 | unsigned long pfn; |
893 | int ret; | 940 | int ret; |
894 | 941 | ||
895 | if ((vma->vm_end - vma->vm_start) > len) { | 942 | if ((vma->vm_end - vma->vm_start) > len) { |
@@ -912,17 +959,17 @@ static int ipath_mmap_mem(struct vm_area_struct *vma, | |||
912 | vma->vm_flags &= ~VM_MAYWRITE; | 959 | vma->vm_flags &= ~VM_MAYWRITE; |
913 | } | 960 | } |
914 | 961 | ||
962 | pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; | ||
915 | ret = remap_pfn_range(vma, vma->vm_start, pfn, | 963 | ret = remap_pfn_range(vma, vma->vm_start, pfn, |
916 | len, vma->vm_page_prot); | 964 | len, vma->vm_page_prot); |
917 | if (ret) | 965 | if (ret) |
918 | dev_info(&dd->pcidev->dev, | 966 | dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x " |
919 | "%s port%u mmap of %lx, %x bytes r%c failed: %d\n", | 967 | "bytes r%c failed: %d\n", what, pd->port_port, |
920 | what, pd->port_port, (unsigned long)addr, len, | 968 | pfn, len, write_ok?'w':'o', ret); |
921 | write_ok?'w':'o', ret); | ||
922 | else | 969 | else |
923 | ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes r%c\n", | 970 | ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes " |
924 | what, pd->port_port, (unsigned long)addr, len, | 971 | "r%c\n", what, pd->port_port, pfn, len, |
925 | write_ok?'w':'o'); | 972 | write_ok?'w':'o'); |
926 | bail: | 973 | bail: |
927 | return ret; | 974 | return ret; |
928 | } | 975 | } |
@@ -957,7 +1004,8 @@ static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd, | |||
957 | 1004 | ||
958 | static int mmap_piobufs(struct vm_area_struct *vma, | 1005 | static int mmap_piobufs(struct vm_area_struct *vma, |
959 | struct ipath_devdata *dd, | 1006 | struct ipath_devdata *dd, |
960 | struct ipath_portdata *pd) | 1007 | struct ipath_portdata *pd, |
1008 | unsigned piobufs, unsigned piocnt) | ||
961 | { | 1009 | { |
962 | unsigned long phys; | 1010 | unsigned long phys; |
963 | int ret; | 1011 | int ret; |
@@ -968,16 +1016,15 @@ static int mmap_piobufs(struct vm_area_struct *vma, | |||
968 | * process data, and catches users who might try to read the i/o | 1016 | * process data, and catches users who might try to read the i/o |
969 | * space due to a bug. | 1017 | * space due to a bug. |
970 | */ | 1018 | */ |
971 | if ((vma->vm_end - vma->vm_start) > | 1019 | if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) { |
972 | (dd->ipath_pbufsport * dd->ipath_palign)) { | ||
973 | dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " | 1020 | dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " |
974 | "reqlen %lx > PAGE\n", | 1021 | "reqlen %lx > PAGE\n", |
975 | vma->vm_end - vma->vm_start); | 1022 | vma->vm_end - vma->vm_start); |
976 | ret = -EFAULT; | 1023 | ret = -EINVAL; |
977 | goto bail; | 1024 | goto bail; |
978 | } | 1025 | } |
979 | 1026 | ||
980 | phys = dd->ipath_physaddr + pd->port_piobufs; | 1027 | phys = dd->ipath_physaddr + piobufs; |
981 | 1028 | ||
982 | /* | 1029 | /* |
983 | * Don't mark this as non-cached, or we don't get the | 1030 | * Don't mark this as non-cached, or we don't get the |
@@ -1011,7 +1058,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, | |||
1011 | struct ipath_devdata *dd = pd->port_dd; | 1058 | struct ipath_devdata *dd = pd->port_dd; |
1012 | unsigned long start, size; | 1059 | unsigned long start, size; |
1013 | size_t total_size, i; | 1060 | size_t total_size, i; |
1014 | dma_addr_t *phys; | 1061 | unsigned long pfn; |
1015 | int ret; | 1062 | int ret; |
1016 | 1063 | ||
1017 | size = pd->port_rcvegrbuf_size; | 1064 | size = pd->port_rcvegrbuf_size; |
@@ -1021,7 +1068,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, | |||
1021 | "reqlen %lx > actual %lx\n", | 1068 | "reqlen %lx > actual %lx\n", |
1022 | vma->vm_end - vma->vm_start, | 1069 | vma->vm_end - vma->vm_start, |
1023 | (unsigned long) total_size); | 1070 | (unsigned long) total_size); |
1024 | ret = -EFAULT; | 1071 | ret = -EINVAL; |
1025 | goto bail; | 1072 | goto bail; |
1026 | } | 1073 | } |
1027 | 1074 | ||
@@ -1035,11 +1082,11 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, | |||
1035 | vma->vm_flags &= ~VM_MAYWRITE; | 1082 | vma->vm_flags &= ~VM_MAYWRITE; |
1036 | 1083 | ||
1037 | start = vma->vm_start; | 1084 | start = vma->vm_start; |
1038 | phys = pd->port_rcvegrbuf_phys; | ||
1039 | 1085 | ||
1040 | for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { | 1086 | for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { |
1041 | ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT, | 1087 | pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT; |
1042 | size, vma->vm_page_prot); | 1088 | ret = remap_pfn_range(vma, start, pfn, size, |
1089 | vma->vm_page_prot); | ||
1043 | if (ret < 0) | 1090 | if (ret < 0) |
1044 | goto bail; | 1091 | goto bail; |
1045 | } | 1092 | } |
@@ -1049,6 +1096,122 @@ bail: | |||
1049 | return ret; | 1096 | return ret; |
1050 | } | 1097 | } |
1051 | 1098 | ||
1099 | /* | ||
1100 | * ipath_file_vma_nopage - handle a VMA page fault. | ||
1101 | */ | ||
1102 | static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma, | ||
1103 | unsigned long address, int *type) | ||
1104 | { | ||
1105 | unsigned long offset = address - vma->vm_start; | ||
1106 | struct page *page = NOPAGE_SIGBUS; | ||
1107 | void *pageptr; | ||
1108 | |||
1109 | /* | ||
1110 | * Convert the vmalloc address into a struct page. | ||
1111 | */ | ||
1112 | pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT)); | ||
1113 | page = vmalloc_to_page(pageptr); | ||
1114 | if (!page) | ||
1115 | goto out; | ||
1116 | |||
1117 | /* Increment the reference count. */ | ||
1118 | get_page(page); | ||
1119 | if (type) | ||
1120 | *type = VM_FAULT_MINOR; | ||
1121 | out: | ||
1122 | return page; | ||
1123 | } | ||
1124 | |||
1125 | static struct vm_operations_struct ipath_file_vm_ops = { | ||
1126 | .nopage = ipath_file_vma_nopage, | ||
1127 | }; | ||
1128 | |||
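The nopage handler above works because mmap_kvaddr() (below) stashes the kernel virtual address in vm_pgoff; the handler adds the fault offset back and resolves the result with vmalloc_to_page(). The address round-trip, with an invented address and 4KB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4KB pages */

int main(void)
{
        unsigned long kvaddr = 0xffffc90000034000UL;    /* hypothetical */
        unsigned long vm_pgoff = kvaddr >> PAGE_SHIFT;  /* set at mmap time */
        unsigned long fault_offset = 0x3000;            /* address - vm_start */
        unsigned long pageptr = fault_offset + (vm_pgoff << PAGE_SHIFT);

        /* the real handler passes pageptr to vmalloc_to_page() */
        printf("fault resolves to kernel address %#lx\n", pageptr);
        return 0;
}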
1129 | static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, | ||
1130 | struct ipath_portdata *pd, unsigned subport) | ||
1131 | { | ||
1132 | unsigned long len; | ||
1133 | struct ipath_devdata *dd; | ||
1134 | void *addr; | ||
1135 | size_t size; | ||
1136 | int ret; | ||
1137 | |||
1138 | /* If the port is not shared, all addresses should be physical */ | ||
1139 | if (!pd->port_subport_cnt) { | ||
1140 | ret = -EINVAL; | ||
1141 | goto bail; | ||
1142 | } | ||
1143 | |||
1144 | dd = pd->port_dd; | ||
1145 | size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; | ||
1146 | |||
1147 | /* | ||
1148 | * The master has all the slaves' uregbase, rcvhdrq, and | ||
1149 | * rcvegrbuf regions mmapped. | ||
1150 | */ | ||
1151 | if (subport == 0) { | ||
1152 | unsigned num_slaves = pd->port_subport_cnt - 1; | ||
1153 | |||
1154 | if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) { | ||
1155 | addr = pd->subport_uregbase; | ||
1156 | size = PAGE_SIZE * num_slaves; | ||
1157 | } else if (pgaddr == ((u64) pd->subport_rcvhdr_base & | ||
1158 | MMAP64_MASK)) { | ||
1159 | addr = pd->subport_rcvhdr_base; | ||
1160 | size = pd->port_rcvhdrq_size * num_slaves; | ||
1161 | } else if (pgaddr == ((u64) pd->subport_rcvegrbuf & | ||
1162 | MMAP64_MASK)) { | ||
1163 | addr = pd->subport_rcvegrbuf; | ||
1164 | size *= num_slaves; | ||
1165 | } else { | ||
1166 | ret = -EINVAL; | ||
1167 | goto bail; | ||
1168 | } | ||
1169 | } else if (pgaddr == (((u64) pd->subport_uregbase + | ||
1170 | PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) { | ||
1171 | addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1); | ||
1172 | size = PAGE_SIZE; | ||
1173 | } else if (pgaddr == (((u64) pd->subport_rcvhdr_base + | ||
1174 | pd->port_rcvhdrq_size * (subport - 1)) & | ||
1175 | MMAP64_MASK)) { | ||
1176 | addr = pd->subport_rcvhdr_base + | ||
1177 | pd->port_rcvhdrq_size * (subport - 1); | ||
1178 | size = pd->port_rcvhdrq_size; | ||
1179 | } else if (pgaddr == (((u64) pd->subport_rcvegrbuf + | ||
1180 | size * (subport - 1)) & MMAP64_MASK)) { | ||
1181 | addr = pd->subport_rcvegrbuf + size * (subport - 1); | ||
1182 | /* rcvegrbufs are read-only on the slave */ | ||
1183 | if (vma->vm_flags & VM_WRITE) { | ||
1184 | dev_info(&dd->pcidev->dev, | ||
1185 | "Can't map eager buffers as " | ||
1186 | "writable (flags=%lx)\n", vma->vm_flags); | ||
1187 | ret = -EPERM; | ||
1188 | goto bail; | ||
1189 | } | ||
1190 | /* | ||
1191 | * Don't allow permission to later change to writable | ||
1192 | * with mprotect. | ||
1193 | */ | ||
1194 | vma->vm_flags &= ~VM_MAYWRITE; | ||
1195 | } else { | ||
1196 | ret = -EINVAL; | ||
1197 | goto bail; | ||
1198 | } | ||
1199 | len = vma->vm_end - vma->vm_start; | ||
1200 | if (len > size) { | ||
1201 | ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size); | ||
1202 | ret = -EINVAL; | ||
1203 | goto bail; | ||
1204 | } | ||
1205 | |||
1206 | vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; | ||
1207 | vma->vm_ops = &ipath_file_vm_ops; | ||
1208 | vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; | ||
1209 | ret = 0; | ||
1210 | |||
1211 | bail: | ||
1212 | return ret; | ||
1213 | } | ||
1214 | |||
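From the process side, a subport passes one of the masked kernel addresses exported in the base info (for example __spi_uregbase) back as the mmap() offset; offsets at or above 1ULL << 40 are then routed into mmap_kvaddr() above. A rough usage sketch, with fd and uregbase as placeholders and error handling omitted:

#include <sys/mman.h>
#include <unistd.h>

static void *map_subport_uregs(int fd, unsigned long long uregbase)
{
        /* uregbase is page aligned; the driver recognizes it as a
         * kernel virtual address rather than a physical one */
        return mmap(NULL, (size_t) getpagesize(), PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, (off_t) uregbase);
}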
1052 | /** | 1215 | /** |
1053 | * ipath_mmap - mmap various structures into user space | 1216 | * ipath_mmap - mmap various structures into user space |
1054 | * @fp: the file pointer | 1217 | * @fp: the file pointer |
@@ -1064,73 +1227,99 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) | |||
1064 | struct ipath_portdata *pd; | 1227 | struct ipath_portdata *pd; |
1065 | struct ipath_devdata *dd; | 1228 | struct ipath_devdata *dd; |
1066 | u64 pgaddr, ureg; | 1229 | u64 pgaddr, ureg; |
1230 | unsigned piobufs, piocnt; | ||
1067 | int ret; | 1231 | int ret; |
1068 | 1232 | ||
1069 | pd = port_fp(fp); | 1233 | pd = port_fp(fp); |
1234 | if (!pd) { | ||
1235 | ret = -EINVAL; | ||
1236 | goto bail; | ||
1237 | } | ||
1070 | dd = pd->port_dd; | 1238 | dd = pd->port_dd; |
1071 | 1239 | ||
1072 | /* | 1240 | /* |
1073 | * This is the ipath_do_user_init() code, mapping the shared buffers | 1241 | * This is the ipath_do_user_init() code, mapping the shared buffers |
1074 | * into the user process. The address referred to by vm_pgoff is the | 1242 | * into the user process. The address referred to by vm_pgoff is the |
1075 | * virtual, not physical, address; we only do one mmap for each | 1243 | * file offset passed via mmap(). For shared ports, this is the |
1076 | * space mapped. | 1244 | * kernel vmalloc() address of the pages to share with the master. |
1245 | * For non-shared or master ports, this is a physical address. | ||
1246 | * We only do one mmap for each space mapped. | ||
1077 | */ | 1247 | */ |
1078 | pgaddr = vma->vm_pgoff << PAGE_SHIFT; | 1248 | pgaddr = vma->vm_pgoff << PAGE_SHIFT; |
1079 | 1249 | ||
1080 | /* | 1250 | /* |
1081 | * Must fit in 40 bits for our hardware; some checked elsewhere, | 1251 | * Check for 0 in case one of the allocations failed, but user |
1082 | * but we'll be paranoid. Check for 0 is mostly in case one of the | 1252 | * called mmap anyway. |
1083 | * allocations failed, but user called mmap anyway. We want to catch | ||
1084 | * that before it can match. | ||
1085 | */ | 1253 | */ |
1086 | if (!pgaddr || pgaddr >= (1ULL<<40)) { | 1254 | if (!pgaddr) { |
1087 | ipath_dev_err(dd, "Bad phys addr %llx, start %lx, end %lx\n", | 1255 | ret = -EINVAL; |
1088 | (unsigned long long)pgaddr, vma->vm_start, vma->vm_end); | 1256 | goto bail; |
1089 | return -EINVAL; | ||
1090 | } | 1257 | } |
1091 | 1258 | ||
1092 | /* just the offset of the port user registers, not physical addr */ | 1259 | ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n", |
1093 | ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; | ||
1094 | |||
1095 | ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n", | ||
1096 | (unsigned long long) pgaddr, vma->vm_start, | 1260 | (unsigned long long) pgaddr, vma->vm_start, |
1097 | vma->vm_end - vma->vm_start); | 1261 | vma->vm_end - vma->vm_start, dd->ipath_unit, |
1262 | pd->port_port, subport_fp(fp)); | ||
1098 | 1263 | ||
1099 | if (vma->vm_start & (PAGE_SIZE-1)) { | 1264 | /* |
1100 | ipath_dev_err(dd, | 1265 | * Physical addresses must fit in 40 bits for our hardware. |
1101 | "vm_start not aligned: %lx, end=%lx phys %lx\n", | 1266 | * Check for kernel virtual addresses first; anything else must |
1102 | vma->vm_start, vma->vm_end, (unsigned long)pgaddr); | 1267 | * match a HW or memory address. |
1103 | ret = -EINVAL; | 1268 | */ |
1269 | if (pgaddr >= (1ULL<<40)) { | ||
1270 | ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); | ||
1271 | goto bail; | ||
1104 | } | 1272 | } |
1105 | else if (pgaddr == ureg) | 1273 | |
1274 | if (!pd->port_subport_cnt) { | ||
1275 | /* port is not shared */ | ||
1276 | ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; | ||
1277 | piocnt = dd->ipath_pbufsport; | ||
1278 | piobufs = pd->port_piobufs; | ||
1279 | } else if (!subport_fp(fp)) { | ||
1280 | /* caller is the master */ | ||
1281 | ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; | ||
1282 | piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + | ||
1283 | (dd->ipath_pbufsport % pd->port_subport_cnt); | ||
1284 | piobufs = pd->port_piobufs + | ||
1285 | dd->ipath_palign * (dd->ipath_pbufsport - piocnt); | ||
1286 | } else { | ||
1287 | unsigned slave = subport_fp(fp) - 1; | ||
1288 | |||
1289 | /* caller is a slave */ | ||
1290 | ureg = 0; | ||
1291 | piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; | ||
1292 | piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; | ||
1293 | } | ||
1294 | |||
1295 | if (pgaddr == ureg) | ||
1106 | ret = mmap_ureg(vma, dd, ureg); | 1296 | ret = mmap_ureg(vma, dd, ureg); |
1107 | else if (pgaddr == pd->port_piobufs) | 1297 | else if (pgaddr == piobufs) |
1108 | ret = mmap_piobufs(vma, dd, pd); | 1298 | ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt); |
1109 | else if (pgaddr == (u64) pd->port_rcvegr_phys) | 1299 | else if (pgaddr == dd->ipath_pioavailregs_phys) |
1300 | /* in-memory copy of pioavail registers */ | ||
1301 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, | ||
1302 | (void *) dd->ipath_pioavailregs_dma, | ||
1303 | "pioavail registers"); | ||
1304 | else if (subport_fp(fp)) | ||
1305 | /* Subports don't mmap the physical receive buffers */ | ||
1306 | ret = -EINVAL; | ||
1307 | else if (pgaddr == pd->port_rcvegr_phys) | ||
1110 | ret = mmap_rcvegrbufs(vma, pd); | 1308 | ret = mmap_rcvegrbufs(vma, pd); |
1111 | else if (pgaddr == (u64) pd->port_rcvhdrq_phys) { | 1309 | else if (pgaddr == (u64) pd->port_rcvhdrq_phys) |
1112 | /* | 1310 | /* |
1113 | * The rcvhdrq itself; readonly except on HT (so have | 1311 | * The rcvhdrq itself; readonly except on HT (so have |
1114 | * to allow writable mapping), multiple pages, contiguous | 1312 | * to allow writable mapping), multiple pages, contiguous |
1115 | * from an i/o perspective. | 1313 | * from an i/o perspective. |
1116 | */ | 1314 | */ |
1117 | unsigned total_size = | 1315 | ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1, |
1118 | ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize | 1316 | pd->port_rcvhdrq, |
1119 | * sizeof(u32), PAGE_SIZE); | ||
1120 | ret = ipath_mmap_mem(vma, pd, total_size, 1, | ||
1121 | pd->port_rcvhdrq_phys, | ||
1122 | "rcvhdrq"); | 1317 | "rcvhdrq"); |
1123 | } | 1318 | else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys) |
1124 | else if (pgaddr == (u64)pd->port_rcvhdrqtailaddr_phys) | ||
1125 | /* in-memory copy of rcvhdrq tail register */ | 1319 | /* in-memory copy of rcvhdrq tail register */ |
1126 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, | 1320 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, |
1127 | pd->port_rcvhdrqtailaddr_phys, | 1321 | pd->port_rcvhdrtail_kvaddr, |
1128 | "rcvhdrq tail"); | 1322 | "rcvhdrq tail"); |
1129 | else if (pgaddr == dd->ipath_pioavailregs_phys) | ||
1130 | /* in-memory copy of pioavail registers */ | ||
1131 | ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, | ||
1132 | dd->ipath_pioavailregs_phys, | ||
1133 | "pioavail registers"); | ||
1134 | else | 1323 | else |
1135 | ret = -EINVAL; | 1324 | ret = -EINVAL; |
1136 | 1325 | ||
@@ -1138,9 +1327,10 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) | |||
1138 | 1327 | ||
1139 | if (ret < 0) | 1328 | if (ret < 0) |
1140 | dev_info(&dd->pcidev->dev, | 1329 | dev_info(&dd->pcidev->dev, |
1141 | "Failure %d on addr %lx, off %lx\n", | 1330 | "Failure %d on off %llx len %lx\n", |
1142 | -ret, vma->vm_start, vma->vm_pgoff); | 1331 | -ret, (unsigned long long)pgaddr, |
1143 | 1332 | vma->vm_end - vma->vm_start); | |
1333 | bail: | ||
1144 | return ret; | 1334 | return ret; |
1145 | } | 1335 | } |
1146 | 1336 | ||
@@ -1154,6 +1344,8 @@ static unsigned int ipath_poll(struct file *fp, | |||
1154 | struct ipath_devdata *dd; | 1344 | struct ipath_devdata *dd; |
1155 | 1345 | ||
1156 | pd = port_fp(fp); | 1346 | pd = port_fp(fp); |
1347 | if (!pd) | ||
1348 | goto bail; | ||
1157 | dd = pd->port_dd; | 1349 | dd = pd->port_dd; |
1158 | 1350 | ||
1159 | bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT; | 1351 | bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT; |
@@ -1176,7 +1368,7 @@ static unsigned int ipath_poll(struct file *fp, | |||
1176 | 1368 | ||
1177 | if (tail == head) { | 1369 | if (tail == head) { |
1178 | set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); | 1370 | set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); |
1179 | if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ | 1371 | if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ |
1180 | (void)ipath_write_ureg(dd, ur_rcvhdrhead, | 1372 | (void)ipath_write_ureg(dd, ur_rcvhdrhead, |
1181 | dd->ipath_rhdrhead_intr_off | 1373 | dd->ipath_rhdrhead_intr_off |
1182 | | head, pd->port_port); | 1374 | | head, pd->port_port); |
@@ -1200,18 +1392,80 @@ static unsigned int ipath_poll(struct file *fp, | |||
1200 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | 1392 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, |
1201 | dd->ipath_rcvctrl); | 1393 | dd->ipath_rcvctrl); |
1202 | 1394 | ||
1395 | bail: | ||
1203 | return pollflag; | 1396 | return pollflag; |
1204 | } | 1397 | } |
1205 | 1398 | ||
1399 | static int init_subports(struct ipath_devdata *dd, | ||
1400 | struct ipath_portdata *pd, | ||
1401 | const struct ipath_user_info *uinfo) | ||
1402 | { | ||
1403 | int ret = 0; | ||
1404 | unsigned num_slaves; | ||
1405 | size_t size; | ||
1406 | |||
1407 | /* Old user binaries don't know about subports */ | ||
1408 | if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) | ||
1409 | goto bail; | ||
1410 | /* | ||
1411 | * If the user is requesting zero or one subport, | ||
1412 | * skip the subport allocation. | ||
1413 | */ | ||
1414 | if (uinfo->spu_subport_cnt <= 1) | ||
1415 | goto bail; | ||
1416 | if (uinfo->spu_subport_cnt > 4) { | ||
1417 | ret = -EINVAL; | ||
1418 | goto bail; | ||
1419 | } | ||
1420 | |||
1421 | num_slaves = uinfo->spu_subport_cnt - 1; | ||
1422 | pd->subport_uregbase = vmalloc(PAGE_SIZE * num_slaves); | ||
1423 | if (!pd->subport_uregbase) { | ||
1424 | ret = -ENOMEM; | ||
1425 | goto bail; | ||
1426 | } | ||
1427 | /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ | ||
1428 | size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * | ||
1429 | sizeof(u32), PAGE_SIZE) * num_slaves; | ||
1430 | pd->subport_rcvhdr_base = vmalloc(size); | ||
1431 | if (!pd->subport_rcvhdr_base) { | ||
1432 | ret = -ENOMEM; | ||
1433 | goto bail_ureg; | ||
1434 | } | ||
1435 | |||
1436 | pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * | ||
1437 | pd->port_rcvegrbuf_size * | ||
1438 | num_slaves); | ||
1439 | if (!pd->subport_rcvegrbuf) { | ||
1440 | ret = -ENOMEM; | ||
1441 | goto bail_rhdr; | ||
1442 | } | ||
1443 | |||
1444 | pd->port_subport_cnt = uinfo->spu_subport_cnt; | ||
1445 | pd->port_subport_id = uinfo->spu_subport_id; | ||
1446 | pd->active_slaves = 1; | ||
1447 | goto bail; | ||
1448 | |||
1449 | bail_rhdr: | ||
1450 | vfree(pd->subport_rcvhdr_base); | ||
1451 | bail_ureg: | ||
1452 | vfree(pd->subport_uregbase); | ||
1453 | pd->subport_uregbase = NULL; | ||
1454 | bail: | ||
1455 | return ret; | ||
1456 | } | ||
1457 | |||
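The three vmalloc() calls above scale with the slave count: one page of user registers per slave, a page-aligned copy of the receive header queue per slave, and the full eager buffer space per slave. For concreteness, with invented parameters (PAGE_SIZE assumed 4096; none of these values come from real hardware):

#include <stdio.h>

#define PAGE_SIZE 4096UL                /* assumed */
#define ALIGN_PG(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long num_slaves = 3;                   /* 4 subports */
        unsigned long rcvhdrcnt = 512, rcvhdrentsize = 32;
        unsigned long hdrq = ALIGN_PG(rcvhdrcnt * rcvhdrentsize *
                                      sizeof(unsigned int));

        printf("uregbase: %lu bytes\n", PAGE_SIZE * num_slaves);
        printf("rcvhdr:   %lu bytes\n", hdrq * num_slaves);
        return 0;
}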
1206 | static int try_alloc_port(struct ipath_devdata *dd, int port, | 1458 | static int try_alloc_port(struct ipath_devdata *dd, int port, |
1207 | struct file *fp) | 1459 | struct file *fp, |
1460 | const struct ipath_user_info *uinfo) | ||
1208 | { | 1461 | { |
1462 | struct ipath_portdata *pd; | ||
1209 | int ret; | 1463 | int ret; |
1210 | 1464 | ||
1211 | if (!dd->ipath_pd[port]) { | 1465 | if (!(pd = dd->ipath_pd[port])) { |
1212 | void *p, *ptmp; | 1466 | void *ptmp; |
1213 | 1467 | ||
1214 | p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); | 1468 | pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); |
1215 | 1469 | ||
1216 | /* | 1470 | /* |
1217 | * Allocate memory for use in ipath_tid_update() just once | 1471 | * Allocate memory for use in ipath_tid_update() just once |
@@ -1221,34 +1475,36 @@ static int try_alloc_port(struct ipath_devdata *dd, int port, | |||
1221 | ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) + | 1475 | ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) + |
1222 | dd->ipath_rcvtidcnt * sizeof(struct page **), | 1476 | dd->ipath_rcvtidcnt * sizeof(struct page **), |
1223 | GFP_KERNEL); | 1477 | GFP_KERNEL); |
1224 | if (!p || !ptmp) { | 1478 | if (!pd || !ptmp) { |
1225 | ipath_dev_err(dd, "Unable to allocate portdata " | 1479 | ipath_dev_err(dd, "Unable to allocate portdata " |
1226 | "memory, failing open\n"); | 1480 | "memory, failing open\n"); |
1227 | ret = -ENOMEM; | 1481 | ret = -ENOMEM; |
1228 | kfree(p); | 1482 | kfree(pd); |
1229 | kfree(ptmp); | 1483 | kfree(ptmp); |
1230 | goto bail; | 1484 | goto bail; |
1231 | } | 1485 | } |
1232 | dd->ipath_pd[port] = p; | 1486 | dd->ipath_pd[port] = pd; |
1233 | dd->ipath_pd[port]->port_port = port; | 1487 | dd->ipath_pd[port]->port_port = port; |
1234 | dd->ipath_pd[port]->port_dd = dd; | 1488 | dd->ipath_pd[port]->port_dd = dd; |
1235 | dd->ipath_pd[port]->port_tid_pg_list = ptmp; | 1489 | dd->ipath_pd[port]->port_tid_pg_list = ptmp; |
1236 | init_waitqueue_head(&dd->ipath_pd[port]->port_wait); | 1490 | init_waitqueue_head(&dd->ipath_pd[port]->port_wait); |
1237 | } | 1491 | } |
1238 | if (!dd->ipath_pd[port]->port_cnt) { | 1492 | if (!pd->port_cnt) { |
1239 | dd->ipath_pd[port]->port_cnt = 1; | 1493 | pd->userversion = uinfo->spu_userversion; |
1240 | fp->private_data = (void *) dd->ipath_pd[port]; | 1494 | init_user_egr_sizes(pd); |
1495 | if ((ret = init_subports(dd, pd, uinfo)) != 0) | ||
1496 | goto bail; | ||
1241 | ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n", | 1497 | ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n", |
1242 | current->comm, current->pid, dd->ipath_unit, | 1498 | current->comm, current->pid, dd->ipath_unit, |
1243 | port); | 1499 | port); |
1244 | dd->ipath_pd[port]->port_pid = current->pid; | 1500 | pd->port_cnt = 1; |
1245 | strncpy(dd->ipath_pd[port]->port_comm, current->comm, | 1501 | port_fp(fp) = pd; |
1246 | sizeof(dd->ipath_pd[port]->port_comm)); | 1502 | pd->port_pid = current->pid; |
1503 | strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); | ||
1247 | ipath_stats.sps_ports++; | 1504 | ipath_stats.sps_ports++; |
1248 | ret = 0; | 1505 | ret = 0; |
1249 | goto bail; | 1506 | } else |
1250 | } | 1507 | ret = -EBUSY; |
1251 | ret = -EBUSY; | ||
1252 | 1508 | ||
1253 | bail: | 1509 | bail: |
1254 | return ret; | 1510 | return ret; |
@@ -1264,7 +1520,8 @@ static inline int usable(struct ipath_devdata *dd) | |||
1264 | | IPATH_LINKUNK)); | 1520 | | IPATH_LINKUNK)); |
1265 | } | 1521 | } |
1266 | 1522 | ||
1267 | static int find_free_port(int unit, struct file *fp) | 1523 | static int find_free_port(int unit, struct file *fp, |
1524 | const struct ipath_user_info *uinfo) | ||
1268 | { | 1525 | { |
1269 | struct ipath_devdata *dd = ipath_lookup(unit); | 1526 | struct ipath_devdata *dd = ipath_lookup(unit); |
1270 | int ret, i; | 1527 | int ret, i; |
@@ -1279,8 +1536,8 @@ static int find_free_port(int unit, struct file *fp) | |||
1279 | goto bail; | 1536 | goto bail; |
1280 | } | 1537 | } |
1281 | 1538 | ||
1282 | for (i = 0; i < dd->ipath_cfgports; i++) { | 1539 | for (i = 1; i < dd->ipath_cfgports; i++) { |
1283 | ret = try_alloc_port(dd, i, fp); | 1540 | ret = try_alloc_port(dd, i, fp, uinfo); |
1284 | if (ret != -EBUSY) | 1541 | if (ret != -EBUSY) |
1285 | goto bail; | 1542 | goto bail; |
1286 | } | 1543 | } |
@@ -1290,13 +1547,14 @@ bail: | |||
1290 | return ret; | 1547 | return ret; |
1291 | } | 1548 | } |
1292 | 1549 | ||
1293 | static int find_best_unit(struct file *fp) | 1550 | static int find_best_unit(struct file *fp, |
1551 | const struct ipath_user_info *uinfo) | ||
1294 | { | 1552 | { |
1295 | int ret = 0, i, prefunit = -1, devmax; | 1553 | int ret = 0, i, prefunit = -1, devmax; |
1296 | int maxofallports, npresent, nup; | 1554 | int maxofallports, npresent, nup; |
1297 | int ndev; | 1555 | int ndev; |
1298 | 1556 | ||
1299 | (void) ipath_count_units(&npresent, &nup, &maxofallports); | 1557 | devmax = ipath_count_units(&npresent, &nup, &maxofallports); |
1300 | 1558 | ||
1301 | /* | 1559 | /* |
1302 | * This code is present to allow a knowledgeable person to | 1560 | * This code is present to allow a knowledgeable person to |
@@ -1343,8 +1601,6 @@ static int find_best_unit(struct file *fp) | |||
1343 | 1601 | ||
1344 | if (prefunit != -1) | 1602 | if (prefunit != -1) |
1345 | devmax = prefunit + 1; | 1603 | devmax = prefunit + 1; |
1346 | else | ||
1347 | devmax = ipath_count_units(NULL, NULL, NULL); | ||
1348 | recheck: | 1604 | recheck: |
1349 | for (i = 1; i < maxofallports; i++) { | 1605 | for (i = 1; i < maxofallports; i++) { |
1350 | for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax; | 1606 | for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax; |
@@ -1359,7 +1615,7 @@ recheck: | |||
1359 | * next. | 1615 | * next. |
1360 | */ | 1616 | */ |
1361 | continue; | 1617 | continue; |
1362 | ret = try_alloc_port(dd, i, fp); | 1618 | ret = try_alloc_port(dd, i, fp, uinfo); |
1363 | if (!ret) | 1619 | if (!ret) |
1364 | goto done; | 1620 | goto done; |
1365 | } | 1621 | } |
@@ -1395,22 +1651,183 @@ done: | |||
1395 | return ret; | 1651 | return ret; |
1396 | } | 1652 | } |
1397 | 1653 | ||
1654 | static int find_shared_port(struct file *fp, | ||
1655 | const struct ipath_user_info *uinfo) | ||
1656 | { | ||
1657 | int devmax, ndev, i; | ||
1658 | int ret = 0; | ||
1659 | |||
1660 | devmax = ipath_count_units(NULL, NULL, NULL); | ||
1661 | |||
1662 | for (ndev = 0; ndev < devmax; ndev++) { | ||
1663 | struct ipath_devdata *dd = ipath_lookup(ndev); | ||
1664 | |||
1665 | if (!dd) | ||
1666 | continue; | ||
1667 | for (i = 1; i < dd->ipath_cfgports; i++) { | ||
1668 | struct ipath_portdata *pd = dd->ipath_pd[i]; | ||
1669 | |||
1670 | /* Skip ports which are not yet open */ | ||
1671 | if (!pd || !pd->port_cnt) | ||
1672 | continue; | ||
1673 | /* Skip port if it doesn't match the requested one */ | ||
1674 | if (pd->port_subport_id != uinfo->spu_subport_id) | ||
1675 | continue; | ||
1676 | /* Verify the sharing process matches the master */ | ||
1677 | if (pd->port_subport_cnt != uinfo->spu_subport_cnt || | ||
1678 | pd->userversion != uinfo->spu_userversion || | ||
1679 | pd->port_cnt >= pd->port_subport_cnt) { | ||
1680 | ret = -EINVAL; | ||
1681 | goto done; | ||
1682 | } | ||
1683 | port_fp(fp) = pd; | ||
1684 | subport_fp(fp) = pd->port_cnt++; | ||
1685 | tidcursor_fp(fp) = 0; | ||
1686 | pd->active_slaves |= 1 << subport_fp(fp); | ||
1687 | ipath_cdbg(PROC, | ||
1688 | "%s[%u] %u sharing %s[%u] unit:port %u:%u\n", | ||
1689 | current->comm, current->pid, | ||
1690 | subport_fp(fp), | ||
1691 | pd->port_comm, pd->port_pid, | ||
1692 | dd->ipath_unit, pd->port_port); | ||
1693 | ret = 1; | ||
1694 | goto done; | ||
1695 | } | ||
1696 | } | ||
1697 | |||
1698 | done: | ||
1699 | return ret; | ||
1700 | } | ||
1701 | |||
1398 | static int ipath_open(struct inode *in, struct file *fp) | 1702 | static int ipath_open(struct inode *in, struct file *fp) |
1399 | { | 1703 | { |
1400 | int ret, user_minor; | 1704 | /* The real work is performed later in ipath_assign_port() */ |
1705 | fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL); | ||
1706 | return fp->private_data ? 0 : -ENOMEM; | ||
1707 | } | ||
1708 | |||
1709 | |||
1710 | /* Get the port early, so we can set affinity prior to memory allocation */ | ||
1711 | static int ipath_assign_port(struct file *fp, | ||
1712 | const struct ipath_user_info *uinfo) | ||
1713 | { | ||
1714 | int ret; | ||
1715 | int i_minor; | ||
1716 | unsigned swminor; | ||
1717 | |||
1718 | /* Check to be sure we haven't already initialized this file */ | ||
1719 | if (port_fp(fp)) { | ||
1720 | ret = -EINVAL; | ||
1721 | goto done; | ||
1722 | } | ||
1723 | |||
1724 | /* for now, if major version is different, bail */ | ||
1725 | if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) { | ||
1726 | ipath_dbg("User major version %d not same as driver " | ||
1727 | "major %d\n", uinfo->spu_userversion >> 16, | ||
1728 | IPATH_USER_SWMAJOR); | ||
1729 | ret = -ENODEV; | ||
1730 | goto done; | ||
1731 | } | ||
1732 | |||
1733 | swminor = uinfo->spu_userversion & 0xffff; | ||
1734 | if (swminor != IPATH_USER_SWMINOR) | ||
1735 | ipath_dbg("User minor version %d not same as driver " | ||
1736 | "minor %d\n", swminor, IPATH_USER_SWMINOR); | ||
1401 | 1737 | ||
1402 | mutex_lock(&ipath_mutex); | 1738 | mutex_lock(&ipath_mutex); |
1403 | 1739 | ||
1404 | user_minor = iminor(in) - IPATH_USER_MINOR_BASE; | 1740 | if (swminor == IPATH_USER_SWMINOR && uinfo->spu_subport_cnt && |
1741 | (ret = find_shared_port(fp, uinfo))) { | ||
1742 | mutex_unlock(&ipath_mutex); | ||
1743 | if (ret > 0) | ||
1744 | ret = 0; | ||
1745 | goto done; | ||
1746 | } | ||
1747 | |||
1748 | i_minor = iminor(fp->f_dentry->d_inode) - IPATH_USER_MINOR_BASE; | ||
1405 | ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", | 1749 | ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", |
1406 | (long)in->i_rdev, user_minor); | 1750 | (long)fp->f_dentry->d_inode->i_rdev, i_minor); |
1407 | 1751 | ||
1408 | if (user_minor) | 1752 | if (i_minor) |
1409 | ret = find_free_port(user_minor - 1, fp); | 1753 | ret = find_free_port(i_minor - 1, fp, uinfo); |
1410 | else | 1754 | else |
1411 | ret = find_best_unit(fp); | 1755 | ret = find_best_unit(fp, uinfo); |
1412 | 1756 | ||
1413 | mutex_unlock(&ipath_mutex); | 1757 | mutex_unlock(&ipath_mutex); |
1758 | |||
1759 | done: | ||
1760 | return ret; | ||
1761 | } | ||
1762 | |||
1763 | |||
1764 | static int ipath_do_user_init(struct file *fp, | ||
1765 | const struct ipath_user_info *uinfo) | ||
1766 | { | ||
1767 | int ret; | ||
1768 | struct ipath_portdata *pd; | ||
1769 | struct ipath_devdata *dd; | ||
1770 | u32 head32; | ||
1771 | |||
1772 | pd = port_fp(fp); | ||
1773 | dd = pd->port_dd; | ||
1774 | |||
1775 | if (uinfo->spu_rcvhdrsize) { | ||
1776 | ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize); | ||
1777 | if (ret) | ||
1778 | goto done; | ||
1779 | } | ||
1780 | |||
1781 | /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ | ||
1782 | |||
1783 | /* for right now, kernel piobufs are at end, so port 1 is at 0 */ | ||
1784 | pd->port_piobufs = dd->ipath_piobufbase + | ||
1785 | dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign; | ||
1786 | ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n", | ||
1787 | pd->port_port, pd->port_piobufs); | ||
1788 | |||
1789 | /* | ||
1790 | * Now allocate the rcvhdr Q and eager TIDs; skip the TID | ||
1791 | * array for the time being. If pd->port_port > chip-supported, | ||
1792 | * we need to do extra stuff here to handle it by overflowing | ||
1793 | * through port 0, someday | ||
1794 | */ | ||
1795 | ret = ipath_create_rcvhdrq(dd, pd); | ||
1796 | if (!ret) | ||
1797 | ret = ipath_create_user_egr(pd); | ||
1798 | if (ret) | ||
1799 | goto done; | ||
1800 | |||
1801 | /* | ||
1802 | * set the eager head register for this port to the current values | ||
1803 | * of the tail pointers, since we don't know if they were | ||
1804 | * updated on last use of the port. | ||
1805 | */ | ||
1806 | head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); | ||
1807 | ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); | ||
1808 | dd->ipath_lastegrheads[pd->port_port] = -1; | ||
1809 | dd->ipath_lastrcvhdrqtails[pd->port_port] = -1; | ||
1810 | ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", | ||
1811 | pd->port_port, head32); | ||
1812 | pd->port_tidcursor = 0; /* start at beginning after open */ | ||
1813 | /* | ||
1814 | * now enable the port; the tail registers will be written to memory | ||
1815 | * by the chip as soon as it sees the write to | ||
1816 | * dd->ipath_kregs->kr_rcvctrl. The update only happens on | ||
1817 | * transition from 0 to 1, so clear it first, then set it as part of | ||
1818 | * enabling the port. This will (very briefly) affect any other | ||
1819 | * open ports, but it shouldn't be long enough to be an issue. | ||
1820 | * We explicitly set the in-memory copy to 0 beforehand, so we don't | ||
1821 | * have to wait to be sure the DMA update has happened. | ||
1822 | */ | ||
1823 | *(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0ULL; | ||
1824 | set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, | ||
1825 | &dd->ipath_rcvctrl); | ||
1826 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | ||
1827 | dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); | ||
1828 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | ||
1829 | dd->ipath_rcvctrl); | ||
1830 | done: | ||
1414 | return ret; | 1831 | return ret; |
1415 | } | 1832 | } |
1416 | 1833 | ||
@@ -1433,6 +1850,8 @@ static void unlock_expected_tids(struct ipath_portdata *pd) | |||
1433 | if (!dd->ipath_pageshadow[i]) | 1850 | if (!dd->ipath_pageshadow[i]) |
1434 | continue; | 1851 | continue; |
1435 | 1852 | ||
1853 | pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i], | ||
1854 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
1436 | ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i], | 1855 | ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i], |
1437 | 1); | 1856 | 1); |
1438 | dd->ipath_pageshadow[i] = NULL; | 1857 | dd->ipath_pageshadow[i] = NULL; |
@@ -1453,6 +1872,7 @@ static void unlock_expected_tids(struct ipath_portdata *pd) | |||
1453 | static int ipath_close(struct inode *in, struct file *fp) | 1872 | static int ipath_close(struct inode *in, struct file *fp) |
1454 | { | 1873 | { |
1455 | int ret = 0; | 1874 | int ret = 0; |
1875 | struct ipath_filedata *fd; | ||
1456 | struct ipath_portdata *pd; | 1876 | struct ipath_portdata *pd; |
1457 | struct ipath_devdata *dd; | 1877 | struct ipath_devdata *dd; |
1458 | unsigned port; | 1878 | unsigned port; |
@@ -1462,9 +1882,24 @@ static int ipath_close(struct inode *in, struct file *fp) | |||
1462 | 1882 | ||
1463 | mutex_lock(&ipath_mutex); | 1883 | mutex_lock(&ipath_mutex); |
1464 | 1884 | ||
1465 | pd = port_fp(fp); | 1885 | fd = (struct ipath_filedata *) fp->private_data; |
1466 | port = pd->port_port; | ||
1467 | fp->private_data = NULL; | 1886 | fp->private_data = NULL; |
1887 | pd = fd->pd; | ||
1888 | if (!pd) { | ||
1889 | mutex_unlock(&ipath_mutex); | ||
1890 | goto bail; | ||
1891 | } | ||
1892 | if (--pd->port_cnt) { | ||
1893 | /* | ||
1894 | * XXX If the master closes the port before the slave(s), | ||
1895 | * revoke the mmap for the eager receive queue so | ||
1896 | * the slave(s) don't wait for receive data forever. | ||
1897 | */ | ||
1898 | pd->active_slaves &= ~(1 << fd->subport); | ||
1899 | mutex_unlock(&ipath_mutex); | ||
1900 | goto bail; | ||
1901 | } | ||
1902 | port = pd->port_port; | ||
1468 | dd = pd->port_dd; | 1903 | dd = pd->port_dd; |
1469 | 1904 | ||
1470 | if (pd->port_hdrqfull) { | 1905 | if (pd->port_hdrqfull) { |
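The close path above has to tell a subport detach apart from the last close: any close but the last just clears that subport's bit in active_slaves and returns early, while the final one does the full teardown. A minimal, runnable sketch of that bookkeeping (the struct and function names here are illustrative, not the driver's):

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-in for pd->port_cnt / pd->active_slaves. */
    struct port {
        unsigned int cnt;           /* open handles on this port */
        unsigned int active_slaves; /* one bit per attached subport */
    };

    /* Returns 1 if the caller was the last user and must tear down. */
    static int close_subport(struct port *p, unsigned int subport)
    {
        if (--p->cnt) {
            p->active_slaves &= ~(1U << subport); /* just this subport */
            return 0;
        }
        return 1; /* last close: full port teardown */
    }

    int main(void)
    {
        struct port p = { .cnt = 2, .active_slaves = 0x3 };
        assert(close_subport(&p, 1) == 0 && p.active_slaves == 0x1);
        assert(close_subport(&p, 0) == 1);
        printf("ok\n");
        return 0;
    }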
@@ -1503,8 +1938,6 @@ static int ipath_close(struct inode *in, struct file *fp) | |||
1503 | 1938 | ||
1504 | /* clean up the pkeys for this port user */ | 1939 | /* clean up the pkeys for this port user */ |
1505 | ipath_clean_part_key(pd, dd); | 1940 | ipath_clean_part_key(pd, dd); |
1506 | |||
1507 | |||
1508 | /* | 1941 | /* |
1509 | * be paranoid, and never write 0's to these, just use an | 1942 | * be paranoid, and never write 0's to these, just use an |
1510 | * unused part of the port 0 tail page. Of course, | 1943 | * unused part of the port 0 tail page. Of course, |
@@ -1523,39 +1956,49 @@ static int ipath_close(struct inode *in, struct file *fp) | |||
1523 | i = dd->ipath_pbufsport * (port - 1); | 1956 | i = dd->ipath_pbufsport * (port - 1); |
1524 | ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); | 1957 | ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); |
1525 | 1958 | ||
1959 | dd->ipath_f_clear_tids(dd, pd->port_port); | ||
1960 | |||
1526 | if (dd->ipath_pageshadow) | 1961 | if (dd->ipath_pageshadow) |
1527 | unlock_expected_tids(pd); | 1962 | unlock_expected_tids(pd); |
1528 | ipath_stats.sps_ports--; | 1963 | ipath_stats.sps_ports--; |
1529 | ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", | 1964 | ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", |
1530 | pd->port_comm, pd->port_pid, | 1965 | pd->port_comm, pd->port_pid, |
1531 | dd->ipath_unit, port); | 1966 | dd->ipath_unit, port); |
1532 | |||
1533 | dd->ipath_f_clear_tids(dd, pd->port_port); | ||
1534 | } | 1967 | } |
1535 | 1968 | ||
1536 | pd->port_cnt = 0; | ||
1537 | pd->port_pid = 0; | 1969 | pd->port_pid = 0; |
1538 | |||
1539 | dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ | 1970 | dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ |
1540 | mutex_unlock(&ipath_mutex); | 1971 | mutex_unlock(&ipath_mutex); |
1541 | ipath_free_pddata(dd, pd); /* after releasing the mutex */ | 1972 | ipath_free_pddata(dd, pd); /* after releasing the mutex */ |
1542 | 1973 | ||
1974 | bail: | ||
1975 | kfree(fd); | ||
1543 | return ret; | 1976 | return ret; |
1544 | } | 1977 | } |
1545 | 1978 | ||
1546 | static int ipath_port_info(struct ipath_portdata *pd, | 1979 | static int ipath_port_info(struct ipath_portdata *pd, u16 subport, |
1547 | struct ipath_port_info __user *uinfo) | 1980 | struct ipath_port_info __user *uinfo) |
1548 | { | 1981 | { |
1549 | struct ipath_port_info info; | 1982 | struct ipath_port_info info; |
1550 | int nup; | 1983 | int nup; |
1551 | int ret; | 1984 | int ret; |
1985 | size_t sz; | ||
1552 | 1986 | ||
1553 | (void) ipath_count_units(NULL, &nup, NULL); | 1987 | (void) ipath_count_units(NULL, &nup, NULL); |
1554 | info.num_active = nup; | 1988 | info.num_active = nup; |
1555 | info.unit = pd->port_dd->ipath_unit; | 1989 | info.unit = pd->port_dd->ipath_unit; |
1556 | info.port = pd->port_port; | 1990 | info.port = pd->port_port; |
1991 | info.subport = subport; | ||
1992 | /* Don't return new fields if old library opened the port. */ | ||
1993 | if ((pd->userversion & 0xffff) == IPATH_USER_SWMINOR) { | ||
1994 | /* Number of user ports available for this device. */ | ||
1995 | info.num_ports = pd->port_dd->ipath_cfgports - 1; | ||
1996 | info.num_subports = pd->port_subport_cnt; | ||
1997 | sz = sizeof(info); | ||
1998 | } else | ||
1999 | sz = sizeof(info) - 2 * sizeof(u16); | ||
1557 | 2000 | ||
1558 | if (copy_to_user(uinfo, &info, sizeof(info))) { | 2001 | if (copy_to_user(uinfo, &info, sz)) { |
1559 | ret = -EFAULT; | 2002 | ret = -EFAULT; |
1560 | goto bail; | 2003 | goto bail; |
1561 | } | 2004 | } |
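The size arithmetic above is what keeps old userspace working: when the opener's ABI minor version is not the current IPATH_USER_SWMINOR, copy_to_user() is told to stop short of the two new u16 fields at the end of the struct, so old binaries never receive bytes they did not allocate. The same pattern as a freestanding sketch (field names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct port_info {
        uint32_t num_active;
        uint32_t unit;
        uint16_t port;
        uint16_t subport;
        /* New fields must stay last so old readers can take a prefix. */
        uint16_t num_ports;
        uint16_t num_subports;
    };

    static size_t info_size_for(unsigned user_minor, unsigned cur_minor)
    {
        return user_minor == cur_minor
            ? sizeof(struct port_info)
            : sizeof(struct port_info) - 2 * sizeof(uint16_t);
    }

    int main(void)
    {
        printf("current ABI: %zu bytes, old ABI: %zu bytes\n",
               info_size_for(3, 3), info_size_for(2, 3));
        return 0;
    }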
@@ -1565,6 +2008,16 @@ bail: | |||
1565 | return ret; | 2008 | return ret; |
1566 | } | 2009 | } |
1567 | 2010 | ||
2011 | static int ipath_get_slave_info(struct ipath_portdata *pd, | ||
2012 | void __user *slave_mask_addr) | ||
2013 | { | ||
2014 | int ret = 0; | ||
2015 | |||
2016 | if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32))) | ||
2017 | ret = -EFAULT; | ||
2018 | return ret; | ||
2019 | } | ||
2020 | |||
1568 | static ssize_t ipath_write(struct file *fp, const char __user *data, | 2021 | static ssize_t ipath_write(struct file *fp, const char __user *data, |
1569 | size_t count, loff_t *off) | 2022 | size_t count, loff_t *off) |
1570 | { | 2023 | { |
@@ -1591,6 +2044,8 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, | |||
1591 | consumed = sizeof(cmd.type); | 2044 | consumed = sizeof(cmd.type); |
1592 | 2045 | ||
1593 | switch (cmd.type) { | 2046 | switch (cmd.type) { |
2047 | case IPATH_CMD_ASSIGN_PORT: | ||
2048 | case __IPATH_CMD_USER_INIT: | ||
1594 | case IPATH_CMD_USER_INIT: | 2049 | case IPATH_CMD_USER_INIT: |
1595 | copy = sizeof(cmd.cmd.user_info); | 2050 | copy = sizeof(cmd.cmd.user_info); |
1596 | dest = &cmd.cmd.user_info; | 2051 | dest = &cmd.cmd.user_info; |
@@ -1617,6 +2072,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, | |||
1617 | dest = &cmd.cmd.part_key; | 2072 | dest = &cmd.cmd.part_key; |
1618 | src = &ucmd->cmd.part_key; | 2073 | src = &ucmd->cmd.part_key; |
1619 | break; | 2074 | break; |
2075 | case IPATH_CMD_SLAVE_INFO: | ||
2076 | copy = sizeof(cmd.cmd.slave_mask_addr); | ||
2077 | dest = &cmd.cmd.slave_mask_addr; | ||
2078 | src = &ucmd->cmd.slave_mask_addr; | ||
2079 | break; | ||
1620 | default: | 2080 | default: |
1621 | ret = -EINVAL; | 2081 | ret = -EINVAL; |
1622 | goto bail; | 2082 | goto bail; |
@@ -1634,34 +2094,55 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, | |||
1634 | 2094 | ||
1635 | consumed += copy; | 2095 | consumed += copy; |
1636 | pd = port_fp(fp); | 2096 | pd = port_fp(fp); |
2097 | if (!pd && cmd.type != __IPATH_CMD_USER_INIT && | ||
2098 | cmd.type != IPATH_CMD_ASSIGN_PORT) { | ||
2099 | ret = -EINVAL; | ||
2100 | goto bail; | ||
2101 | } | ||
1637 | 2102 | ||
1638 | switch (cmd.type) { | 2103 | switch (cmd.type) { |
2104 | case IPATH_CMD_ASSIGN_PORT: | ||
2105 | ret = ipath_assign_port(fp, &cmd.cmd.user_info); | ||
2106 | if (ret) | ||
2107 | goto bail; | ||
2108 | break; | ||
2109 | case __IPATH_CMD_USER_INIT: | ||
2110 | /* backwards compatibility, get port first */ | ||
2111 | ret = ipath_assign_port(fp, &cmd.cmd.user_info); | ||
2112 | if (ret) | ||
2113 | goto bail; | ||
2114 | /* and fall through to current version. */ | ||
1639 | case IPATH_CMD_USER_INIT: | 2115 | case IPATH_CMD_USER_INIT: |
1640 | ret = ipath_do_user_init(pd, &cmd.cmd.user_info); | 2116 | ret = ipath_do_user_init(fp, &cmd.cmd.user_info); |
1641 | if (ret < 0) | 2117 | if (ret) |
1642 | goto bail; | 2118 | goto bail; |
1643 | ret = ipath_get_base_info( | 2119 | ret = ipath_get_base_info( |
1644 | pd, (void __user *) (unsigned long) | 2120 | fp, (void __user *) (unsigned long) |
1645 | cmd.cmd.user_info.spu_base_info, | 2121 | cmd.cmd.user_info.spu_base_info, |
1646 | cmd.cmd.user_info.spu_base_info_size); | 2122 | cmd.cmd.user_info.spu_base_info_size); |
1647 | break; | 2123 | break; |
1648 | case IPATH_CMD_RECV_CTRL: | 2124 | case IPATH_CMD_RECV_CTRL: |
1649 | ret = ipath_manage_rcvq(pd, cmd.cmd.recv_ctrl); | 2125 | ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl); |
1650 | break; | 2126 | break; |
1651 | case IPATH_CMD_PORT_INFO: | 2127 | case IPATH_CMD_PORT_INFO: |
1652 | ret = ipath_port_info(pd, | 2128 | ret = ipath_port_info(pd, subport_fp(fp), |
1653 | (struct ipath_port_info __user *) | 2129 | (struct ipath_port_info __user *) |
1654 | (unsigned long) cmd.cmd.port_info); | 2130 | (unsigned long) cmd.cmd.port_info); |
1655 | break; | 2131 | break; |
1656 | case IPATH_CMD_TID_UPDATE: | 2132 | case IPATH_CMD_TID_UPDATE: |
1657 | ret = ipath_tid_update(pd, &cmd.cmd.tid_info); | 2133 | ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info); |
1658 | break; | 2134 | break; |
1659 | case IPATH_CMD_TID_FREE: | 2135 | case IPATH_CMD_TID_FREE: |
1660 | ret = ipath_tid_free(pd, &cmd.cmd.tid_info); | 2136 | ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info); |
1661 | break; | 2137 | break; |
1662 | case IPATH_CMD_SET_PART_KEY: | 2138 | case IPATH_CMD_SET_PART_KEY: |
1663 | ret = ipath_set_part_key(pd, cmd.cmd.part_key); | 2139 | ret = ipath_set_part_key(pd, cmd.cmd.part_key); |
1664 | break; | 2140 | break; |
2141 | case IPATH_CMD_SLAVE_INFO: | ||
2142 | ret = ipath_get_slave_info(pd, | ||
2143 | (void __user *) (unsigned long) | ||
2144 | cmd.cmd.slave_mask_addr); | ||
2145 | break; | ||
1665 | } | 2146 | } |
1666 | 2147 | ||
1667 | if (ret >= 0) | 2148 | if (ret >= 0) |
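All of these commands ride on write() to the device node: a type word followed by a type-specific payload, with the driver consuming exactly the bytes it copied. A hedged sketch of the calling side for the new IPATH_CMD_SLAVE_INFO; the struct below only mirrors the type-plus-union shape and is not the exact ipath ABI:

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    /* Illustrative type + union command, not the real struct ipath_cmd. */
    struct cmd {
        uint32_t type;
        union {
            uint64_t slave_mask_addr; /* user pointer, carried as a u64 */
            uint16_t part_key;
        } u;
    };

    /* Ask the driver to store the active-slave mask at *mask. */
    static int get_slave_info(int fd, uint32_t *mask, uint32_t cmd_type)
    {
        struct cmd c;

        memset(&c, 0, sizeof(c));
        c.type = cmd_type;
        c.u.slave_mask_addr = (uintptr_t) mask;
        /* The driver only consumes the payload declared for this type. */
        if (write(fd, &c, sizeof(c)) < 0)
            return -1;
        return 0;
    }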
@@ -1858,4 +2339,3 @@ void ipath_user_remove(struct ipath_devdata *dd) | |||
1858 | bail: | 2339 | bail: |
1859 | return; | 2340 | return; |
1860 | } | 2341 | } |
1861 | |||
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index c8a8af0fe471..a507d0b5be6c 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c | |||
@@ -356,19 +356,16 @@ static ssize_t flash_write(struct file *file, const char __user *buf, | |||
356 | 356 | ||
357 | pos = *ppos; | 357 | pos = *ppos; |
358 | 358 | ||
359 | if ( pos < 0) { | 359 | if (pos != 0) { |
360 | ret = -EINVAL; | 360 | ret = -EINVAL; |
361 | goto bail; | 361 | goto bail; |
362 | } | 362 | } |
363 | 363 | ||
364 | if (pos >= sizeof(struct ipath_flash)) { | 364 | if (count != sizeof(struct ipath_flash)) { |
365 | ret = 0; | 365 | ret = -EINVAL; |
366 | goto bail; | 366 | goto bail; |
367 | } | 367 | } |
368 | 368 | ||
369 | if (count > sizeof(struct ipath_flash) - pos) | ||
370 | count = sizeof(struct ipath_flash) - pos; | ||
371 | |||
372 | tmp = kmalloc(count, GFP_KERNEL); | 369 | tmp = kmalloc(count, GFP_KERNEL); |
373 | if (!tmp) { | 370 | if (!tmp) { |
374 | ret = -ENOMEM; | 371 | ret = -ENOMEM; |
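After this change the flash node is all-or-nothing: the only write flash_write() accepts is the complete ipath_flash image at offset zero, so a checksummed image can never be left half-updated. From userspace that looks like:

    #include <stdio.h>
    #include <unistd.h>

    /*
     * Sketch: FLASH_SZ stands in for sizeof(struct ipath_flash); the
     * real value comes from the driver headers.
     */
    #define FLASH_SZ 128

    static int update_flash(int fd, const void *img)
    {
        /* Whole image, offset 0; anything else now fails with -EINVAL. */
        if (pwrite(fd, img, FLASH_SZ, 0) != FLASH_SZ) {
            perror("flash write");
            return -1;
        }
        return 0;
    }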
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 5c9b509e40e4..9e4e8d4c6e20 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6110.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c | |||
@@ -252,8 +252,8 @@ static const struct ipath_cregs ipath_ht_cregs = { | |||
252 | }; | 252 | }; |
253 | 253 | ||
254 | /* kr_intstatus, kr_intclear, kr_intmask bits */ | 254 | /* kr_intstatus, kr_intclear, kr_intmask bits */ |
255 | #define INFINIPATH_I_RCVURG_MASK 0x1FF | 255 | #define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1) |
256 | #define INFINIPATH_I_RCVAVAIL_MASK 0x1FF | 256 | #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1) |
257 | 257 | ||
258 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ | 258 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ |
259 | #define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0 | 259 | #define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0 |
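Rewriting the interrupt masks as ((1U<<9)-1) instead of 0x1FF makes the field width (here, one bit per port) explicit, so a chip with a different port count only changes the shift. The equivalence is trivial to check:

    #include <assert.h>

    int main(void)
    {
        assert(((1U << 9) - 1) == 0x1FF); /* 9 contiguous low bits (6110) */
        assert(((1U << 5) - 1) == 0x1F);  /* 5 bits, as on the 6120 */
        return 0;
    }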
@@ -338,7 +338,7 @@ static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs, | |||
338 | if (crcbits) { | 338 | if (crcbits) { |
339 | u16 ctrl0, ctrl1; | 339 | u16 ctrl0, ctrl1; |
340 | snprintf(bitsmsg, sizeof bitsmsg, | 340 | snprintf(bitsmsg, sizeof bitsmsg, |
341 | "[HT%s lane %s CRC (%llx); ignore till reload]", | 341 | "[HT%s lane %s CRC (%llx); powercycle to completely clear]", |
342 | !(crcbits & _IPATH_HTLINK1_CRCBITS) ? | 342 | !(crcbits & _IPATH_HTLINK1_CRCBITS) ? |
343 | "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS) | 343 | "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS) |
344 | ? "1 (B)" : "0+1 (A+B)"), | 344 | ? "1 (B)" : "0+1 (A+B)"), |
@@ -389,17 +389,28 @@ static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs, | |||
389 | _IPATH_HTLINK1_CRCBITS))); | 389 | _IPATH_HTLINK1_CRCBITS))); |
390 | } | 390 | } |
391 | 391 | ||
392 | /* 6110 specific hardware errors... */ | ||
393 | static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = { | ||
394 | INFINIPATH_HWE_MSG(HTCBUSIREQPARITYERR, "HTC Ireq Parity"), | ||
395 | INFINIPATH_HWE_MSG(HTCBUSTREQPARITYERR, "HTC Treq Parity"), | ||
396 | INFINIPATH_HWE_MSG(HTCBUSTRESPPARITYERR, "HTC Tresp Parity"), | ||
397 | INFINIPATH_HWE_MSG(HTCMISCERR5, "HT core Misc5"), | ||
398 | INFINIPATH_HWE_MSG(HTCMISCERR6, "HT core Misc6"), | ||
399 | INFINIPATH_HWE_MSG(HTCMISCERR7, "HT core Misc7"), | ||
400 | INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"), | ||
401 | INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), | ||
402 | }; | ||
403 | |||
392 | /** | 404 | /** |
393 | * ipath_ht_handle_hwerrors - display hardware errors | 405 | * ipath_ht_handle_hwerrors - display hardware errors. |
394 | * @dd: the infinipath device | 406 | * @dd: the infinipath device |
395 | * @msg: the output buffer | 407 | * @msg: the output buffer |
396 | * @msgl: the size of the output buffer | 408 | * @msgl: the size of the output buffer |
397 | * | 409 | * |
398 | * Use same msg buffer as regular errors to avoid | 410 | * Use same msg buffer as regular errors to avoid excessive stack |
399 | * excessive stack use. Most hardware errors are catastrophic, but for | 411 | * use. Most hardware errors are catastrophic, but for right now, |
400 | * right now, we'll print them and continue. | 412 | * we'll print them and continue. We reuse the same message buffer as |
401 | * We reuse the same message buffer as ipath_handle_errors() to avoid | 413 | * ipath_handle_errors() to avoid excessive stack usage. |
402 | * excessive stack usage. | ||
403 | */ | 414 | */ |
404 | static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | 415 | static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, |
405 | size_t msgl) | 416 | size_t msgl) |
@@ -440,19 +451,49 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
440 | * make sure we get this much out, unless told to be quiet, | 451 | * make sure we get this much out, unless told to be quiet, |
441 | * or it's occurred within the last 5 seconds | 452 | * or it's occurred within the last 5 seconds |
442 | */ | 453 | */ |
443 | if ((hwerrs & ~dd->ipath_lasthwerror) || | 454 | if ((hwerrs & ~(dd->ipath_lasthwerror | |
455 | ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
456 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
457 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) || | ||
444 | (ipath_debug & __IPATH_VERBDBG)) | 458 | (ipath_debug & __IPATH_VERBDBG)) |
445 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " | 459 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " |
446 | "(cleared)\n", (unsigned long long) hwerrs); | 460 | "(cleared)\n", (unsigned long long) hwerrs); |
447 | dd->ipath_lasthwerror |= hwerrs; | 461 | dd->ipath_lasthwerror |= hwerrs; |
448 | 462 | ||
449 | if (hwerrs & ~infinipath_hwe_bitsextant) | 463 | if (hwerrs & ~dd->ipath_hwe_bitsextant) |
450 | ipath_dev_err(dd, "hwerror interrupt with unknown errors " | 464 | ipath_dev_err(dd, "hwerror interrupt with unknown errors " |
451 | "%llx set\n", (unsigned long long) | 465 | "%llx set\n", (unsigned long long) |
452 | (hwerrs & ~infinipath_hwe_bitsextant)); | 466 | (hwerrs & ~dd->ipath_hwe_bitsextant)); |
453 | 467 | ||
454 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); | 468 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); |
455 | if (ctrl & INFINIPATH_C_FREEZEMODE) { | 469 | if (ctrl & INFINIPATH_C_FREEZEMODE) { |
470 | /* | ||
471 | * parity errors in send memory are recoverable, | ||
472 | * just cancel the send (if indicated in sendbuffererror), | ||
473 | * count the occurrence, unfreeze (if no other handled | ||
474 | * hardware error bits are set), and continue. They can | ||
475 | * occur if a processor speculative read is done to the PIO | ||
476 | * buffer while we are sending a packet, for example. | ||
477 | */ | ||
478 | if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
479 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
480 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { | ||
481 | ipath_stats.sps_txeparity++; | ||
482 | ipath_dbg("Recovering from TXE parity error (%llu), " | ||
483 | "hwerrstatus=%llx\n", | ||
484 | (unsigned long long) ipath_stats.sps_txeparity, | ||
485 | (unsigned long long) hwerrs); | ||
486 | ipath_disarm_senderrbufs(dd); | ||
487 | hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
488 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
489 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); | ||
490 | if (!hwerrs) { /* else leave in freeze mode */ | ||
491 | ipath_write_kreg(dd, | ||
492 | dd->ipath_kregs->kr_control, | ||
493 | dd->ipath_control); | ||
494 | return; | ||
495 | } | ||
496 | } | ||
456 | if (hwerrs) { | 497 | if (hwerrs) { |
457 | /* | 498 | /* |
458 | * if any set that we aren't ignoring; only | 499 | * if any set that we aren't ignoring; only |
@@ -499,44 +540,16 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
499 | bits); | 540 | bits); |
500 | strlcat(msg, bitsmsg, msgl); | 541 | strlcat(msg, bitsmsg, msgl); |
501 | } | 542 | } |
502 | if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK | 543 | |
503 | << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) { | 544 | ipath_format_hwerrors(hwerrs, |
504 | bits = (u32) ((hwerrs >> | 545 | ipath_6110_hwerror_msgs, |
505 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) & | 546 | sizeof(ipath_6110_hwerror_msgs) / |
506 | INFINIPATH_HWE_RXEMEMPARITYERR_MASK); | 547 | sizeof(ipath_6110_hwerror_msgs[0]), |
507 | snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ", | 548 | msg, msgl); |
508 | bits); | ||
509 | strlcat(msg, bitsmsg, msgl); | ||
510 | } | ||
511 | if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK | ||
512 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { | ||
513 | bits = (u32) ((hwerrs >> | ||
514 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) & | ||
515 | INFINIPATH_HWE_TXEMEMPARITYERR_MASK); | ||
516 | snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ", | ||
517 | bits); | ||
518 | strlcat(msg, bitsmsg, msgl); | ||
519 | } | ||
520 | if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR) | ||
521 | strlcat(msg, "[IB2IPATH Parity]", msgl); | ||
522 | if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR) | ||
523 | strlcat(msg, "[IPATH2IB Parity]", msgl); | ||
524 | if (hwerrs & INFINIPATH_HWE_HTCBUSIREQPARITYERR) | ||
525 | strlcat(msg, "[HTC Ireq Parity]", msgl); | ||
526 | if (hwerrs & INFINIPATH_HWE_HTCBUSTREQPARITYERR) | ||
527 | strlcat(msg, "[HTC Treq Parity]", msgl); | ||
528 | if (hwerrs & INFINIPATH_HWE_HTCBUSTRESPPARITYERR) | ||
529 | strlcat(msg, "[HTC Tresp Parity]", msgl); | ||
530 | 549 | ||
531 | if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS)) | 550 | if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS)) |
532 | hwerr_crcbits(dd, hwerrs, msg, msgl); | 551 | hwerr_crcbits(dd, hwerrs, msg, msgl); |
533 | 552 | ||
534 | if (hwerrs & INFINIPATH_HWE_HTCMISCERR5) | ||
535 | strlcat(msg, "[HT core Misc5]", msgl); | ||
536 | if (hwerrs & INFINIPATH_HWE_HTCMISCERR6) | ||
537 | strlcat(msg, "[HT core Misc6]", msgl); | ||
538 | if (hwerrs & INFINIPATH_HWE_HTCMISCERR7) | ||
539 | strlcat(msg, "[HT core Misc7]", msgl); | ||
540 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { | 553 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { |
541 | strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", | 554 | strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", |
542 | msgl); | 555 | msgl); |
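The if/strlcat chains removed above are what the new ipath_format_hwerrors() call replaces: each chip now just supplies a table of {mask, msg} pairs. The helper's body is not part of this diff; a plausible shape, assuming kernel types and strlcat from <linux/string.h>, would be:

    /* Sketch only; the real ipath_format_hwerrors() lives in the core driver. */
    static void format_hwerrors(u64 hwerrs,
                                const struct ipath_hwerror_msgs *msgs,
                                size_t nmsgs, char *msg, size_t lmsg)
    {
        size_t i;

        for (i = 0; i < nmsgs; i++)
            if (hwerrs & msgs[i].mask) {
                strlcat(msg, "[", lmsg);
                strlcat(msg, msgs[i].msg, lmsg);
                strlcat(msg, "]", lmsg);
            }
    }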
@@ -573,11 +586,6 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
573 | dd->ipath_hwerrmask); | 586 | dd->ipath_hwerrmask); |
574 | } | 587 | } |
575 | 588 | ||
576 | if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR) | ||
577 | strlcat(msg, "[Rx Dsync]", msgl); | ||
578 | if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) | ||
579 | strlcat(msg, "[SerDes PLL]", msgl); | ||
580 | |||
581 | ipath_dev_err(dd, "%s hardware error\n", msg); | 589 | ipath_dev_err(dd, "%s hardware error\n", msg); |
582 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) | 590 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) |
583 | /* | 591 | /* |
@@ -1080,21 +1088,21 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd, | |||
1080 | ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); | 1088 | ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); |
1081 | } | 1089 | } |
1082 | 1090 | ||
1083 | static void ipath_init_ht_variables(void) | 1091 | static void ipath_init_ht_variables(struct ipath_devdata *dd) |
1084 | { | 1092 | { |
1085 | ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; | 1093 | dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; |
1086 | ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; | 1094 | dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; |
1087 | ipath_gpio_sda = IPATH_GPIO_SDA; | 1095 | dd->ipath_gpio_sda = IPATH_GPIO_SDA; |
1088 | ipath_gpio_scl = IPATH_GPIO_SCL; | 1096 | dd->ipath_gpio_scl = IPATH_GPIO_SCL; |
1089 | 1097 | ||
1090 | infinipath_i_bitsextant = | 1098 | dd->ipath_i_bitsextant = |
1091 | (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | | 1099 | (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | |
1092 | (INFINIPATH_I_RCVAVAIL_MASK << | 1100 | (INFINIPATH_I_RCVAVAIL_MASK << |
1093 | INFINIPATH_I_RCVAVAIL_SHIFT) | | 1101 | INFINIPATH_I_RCVAVAIL_SHIFT) | |
1094 | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | | 1102 | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | |
1095 | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; | 1103 | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; |
1096 | 1104 | ||
1097 | infinipath_e_bitsextant = | 1105 | dd->ipath_e_bitsextant = |
1098 | INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | | 1106 | INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | |
1099 | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | | 1107 | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | |
1100 | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | | 1108 | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | |
@@ -1112,7 +1120,7 @@ static void ipath_init_ht_variables(void) | |||
1112 | INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | | 1120 | INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | |
1113 | INFINIPATH_E_HARDWARE; | 1121 | INFINIPATH_E_HARDWARE; |
1114 | 1122 | ||
1115 | infinipath_hwe_bitsextant = | 1123 | dd->ipath_hwe_bitsextant = |
1116 | (INFINIPATH_HWE_HTCMEMPARITYERR_MASK << | 1124 | (INFINIPATH_HWE_HTCMEMPARITYERR_MASK << |
1117 | INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) | | 1125 | INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) | |
1118 | (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << | 1126 | (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << |
@@ -1141,8 +1149,8 @@ static void ipath_init_ht_variables(void) | |||
1141 | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | | 1149 | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | |
1142 | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; | 1150 | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; |
1143 | 1151 | ||
1144 | infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; | 1152 | dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; |
1145 | infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; | 1153 | dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; |
1146 | } | 1154 | } |
1147 | 1155 | ||
1148 | /** | 1156 | /** |
@@ -1607,5 +1615,5 @@ void ipath_init_iba6110_funcs(struct ipath_devdata *dd) | |||
1607 | * do very early init that is needed before ipath_f_bus is | 1615 | * do very early init that is needed before ipath_f_bus is |
1608 | * called | 1616 | * called |
1609 | */ | 1617 | */ |
1610 | ipath_init_ht_variables(); | 1618 | ipath_init_ht_variables(dd); |
1611 | } | 1619 | } |
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index d86516d23df6..a72ab9de386a 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c | |||
@@ -263,8 +263,8 @@ static const struct ipath_cregs ipath_pe_cregs = { | |||
263 | }; | 263 | }; |
264 | 264 | ||
265 | /* kr_intstatus, kr_intclear, kr_intmask bits */ | 265 | /* kr_intstatus, kr_intclear, kr_intmask bits */ |
266 | #define INFINIPATH_I_RCVURG_MASK 0x1F | 266 | #define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1) |
267 | #define INFINIPATH_I_RCVAVAIL_MASK 0x1F | 267 | #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1) |
268 | 268 | ||
269 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ | 269 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ |
270 | #define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL | 270 | #define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL |
@@ -294,6 +294,33 @@ static const struct ipath_cregs ipath_pe_cregs = { | |||
294 | #define IPATH_GPIO_SCL (1ULL << \ | 294 | #define IPATH_GPIO_SCL (1ULL << \ |
295 | (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) | 295 | (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) |
296 | 296 | ||
297 | /* | ||
298 | * Rev2 silicon allows suppressing check for ArmLaunch errors. | ||
299 | * This can speed up short packet sends on systems that do | ||
300 | * not guarantee write-order. | ||
301 | */ | ||
302 | #define INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR (1ULL<<63) | ||
303 | |||
304 | /* 6120 specific hardware errors... */ | ||
305 | static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = { | ||
306 | INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"), | ||
307 | INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"), | ||
308 | /* | ||
309 | * In practice, it's unlikely that we'll see PCIe PLL, or bus | ||
310 | * parity or memory parity error failures, because most likely we | ||
311 | * won't be able to talk to the core of the chip. Nonetheless, we | ||
312 | * might see them, if they are in parts of the PCIe core that aren't | ||
313 | * essential. | ||
314 | */ | ||
315 | INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"), | ||
316 | INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"), | ||
317 | INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"), | ||
318 | INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"), | ||
319 | INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"), | ||
320 | INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"), | ||
321 | INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), | ||
322 | }; | ||
323 | |||
297 | /** | 324 | /** |
298 | * ipath_pe_handle_hwerrors - display hardware errors. | 325 | * ipath_pe_handle_hwerrors - display hardware errors. |
299 | * @dd: the infinipath device | 326 | * @dd: the infinipath device |
@@ -343,19 +370,49 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
343 | * make sure we get this much out, unless told to be quiet, | 370 | * make sure we get this much out, unless told to be quiet, |
344 | * or it's occurred within the last 5 seconds | 371 | * or it's occurred within the last 5 seconds |
345 | */ | 372 | */ |
346 | if ((hwerrs & ~dd->ipath_lasthwerror) || | 373 | if ((hwerrs & ~(dd->ipath_lasthwerror | |
374 | ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
375 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
376 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) || | ||
347 | (ipath_debug & __IPATH_VERBDBG)) | 377 | (ipath_debug & __IPATH_VERBDBG)) |
348 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " | 378 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " |
349 | "(cleared)\n", (unsigned long long) hwerrs); | 379 | "(cleared)\n", (unsigned long long) hwerrs); |
350 | dd->ipath_lasthwerror |= hwerrs; | 380 | dd->ipath_lasthwerror |= hwerrs; |
351 | 381 | ||
352 | if (hwerrs & ~infinipath_hwe_bitsextant) | 382 | if (hwerrs & ~dd->ipath_hwe_bitsextant) |
353 | ipath_dev_err(dd, "hwerror interrupt with unknown errors " | 383 | ipath_dev_err(dd, "hwerror interrupt with unknown errors " |
354 | "%llx set\n", (unsigned long long) | 384 | "%llx set\n", (unsigned long long) |
355 | (hwerrs & ~infinipath_hwe_bitsextant)); | 385 | (hwerrs & ~dd->ipath_hwe_bitsextant)); |
356 | 386 | ||
357 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); | 387 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); |
358 | if (ctrl & INFINIPATH_C_FREEZEMODE) { | 388 | if (ctrl & INFINIPATH_C_FREEZEMODE) { |
389 | /* | ||
390 | * parity errors in send memory are recoverable, | ||
391 | * just cancel the send (if indicated in sendbuffererror), | ||
392 | * count the occurrence, unfreeze (if no other handled | ||
393 | * hardware error bits are set), and continue. They can | ||
394 | * occur if a processor speculative read is done to the PIO | ||
395 | * buffer while we are sending a packet, for example. | ||
396 | */ | ||
397 | if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
398 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
399 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { | ||
400 | ipath_stats.sps_txeparity++; | ||
401 | ipath_dbg("Recovering from TXE parity error (%llu), " | ||
402 | "hwerrstatus=%llx\n", | ||
403 | (unsigned long long) ipath_stats.sps_txeparity, | ||
404 | (unsigned long long) hwerrs); | ||
405 | ipath_disarm_senderrbufs(dd); | ||
406 | hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
407 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
408 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); | ||
409 | if (!hwerrs) { /* else leave in freeze mode */ | ||
410 | ipath_write_kreg(dd, | ||
411 | dd->ipath_kregs->kr_control, | ||
412 | dd->ipath_control); | ||
413 | return; | ||
414 | } | ||
415 | } | ||
359 | if (hwerrs) { | 416 | if (hwerrs) { |
360 | /* | 417 | /* |
361 | * if any set that we aren't ignoring only make the | 418 | * if any set that we aren't ignoring only make the |
@@ -379,9 +436,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
379 | } else { | 436 | } else { |
380 | ipath_dbg("Clearing freezemode on ignored hardware " | 437 | ipath_dbg("Clearing freezemode on ignored hardware " |
381 | "error\n"); | 438 | "error\n"); |
382 | ctrl &= ~INFINIPATH_C_FREEZEMODE; | ||
383 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, | 439 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, |
384 | ctrl); | 440 | dd->ipath_control); |
385 | } | 441 | } |
386 | } | 442 | } |
387 | 443 | ||
@@ -396,24 +452,13 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
396 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | 452 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, |
397 | dd->ipath_hwerrmask); | 453 | dd->ipath_hwerrmask); |
398 | } | 454 | } |
399 | if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK | 455 | |
400 | << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) { | 456 | ipath_format_hwerrors(hwerrs, |
401 | bits = (u32) ((hwerrs >> | 457 | ipath_6120_hwerror_msgs, |
402 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) & | 458 | sizeof(ipath_6120_hwerror_msgs)/ |
403 | INFINIPATH_HWE_RXEMEMPARITYERR_MASK); | 459 | sizeof(ipath_6120_hwerror_msgs[0]), |
404 | snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ", | 460 | msg, msgl); |
405 | bits); | 461 | |
406 | strlcat(msg, bitsmsg, msgl); | ||
407 | } | ||
408 | if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK | ||
409 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { | ||
410 | bits = (u32) ((hwerrs >> | ||
411 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) & | ||
412 | INFINIPATH_HWE_TXEMEMPARITYERR_MASK); | ||
413 | snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ", | ||
414 | bits); | ||
415 | strlcat(msg, bitsmsg, msgl); | ||
416 | } | ||
417 | if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK | 462 | if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK |
418 | << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) { | 463 | << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) { |
419 | bits = (u32) ((hwerrs >> | 464 | bits = (u32) ((hwerrs >> |
@@ -423,10 +468,6 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
423 | "[PCIe Mem Parity Errs %x] ", bits); | 468 | "[PCIe Mem Parity Errs %x] ", bits); |
424 | strlcat(msg, bitsmsg, msgl); | 469 | strlcat(msg, bitsmsg, msgl); |
425 | } | 470 | } |
426 | if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR) | ||
427 | strlcat(msg, "[IB2IPATH Parity]", msgl); | ||
428 | if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR) | ||
429 | strlcat(msg, "[IPATH2IB Parity]", msgl); | ||
430 | 471 | ||
431 | #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ | 472 | #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ |
432 | INFINIPATH_HWE_COREPLL_RFSLIP ) | 473 | INFINIPATH_HWE_COREPLL_RFSLIP ) |
@@ -452,34 +493,6 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
452 | dd->ipath_hwerrmask); | 493 | dd->ipath_hwerrmask); |
453 | } | 494 | } |
454 | 495 | ||
455 | if (hwerrs & INFINIPATH_HWE_PCIEPOISONEDTLP) | ||
456 | strlcat(msg, "[PCIe Poisoned TLP]", msgl); | ||
457 | if (hwerrs & INFINIPATH_HWE_PCIECPLTIMEOUT) | ||
458 | strlcat(msg, "[PCIe completion timeout]", msgl); | ||
459 | |||
460 | /* | ||
461 | * In practice, it's unlikely wthat we'll see PCIe PLL, or bus | ||
462 | * parity or memory parity error failures, because most likely we | ||
463 | * won't be able to talk to the core of the chip. Nonetheless, we | ||
464 | * might see them, if they are in parts of the PCIe core that aren't | ||
465 | * essential. | ||
466 | */ | ||
467 | if (hwerrs & INFINIPATH_HWE_PCIE1PLLFAILED) | ||
468 | strlcat(msg, "[PCIePLL1]", msgl); | ||
469 | if (hwerrs & INFINIPATH_HWE_PCIE0PLLFAILED) | ||
470 | strlcat(msg, "[PCIePLL0]", msgl); | ||
471 | if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXTLH) | ||
472 | strlcat(msg, "[PCIe XTLH core parity]", msgl); | ||
473 | if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXADM) | ||
474 | strlcat(msg, "[PCIe ADM TX core parity]", msgl); | ||
475 | if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYRADM) | ||
476 | strlcat(msg, "[PCIe ADM RX core parity]", msgl); | ||
477 | |||
478 | if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR) | ||
479 | strlcat(msg, "[Rx Dsync]", msgl); | ||
480 | if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) | ||
481 | strlcat(msg, "[SerDes PLL]", msgl); | ||
482 | |||
483 | ipath_dev_err(dd, "%s hardware error\n", msg); | 496 | ipath_dev_err(dd, "%s hardware error\n", msg); |
484 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { | 497 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { |
485 | /* | 498 | /* |
@@ -525,6 +538,9 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, | |||
525 | case 5: | 538 | case 5: |
526 | n = "InfiniPath_QMH7140"; | 539 | n = "InfiniPath_QMH7140"; |
527 | break; | 540 | break; |
541 | case 6: | ||
542 | n = "InfiniPath_QLE7142"; | ||
543 | break; | ||
528 | default: | 544 | default: |
529 | ipath_dev_err(dd, | 545 | ipath_dev_err(dd, |
530 | "Don't yet know about board with ID %u\n", | 546 | "Don't yet know about board with ID %u\n", |
@@ -571,9 +587,12 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | |||
571 | if (!dd->ipath_boardrev) // no PLL for Emulator | 587 | if (!dd->ipath_boardrev) // no PLL for Emulator |
572 | val &= ~INFINIPATH_HWE_SERDESPLLFAILED; | 588 | val &= ~INFINIPATH_HWE_SERDESPLLFAILED; |
573 | 589 | ||
574 | /* workaround bug 9460 in internal interface bus parity checking */ | 590 | if (dd->ipath_minrev < 2) { |
575 | val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM; | 591 | /* workaround bug 9460 in internal interface bus parity |
576 | 592 | * checking. Fixed (HW bug 9490) in Rev2. | |
593 | */ | ||
594 | val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM; | ||
595 | } | ||
577 | dd->ipath_hwerrmask = val; | 596 | dd->ipath_hwerrmask = val; |
578 | } | 597 | } |
579 | 598 | ||
@@ -583,8 +602,8 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | |||
583 | */ | 602 | */ |
584 | static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | 603 | static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) |
585 | { | 604 | { |
586 | u64 val, tmp, config1; | 605 | u64 val, tmp, config1, prev_val; |
587 | int ret = 0, change = 0; | 606 | int ret = 0; |
588 | 607 | ||
589 | ipath_dbg("Trying to bringup serdes\n"); | 608 | ipath_dbg("Trying to bringup serdes\n"); |
590 | 609 | ||
@@ -641,6 +660,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | |||
641 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); | 660 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); |
642 | 661 | ||
643 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); | 662 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); |
663 | prev_val = val; | ||
644 | if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) & | 664 | if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) & |
645 | INFINIPATH_XGXS_MDIOADDR_MASK) != 3) { | 665 | INFINIPATH_XGXS_MDIOADDR_MASK) != 3) { |
646 | val &= | 666 | val &= |
@@ -648,11 +668,9 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | |||
648 | INFINIPATH_XGXS_MDIOADDR_SHIFT); | 668 | INFINIPATH_XGXS_MDIOADDR_SHIFT); |
649 | /* MDIO address 3 */ | 669 | /* MDIO address 3 */ |
650 | val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT; | 670 | val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT; |
651 | change = 1; | ||
652 | } | 671 | } |
653 | if (val & INFINIPATH_XGXS_RESET) { | 672 | if (val & INFINIPATH_XGXS_RESET) { |
654 | val &= ~INFINIPATH_XGXS_RESET; | 673 | val &= ~INFINIPATH_XGXS_RESET; |
655 | change = 1; | ||
656 | } | 674 | } |
657 | if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & | 675 | if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & |
658 | INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { | 676 | INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { |
@@ -661,9 +679,19 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | |||
661 | INFINIPATH_XGXS_RX_POL_SHIFT); | 679 | INFINIPATH_XGXS_RX_POL_SHIFT); |
662 | val |= dd->ipath_rx_pol_inv << | 680 | val |= dd->ipath_rx_pol_inv << |
663 | INFINIPATH_XGXS_RX_POL_SHIFT; | 681 | INFINIPATH_XGXS_RX_POL_SHIFT; |
664 | change = 1; | ||
665 | } | 682 | } |
666 | if (change) | 683 | if (dd->ipath_minrev >= 2) { |
684 | /* Rev 2 can tolerate multiple writes to PBC, and | ||
685 | * allowing them can provide lower latency on some | ||
686 | * CPUs, but this feature is off by default, only | ||
687 | * turned on by setting D63 of XGXSconfig reg. | ||
688 | * May want to make this conditional more | ||
689 | * fine-grained in future. This is not exactly | ||
690 | * related to XGXS, but where the bit ended up. | ||
691 | */ | ||
692 | val |= INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR; | ||
693 | } | ||
694 | if (val != prev_val) | ||
667 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | 695 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); |
668 | 696 | ||
669 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | 697 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); |
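Saving prev_val up front and comparing once at the end replaces the old 'change' flag: every conditional just edits val, and a single comparison decides whether the register write (and its side effects) happens at all. The idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_reg = 0x8; /* stands in for kr_xgxsconfig */

    static void write_reg(uint64_t v)
    {
        fake_reg = v;
        printf("wrote 0x%llx\n", (unsigned long long) v);
    }

    int main(void)
    {
        uint64_t val = fake_reg, prev_val = val;

        val &= ~0x8ULL;    /* e.g. clear a RESET bit */
        val |= 1ULL << 63; /* e.g. set a feature bit */

        if (val != prev_val) /* one decision, no flag to keep in sync */
            write_reg(val);
        return 0;
    }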
@@ -717,9 +745,25 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) | |||
717 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); | 745 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); |
718 | } | 746 | } |
719 | 747 | ||
720 | /* this is not yet needed on this chip, so just return 0. */ | ||
721 | static int ipath_pe_intconfig(struct ipath_devdata *dd) | 748 | static int ipath_pe_intconfig(struct ipath_devdata *dd) |
722 | { | 749 | { |
750 | u64 val; | ||
751 | u32 chiprev; | ||
752 | |||
753 | /* | ||
754 | * If the chip supports added error indication via GPIO pins, | ||
755 | * enable interrupts on those bits so the interrupt routine | ||
756 | * can count the events. Also set flag so interrupt routine | ||
757 | * can know they are expected. | ||
758 | */ | ||
759 | chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT; | ||
760 | if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) { | ||
761 | /* Rev2+ reports extra errors via internal GPIO pins */ | ||
762 | dd->ipath_flags |= IPATH_GPIO_ERRINTRS; | ||
763 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); | ||
764 | val |= IPATH_GPIO_ERRINTR_MASK; | ||
765 | ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val); | ||
766 | } | ||
723 | return 0; | 767 | return 0; |
724 | } | 768 | } |
725 | 769 | ||
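ipath_pe_intconfig() above pulls the minor revision out of a packed revision register with the usual shift-then-mask pattern, and enables the GPIO error interrupts only for Rev2 and later. Extracting such a field on its own (the shift and mask values here are placeholders for the real ipath constants):

    #include <assert.h>
    #include <stdint.h>

    #define CHIPREVMINOR_SHIFT 8    /* placeholder values */
    #define CHIPREVMINOR_MASK  0xFFu

    static unsigned minor_rev(uint64_t revision)
    {
        return (revision >> CHIPREVMINOR_SHIFT) & CHIPREVMINOR_MASK;
    }

    int main(void)
    {
        assert(minor_rev(0x0200) == 2); /* Rev2+: extra GPIO error bits */
        assert(minor_rev(0x0100) == 1); /* Rev1: feature absent */
        return 0;
    }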
@@ -853,21 +897,23 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd, | |||
853 | return 0; | 897 | return 0; |
854 | } | 898 | } |
855 | 899 | ||
856 | static void ipath_init_pe_variables(void) | 900 | static void ipath_init_pe_variables(struct ipath_devdata *dd) |
857 | { | 901 | { |
858 | /* | 902 | /* |
859 | * bits for selecting i2c direction and values, | 903 | * bits for selecting i2c direction and values, |
860 | * used for I2C serial flash | 904 | * used for I2C serial flash |
861 | */ | 905 | */ |
862 | ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; | 906 | dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; |
863 | ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; | 907 | dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; |
864 | ipath_gpio_sda = IPATH_GPIO_SDA; | 908 | dd->ipath_gpio_sda = IPATH_GPIO_SDA; |
865 | ipath_gpio_scl = IPATH_GPIO_SCL; | 909 | dd->ipath_gpio_scl = IPATH_GPIO_SCL; |
866 | 910 | ||
867 | /* variables for sanity checking interrupt and errors */ | 911 | /* variables for sanity checking interrupt and errors */ |
868 | infinipath_hwe_bitsextant = | 912 | dd->ipath_hwe_bitsextant = |
869 | (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << | 913 | (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << |
870 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | | 914 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | |
915 | (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << | ||
916 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | | ||
871 | (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << | 917 | (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << |
872 | INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) | | 918 | INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) | |
873 | INFINIPATH_HWE_PCIE1PLLFAILED | | 919 | INFINIPATH_HWE_PCIE1PLLFAILED | |
@@ -883,13 +929,13 @@ static void ipath_init_pe_variables(void) | |||
883 | INFINIPATH_HWE_SERDESPLLFAILED | | 929 | INFINIPATH_HWE_SERDESPLLFAILED | |
884 | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | | 930 | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | |
885 | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; | 931 | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; |
886 | infinipath_i_bitsextant = | 932 | dd->ipath_i_bitsextant = |
887 | (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | | 933 | (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | |
888 | (INFINIPATH_I_RCVAVAIL_MASK << | 934 | (INFINIPATH_I_RCVAVAIL_MASK << |
889 | INFINIPATH_I_RCVAVAIL_SHIFT) | | 935 | INFINIPATH_I_RCVAVAIL_SHIFT) | |
890 | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | | 936 | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | |
891 | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; | 937 | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; |
892 | infinipath_e_bitsextant = | 938 | dd->ipath_e_bitsextant = |
893 | INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | | 939 | INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | |
894 | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | | 940 | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | |
895 | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | | 941 | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | |
@@ -907,8 +953,8 @@ static void ipath_init_pe_variables(void) | |||
907 | INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | | 953 | INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | |
908 | INFINIPATH_E_HARDWARE; | 954 | INFINIPATH_E_HARDWARE; |
909 | 955 | ||
910 | infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; | 956 | dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; |
911 | infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; | 957 | dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; |
912 | } | 958 | } |
913 | 959 | ||
914 | /* setup the MSI stuff again after a reset. I'd like to just call | 960 | /* setup the MSI stuff again after a reset. I'd like to just call |
@@ -1082,6 +1128,45 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, | |||
1082 | mmiowb(); | 1128 | mmiowb(); |
1083 | spin_unlock_irqrestore(&dd->ipath_tid_lock, flags); | 1129 | spin_unlock_irqrestore(&dd->ipath_tid_lock, flags); |
1084 | } | 1130 | } |
1131 | /** | ||
1132 | * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher | ||
1133 | * @dd: the infinipath device | ||
1134 | * @tidptr: pointer to the expected TID (in chip) to udpate | ||
1135 | * @tidtype: 0 for eager, 1 for expected | ||
1136 | * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing | ||
1137 | * | ||
1138 | * This exists as a separate routine to allow for selection of the | ||
1139 | * appropriate "flavor". The static calls in cleanup just use the | ||
1140 | * revision-agnostic form, as they are not performance critical. | ||
1141 | */ | ||
1142 | static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr, | ||
1143 | u32 type, unsigned long pa) | ||
1144 | { | ||
1145 | u32 __iomem *tidp32 = (u32 __iomem *)tidptr; | ||
1146 | |||
1147 | if (pa != dd->ipath_tidinvalid) { | ||
1148 | if (pa & ((1U << 11) - 1)) { | ||
1149 | dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " | ||
1150 | "not 2KB aligned!\n", pa); | ||
1151 | return; | ||
1152 | } | ||
1153 | pa >>= 11; | ||
1154 | /* paranoia check */ | ||
1155 | if (pa & (7<<29)) | ||
1156 | ipath_dev_err(dd, | ||
1157 | "BUG: Physical page address 0x%lx " | ||
1158 | "has bits set in 31-29\n", pa); | ||
1159 | |||
1160 | if (type == 0) | ||
1161 | pa |= dd->ipath_tidtemplate; | ||
1162 | else /* for now, always full 4KB page */ | ||
1163 | pa |= 2 << 29; | ||
1164 | } | ||
1165 | if (dd->ipath_kregbase) | ||
1166 | writel(pa, tidp32); | ||
1167 | mmiowb(); | ||
1168 | } | ||
1169 | |||
1085 | 1170 | ||
1086 | /** | 1171 | /** |
1087 | * ipath_pe_clear_tid - clear all TID entries for a port, expected and eager | 1172 | * ipath_pe_clear_tid - clear all TID entries for a port, expected and eager |
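ipath_pe_put_tid_2() above packs the TID word from a 2KB-aligned physical address shifted down 11 bits, with a size code ORed into bits 29 and up (2 meaning a full 4KB page) or the chip's eager template for type 0. A sketch of that encoding under those assumptions:

    #include <assert.h>
    #include <stdint.h>

    /* 2 << 29 == full 4KB page, per the routine above; eager TIDs get a
     * chip-specific template instead (elided here). */
    static uint32_t pack_tid(uintptr_t pa, int expected)
    {
        assert((pa & ((1U << 11) - 1)) == 0); /* must be 2KB aligned */
        pa >>= 11;
        assert((pa & (7UL << 29)) == 0);      /* fits below the size field */
        return (uint32_t) (expected ? pa | (2UL << 29) : pa);
    }

    int main(void)
    {
        assert(pack_tid(0x1000, 1) == ((0x1000 >> 11) | (2U << 29)));
        return 0;
    }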
@@ -1203,7 +1288,7 @@ int __attribute__((weak)) ipath_unordered_wc(void) | |||
1203 | 1288 | ||
1204 | /** | 1289 | /** |
1205 | * ipath_init_pe_get_base_info - set chip-specific flags for user code | 1290 | * ipath_init_pe_get_base_info - set chip-specific flags for user code |
1206 | * @dd: the infinipath device | 1291 | * @pd: the infinipath port |
1207 | * @kbase: ipath_base_info pointer | 1292 | * @kbase: ipath_base_info pointer |
1208 | * | 1293 | * |
1209 | * We set the PCIE flag because the lower bandwidth on PCIe vs | 1294 | * We set the PCIE flag because the lower bandwidth on PCIe vs |
@@ -1212,6 +1297,7 @@ int __attribute__((weak)) ipath_unordered_wc(void) | |||
1212 | static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase) | 1297 | static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase) |
1213 | { | 1298 | { |
1214 | struct ipath_base_info *kinfo = kbase; | 1299 | struct ipath_base_info *kinfo = kbase; |
1300 | struct ipath_devdata *dd; | ||
1215 | 1301 | ||
1216 | if (ipath_unordered_wc()) { | 1302 | if (ipath_unordered_wc()) { |
1217 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER; | 1303 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER; |
@@ -1220,8 +1306,20 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase) | |||
1220 | else | 1306 | else |
1221 | ipath_cdbg(PROC, "Not Intel processor, WC ordered\n"); | 1307 | ipath_cdbg(PROC, "Not Intel processor, WC ordered\n"); |
1222 | 1308 | ||
1223 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE; | 1309 | if (pd == NULL) |
1310 | goto done; | ||
1224 | 1311 | ||
1312 | dd = pd->port_dd; | ||
1313 | |||
1314 | if (dd != NULL && dd->ipath_minrev >= 2) { | ||
1315 | ipath_cdbg(PROC, "IBA6120 Rev2, allow multiple PBC write\n"); | ||
1316 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_PBC_REWRITE; | ||
1317 | ipath_cdbg(PROC, "IBA6120 Rev2, allow loose DMA alignment\n"); | ||
1318 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_LOOSE_DMA_ALIGN; | ||
1319 | } | ||
1320 | |||
1321 | done: | ||
1322 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE; | ||
1225 | return 0; | 1323 | return 0; |
1226 | } | 1324 | } |
1227 | 1325 | ||
@@ -1244,7 +1342,10 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd) | |||
1244 | dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes; | 1342 | dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes; |
1245 | dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes; | 1343 | dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes; |
1246 | dd->ipath_f_clear_tids = ipath_pe_clear_tids; | 1344 | dd->ipath_f_clear_tids = ipath_pe_clear_tids; |
1247 | dd->ipath_f_put_tid = ipath_pe_put_tid; | 1345 | if (dd->ipath_minrev >= 2) |
1346 | dd->ipath_f_put_tid = ipath_pe_put_tid_2; | ||
1347 | else | ||
1348 | dd->ipath_f_put_tid = ipath_pe_put_tid; | ||
1248 | dd->ipath_f_cleanup = ipath_setup_pe_cleanup; | 1349 | dd->ipath_f_cleanup = ipath_setup_pe_cleanup; |
1249 | dd->ipath_f_setextled = ipath_setup_pe_setextled; | 1350 | dd->ipath_f_setextled = ipath_setup_pe_setextled; |
1250 | dd->ipath_f_get_base_info = ipath_pe_get_base_info; | 1351 | dd->ipath_f_get_base_info = ipath_pe_get_base_info; |
@@ -1259,6 +1360,6 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd) | |||
1259 | dd->ipath_kregs = &ipath_pe_kregs; | 1360 | dd->ipath_kregs = &ipath_pe_kregs; |
1260 | dd->ipath_cregs = &ipath_pe_cregs; | 1361 | dd->ipath_cregs = &ipath_pe_cregs; |
1261 | 1362 | ||
1262 | ipath_init_pe_variables(); | 1363 | ipath_init_pe_variables(dd); |
1263 | } | 1364 | } |
1264 | 1365 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 44669dc2e22d..d819cca524cd 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c | |||
@@ -88,13 +88,13 @@ MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); | |||
88 | static int create_port0_egr(struct ipath_devdata *dd) | 88 | static int create_port0_egr(struct ipath_devdata *dd) |
89 | { | 89 | { |
90 | unsigned e, egrcnt; | 90 | unsigned e, egrcnt; |
91 | struct sk_buff **skbs; | 91 | struct ipath_skbinfo *skbinfo; |
92 | int ret; | 92 | int ret; |
93 | 93 | ||
94 | egrcnt = dd->ipath_rcvegrcnt; | 94 | egrcnt = dd->ipath_rcvegrcnt; |
95 | 95 | ||
96 | skbs = vmalloc(sizeof(*dd->ipath_port0_skbs) * egrcnt); | 96 | skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt); |
97 | if (skbs == NULL) { | 97 | if (skbinfo == NULL) { |
98 | ipath_dev_err(dd, "allocation error for eager TID " | 98 | ipath_dev_err(dd, "allocation error for eager TID " |
99 | "skb array\n"); | 99 | "skb array\n"); |
100 | ret = -ENOMEM; | 100 | ret = -ENOMEM; |
@@ -109,13 +109,13 @@ static int create_port0_egr(struct ipath_devdata *dd) | |||
109 | * 4 bytes so that the data buffer stays word aligned. | 109 | * 4 bytes so that the data buffer stays word aligned. |
110 | * See ipath_kreceive() for more details. | 110 | * See ipath_kreceive() for more details. |
111 | */ | 111 | */ |
112 | skbs[e] = ipath_alloc_skb(dd, GFP_KERNEL); | 112 | skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL); |
113 | if (!skbs[e]) { | 113 | if (!skbinfo[e].skb) { |
114 | ipath_dev_err(dd, "SKB allocation error for " | 114 | ipath_dev_err(dd, "SKB allocation error for " |
115 | "eager TID %u\n", e); | 115 | "eager TID %u\n", e); |
116 | while (e != 0) | 116 | while (e != 0) |
117 | dev_kfree_skb(skbs[--e]); | 117 | dev_kfree_skb(skbinfo[--e].skb); |
118 | vfree(skbs); | 118 | vfree(skbinfo); |
119 | ret = -ENOMEM; | 119 | ret = -ENOMEM; |
120 | goto bail; | 120 | goto bail; |
121 | } | 121 | } |
@@ -124,14 +124,17 @@ static int create_port0_egr(struct ipath_devdata *dd) | |||
124 | * After loop above, so we can test non-NULL to see if ready | 124 | * After loop above, so we can test non-NULL to see if ready |
125 | * to use at receive, etc. | 125 | * to use at receive, etc. |
126 | */ | 126 | */ |
127 | dd->ipath_port0_skbs = skbs; | 127 | dd->ipath_port0_skbinfo = skbinfo; |
128 | 128 | ||
129 | for (e = 0; e < egrcnt; e++) { | 129 | for (e = 0; e < egrcnt; e++) { |
130 | unsigned long phys = | 130 | dd->ipath_port0_skbinfo[e].phys = |
131 | virt_to_phys(dd->ipath_port0_skbs[e]->data); | 131 | ipath_map_single(dd->pcidev, |
132 | dd->ipath_port0_skbinfo[e].skb->data, | ||
133 | dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE); | ||
132 | dd->ipath_f_put_tid(dd, e + (u64 __iomem *) | 134 | dd->ipath_f_put_tid(dd, e + (u64 __iomem *) |
133 | ((char __iomem *) dd->ipath_kregbase + | 135 | ((char __iomem *) dd->ipath_kregbase + |
134 | dd->ipath_rcvegrbase), 0, phys); | 136 | dd->ipath_rcvegrbase), 0, |
137 | dd->ipath_port0_skbinfo[e].phys); | ||
135 | } | 138 | } |
136 | 139 | ||
137 | ret = 0; | 140 | ret = 0; |
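Switching from a bare sk_buff pointer array to ipath_skbinfo keeps each receive buffer together with the DMA handle ipath_map_single() returned, so teardown can unmap exactly what was mapped. The struct itself is not in this hunk; from the usage it is presumably just the pair:

    /* Inferred from the usage above, not quoted from the ipath headers. */
    struct ipath_skbinfo {
        struct sk_buff *skb; /* the receive buffer */
        dma_addr_t phys;     /* its DMA mapping, kept for later unmap */
    };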
@@ -432,16 +435,33 @@ done: | |||
432 | */ | 435 | */ |
433 | static void init_shadow_tids(struct ipath_devdata *dd) | 436 | static void init_shadow_tids(struct ipath_devdata *dd) |
434 | { | 437 | { |
435 | dd->ipath_pageshadow = (struct page **) | 438 | struct page **pages; |
436 | vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * | 439 | dma_addr_t *addrs; |
440 | |||
441 | pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * | ||
437 | sizeof(struct page *)); | 442 | sizeof(struct page *)); |
438 | if (!dd->ipath_pageshadow) | 443 | if (!pages) { |
439 | ipath_dev_err(dd, "failed to allocate shadow page * " | 444 | ipath_dev_err(dd, "failed to allocate shadow page * " |
440 | "array, no expected sends!\n"); | 445 | "array, no expected sends!\n"); |
441 | else | 446 | dd->ipath_pageshadow = NULL; |
442 | memset(dd->ipath_pageshadow, 0, | 447 | return; |
443 | dd->ipath_cfgports * dd->ipath_rcvtidcnt * | 448 | } |
444 | sizeof(struct page *)); | 449 | |
450 | addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * | ||
451 | sizeof(dma_addr_t)); | ||
452 | if (!addrs) { | ||
453 | ipath_dev_err(dd, "failed to allocate shadow dma handle " | ||
454 | "array, no expected sends!\n"); | ||
455 | vfree(pages); | ||
456 | dd->ipath_pageshadow = NULL; | ||
457 | return; | ||
458 | } | ||
459 | |||
460 | memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt * | ||
461 | sizeof(struct page *)); | ||
462 | |||
463 | dd->ipath_pageshadow = pages; | ||
464 | dd->ipath_physshadow = addrs; | ||
445 | } | 465 | } |
446 | 466 | ||
447 | static void enable_chip(struct ipath_devdata *dd, | 467 | static void enable_chip(struct ipath_devdata *dd, |
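init_shadow_tids() now needs two parallel arrays, page pointers and DMA addresses, and it must end up with either both shadows valid or both NULL; a failure of the second vmalloc() therefore frees the first. The same all-or-nothing shape in plain C:

    #include <stdlib.h>

    /* Allocate two parallel arrays, or neither. */
    static int alloc_shadows(size_t n, void ***pages_out,
                             unsigned long **addrs_out)
    {
        void **pages = calloc(n, sizeof(*pages));
        unsigned long *addrs;

        if (!pages)
            return -1;
        addrs = calloc(n, sizeof(*addrs));
        if (!addrs) {
            free(pages); /* roll back the first allocation */
            return -1;
        }
        *pages_out = pages; /* publish only once both succeed */
        *addrs_out = addrs;
        return 0;
    }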
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 49bf7bb15b04..6bee53ce5f33 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -37,6 +37,50 @@ | |||
37 | #include "ipath_verbs.h" | 37 | #include "ipath_verbs.h" |
38 | #include "ipath_common.h" | 38 | #include "ipath_common.h" |
39 | 39 | ||
40 | /* | ||
41 | * Called when we might have an error that is specific to a particular | ||
42 | * PIO buffer, and may need to cancel that buffer, so it can be re-used. | ||
43 | */ | ||
44 | void ipath_disarm_senderrbufs(struct ipath_devdata *dd) | ||
45 | { | ||
46 | u32 piobcnt; | ||
47 | unsigned long sbuf[4]; | ||
48 | /* | ||
49 | * it's possible that sendbuffererror could have bits set; might | ||
50 | * have already done this as a result of hardware error handling | ||
51 | */ | ||
52 | piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; | ||
53 | /* read these before writing errorclear */ | ||
54 | sbuf[0] = ipath_read_kreg64( | ||
55 | dd, dd->ipath_kregs->kr_sendbuffererror); | ||
56 | sbuf[1] = ipath_read_kreg64( | ||
57 | dd, dd->ipath_kregs->kr_sendbuffererror + 1); | ||
58 | if (piobcnt > 128) { | ||
59 | sbuf[2] = ipath_read_kreg64( | ||
60 | dd, dd->ipath_kregs->kr_sendbuffererror + 2); | ||
61 | sbuf[3] = ipath_read_kreg64( | ||
62 | dd, dd->ipath_kregs->kr_sendbuffererror + 3); | ||
63 | } | ||
64 | |||
65 | if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) { | ||
66 | int i; | ||
67 | if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG)) { | ||
68 | __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG, | ||
69 | "SendbufErrs %lx %lx", sbuf[0], | ||
70 | sbuf[1]); | ||
71 | if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128) | ||
72 | printk(" %lx %lx ", sbuf[2], sbuf[3]); | ||
73 | printk("\n"); | ||
74 | } | ||
75 | |||
76 | for (i = 0; i < piobcnt; i++) | ||
77 | if (test_bit(i, sbuf)) | ||
78 | ipath_disarm_piobufs(dd, i, 1); | ||
79 | dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */ | ||
80 | } | ||
81 | } | ||
82 | |||
83 | |||
40 | /* These are all rcv-related errors which we want to count for stats */ | 84 | /* These are all rcv-related errors which we want to count for stats */ |
41 | #define E_SUM_PKTERRS \ | 85 | #define E_SUM_PKTERRS \ |
42 | (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \ | 86 | (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \ |
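The new ipath_disarm_senderrbufs() treats the kr_sendbuffererror registers as one long bitmap, one bit per PIO buffer, and disarms each buffer whose bit is set. A minimal sketch of that scan, assuming the bitmap has already been snapshotted, with a hypothetical disarm_buf() callback standing in for ipath_disarm_piobufs():

    #include <linux/bitops.h>

    /*
     * Walk a snapshot of the send-buffer-error bitmap and disarm every
     * flagged PIO buffer; disarm_buf() is an illustrative callback.
     */
    static void disarm_flagged_bufs(const unsigned long *sbuf, unsigned nbufs,
                                    void (*disarm_buf)(unsigned idx))
    {
            unsigned i;

            for (i = 0; i < nbufs; i++)
                    if (test_bit(i, sbuf))
                            disarm_buf(i);
    }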
@@ -68,53 +112,9 @@ | |||
68 | 112 | ||
69 | static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs) | 113 | static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs) |
70 | { | 114 | { |
71 | unsigned long sbuf[4]; | ||
72 | u64 ignore_this_time = 0; | 115 | u64 ignore_this_time = 0; |
73 | u32 piobcnt; | ||
74 | 116 | ||
75 | /* if possible that sendbuffererror could be valid */ | 117 | ipath_disarm_senderrbufs(dd); |
76 | piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; | ||
77 | /* read these before writing errorclear */ | ||
78 | sbuf[0] = ipath_read_kreg64( | ||
79 | dd, dd->ipath_kregs->kr_sendbuffererror); | ||
80 | sbuf[1] = ipath_read_kreg64( | ||
81 | dd, dd->ipath_kregs->kr_sendbuffererror + 1); | ||
82 | if (piobcnt > 128) { | ||
83 | sbuf[2] = ipath_read_kreg64( | ||
84 | dd, dd->ipath_kregs->kr_sendbuffererror + 2); | ||
85 | sbuf[3] = ipath_read_kreg64( | ||
86 | dd, dd->ipath_kregs->kr_sendbuffererror + 3); | ||
87 | } | ||
88 | |||
89 | if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) { | ||
90 | int i; | ||
91 | |||
92 | ipath_cdbg(PKT, "SendbufErrs %lx %lx ", sbuf[0], sbuf[1]); | ||
93 | if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128) | ||
94 | printk("%lx %lx ", sbuf[2], sbuf[3]); | ||
95 | for (i = 0; i < piobcnt; i++) { | ||
96 | if (test_bit(i, sbuf)) { | ||
97 | u32 __iomem *piobuf; | ||
98 | if (i < dd->ipath_piobcnt2k) | ||
99 | piobuf = (u32 __iomem *) | ||
100 | (dd->ipath_pio2kbase + | ||
101 | i * dd->ipath_palign); | ||
102 | else | ||
103 | piobuf = (u32 __iomem *) | ||
104 | (dd->ipath_pio4kbase + | ||
105 | (i - dd->ipath_piobcnt2k) * | ||
106 | dd->ipath_4kalign); | ||
107 | |||
108 | ipath_cdbg(PKT, | ||
109 | "PIObuf[%u] @%p pbc is %x; ", | ||
110 | i, piobuf, readl(piobuf)); | ||
111 | |||
112 | ipath_disarm_piobufs(dd, i, 1); | ||
113 | } | ||
114 | } | ||
115 | if (ipath_debug & __IPATH_PKTDBG) | ||
116 | printk("\n"); | ||
117 | } | ||
118 | if ((errs & E_SUM_LINK_PKTERRS) && | 118 | if ((errs & E_SUM_LINK_PKTERRS) && |
119 | !(dd->ipath_flags & IPATH_LINKACTIVE)) { | 119 | !(dd->ipath_flags & IPATH_LINKACTIVE)) { |
120 | /* | 120 | /* |
@@ -132,6 +132,82 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs) | |||
132 | return ignore_this_time; | 132 | return ignore_this_time; |
133 | } | 133 | } |
134 | 134 | ||
135 | /* generic hw error messages... */ | ||
136 | #define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \ | ||
137 | { \ | ||
138 | .mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a << \ | ||
139 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ), \ | ||
140 | .msg = "TXE " #a " Memory Parity" \ | ||
141 | } | ||
142 | #define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \ | ||
143 | { \ | ||
144 | .mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a << \ | ||
145 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ), \ | ||
146 | .msg = "RXE " #a " Memory Parity" \ | ||
147 | } | ||
148 | |||
149 | static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = { | ||
150 | INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"), | ||
151 | INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"), | ||
152 | |||
153 | INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF), | ||
154 | INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC), | ||
155 | INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO), | ||
156 | |||
157 | INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF), | ||
158 | INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ), | ||
159 | INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID), | ||
160 | INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID), | ||
161 | INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF), | ||
162 | INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO), | ||
163 | INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO), | ||
164 | }; | ||
165 | |||
166 | /** | ||
167 | * ipath_format_hwmsg - format a single hwerror message | ||
168 | * @msg: message buffer | ||
169 | * @msgl: length of message buffer | ||
170 | * @hwmsg: message to add to message buffer | ||
171 | */ | ||
172 | static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg) | ||
173 | { | ||
174 | strlcat(msg, "[", msgl); | ||
175 | strlcat(msg, hwmsg, msgl); | ||
176 | strlcat(msg, "]", msgl); | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * ipath_format_hwerrors - format hardware error messages for display | ||
181 | * @hwerrs: hardware errors bit vector | ||
182 | * @hwerrmsgs: hardware error descriptions | ||
183 | * @nhwerrmsgs: number of hwerrmsgs | ||
184 | * @msg: message buffer | ||
185 | * @msgl: message buffer length | ||
186 | */ | ||
187 | void ipath_format_hwerrors(u64 hwerrs, | ||
188 | const struct ipath_hwerror_msgs *hwerrmsgs, | ||
189 | size_t nhwerrmsgs, | ||
190 | char *msg, size_t msgl) | ||
191 | { | ||
192 | int i; | ||
193 | const int glen = | ||
194 | sizeof(ipath_generic_hwerror_msgs) / | ||
195 | sizeof(ipath_generic_hwerror_msgs[0]); | ||
196 | |||
197 | for (i = 0; i < glen; i++) { | ||
198 | if (hwerrs & ipath_generic_hwerror_msgs[i].mask) { | ||
199 | ipath_format_hwmsg(msg, msgl, | ||
200 | ipath_generic_hwerror_msgs[i].msg); | ||
201 | } | ||
202 | } | ||
203 | |||
204 | for (i = 0; i < nhwerrmsgs; i++) { | ||
205 | if (hwerrs & hwerrmsgs[i].mask) { | ||
206 | ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg); | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | |||
135 | /* return the strings for the most common link states */ | 211 | /* return the strings for the most common link states */ |
136 | static char *ib_linkstate(u32 linkstate) | 212 | static char *ib_linkstate(u32 linkstate) |
137 | { | 213 | { |
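ipath_format_hwerrors() above is table-driven: each entry pairs a bit mask with a string, and every mask that intersects hwerrs gets its string appended in brackets. A self-contained userspace sketch of the same decode, using snprintf appends in place of strlcat (the two table entries reuse the TXE parity shift of 40 from this patch; the rest is illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct hwerror_msg {
            uint64_t mask;
            const char *msg;
    };

    static const struct hwerror_msg msgs[] = {
            { 0x1ULL << 40, "TXE PIOBUF Memory Parity" },
            { 0x2ULL << 40, "TXE PIOPBC Memory Parity" },
    };

    /* Append "[msg]" for every table entry whose mask intersects hwerrs. */
    static void format_hwerrors(uint64_t hwerrs, char *buf, size_t len)
    {
            size_t i, used = strlen(buf);

            for (i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++)
                    if (hwerrs & msgs[i].mask)
                            used += snprintf(buf + used,
                                             len > used ? len - used : 0,
                                             "[%s]", msgs[i].msg);
    }

    int main(void)
    {
            char buf[128] = "";

            format_hwerrors(0x3ULL << 40, buf, sizeof(buf));
            printf("%s\n", buf);  /* both bracketed messages print */
            return 0;
    }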
@@ -404,10 +480,10 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
404 | dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg); | 480 | dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg); |
405 | } | 481 | } |
406 | 482 | ||
407 | if (!noprint && (errs & ~infinipath_e_bitsextant)) | 483 | if (!noprint && (errs & ~dd->ipath_e_bitsextant)) |
408 | ipath_dev_err(dd, "error interrupt with unknown errors " | 484 | ipath_dev_err(dd, "error interrupt with unknown errors " |
409 | "%llx set\n", (unsigned long long) | 485 | "%llx set\n", (unsigned long long) |
410 | (errs & ~infinipath_e_bitsextant)); | 486 | (errs & ~dd->ipath_e_bitsextant)); |
411 | 487 | ||
412 | if (errs & E_SUM_ERRS) | 488 | if (errs & E_SUM_ERRS) |
413 | ignore_this_time = handle_e_sum_errs(dd, errs); | 489 | ignore_this_time = handle_e_sum_errs(dd, errs); |
@@ -478,6 +554,14 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
478 | ~(INFINIPATH_E_HARDWARE | | 554 | ~(INFINIPATH_E_HARDWARE | |
479 | INFINIPATH_E_IBSTATUSCHANGED); | 555 | INFINIPATH_E_IBSTATUSCHANGED); |
480 | } | 556 | } |
557 | |||
558 | /* likely due to cancel, so suppress */ | ||
559 | if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) && | ||
560 | dd->ipath_lastcancel > jiffies) { | ||
561 | ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n"); | ||
562 | errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN); | ||
563 | } | ||
564 | |||
481 | if (!errs) | 565 | if (!errs) |
482 | return 0; | 566 | return 0; |
483 | 567 | ||
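The suppression above works as a short time window: the cancel path stamps ipath_lastcancel = jiffies + 3, and the error handler drops armlaunch/short-packet errors until that deadline passes. A minimal sketch of the pattern, using the kernel's wrap-safe time_after() where the patch compares jiffies directly:

    #include <linux/jiffies.h>

    struct my_dev {
            unsigned long lastcancel;   /* no armlaunch reports before this */
    };

    /* Cancel path: open a ~3-jiffy suppression window. */
    static void note_cancel(struct my_dev *dd)
    {
            dd->lastcancel = jiffies + 3;
    }

    /* Error path: true while the window is open, so drop the error bits. */
    static int suppress_armlaunch(struct my_dev *dd)
    {
            return time_after(dd->lastcancel, jiffies);
    }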
@@ -529,7 +613,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
529 | * don't report same point multiple times, | 613 | * don't report same point multiple times, |
530 | * except kernel | 614 | * except kernel |
531 | */ | 615 | */ |
532 | tl = (u32) * pd->port_rcvhdrtail_kvaddr; | 616 | tl = *(u64 *) pd->port_rcvhdrtail_kvaddr; |
533 | if (tl == dd->ipath_lastrcvhdrqtails[i]) | 617 | if (tl == dd->ipath_lastrcvhdrqtails[i]) |
534 | continue; | 618 | continue; |
535 | hd = ipath_read_ureg32(dd, ur_rcvhdrhead, | 619 | hd = ipath_read_ureg32(dd, ur_rcvhdrhead, |
@@ -729,9 +813,9 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat) | |||
729 | int rcvdint = 0; | 813 | int rcvdint = 0; |
730 | 814 | ||
731 | portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) & | 815 | portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) & |
732 | infinipath_i_rcvavail_mask) | 816 | dd->ipath_i_rcvavail_mask) |
733 | | ((istat >> INFINIPATH_I_RCVURG_SHIFT) & | 817 | | ((istat >> INFINIPATH_I_RCVURG_SHIFT) & |
734 | infinipath_i_rcvurg_mask); | 818 | dd->ipath_i_rcvurg_mask); |
735 | for (i = 1; i < dd->ipath_cfgports; i++) { | 819 | for (i = 1; i < dd->ipath_cfgports; i++) { |
736 | struct ipath_portdata *pd = dd->ipath_pd[i]; | 820 | struct ipath_portdata *pd = dd->ipath_pd[i]; |
737 | if (portr & (1 << i) && pd && pd->port_cnt && | 821 | if (portr & (1 << i) && pd && pd->port_cnt && |
@@ -808,7 +892,7 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) | |||
808 | if (oldhead != curtail) { | 892 | if (oldhead != curtail) { |
809 | if (dd->ipath_flags & IPATH_GPIO_INTR) { | 893 | if (dd->ipath_flags & IPATH_GPIO_INTR) { |
810 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear, | 894 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear, |
811 | (u64) (1 << 2)); | 895 | (u64) (1 << IPATH_GPIO_PORT0_BIT)); |
812 | istat = port0rbits | INFINIPATH_I_GPIO; | 896 | istat = port0rbits | INFINIPATH_I_GPIO; |
813 | } | 897 | } |
814 | else | 898 | else |
@@ -838,10 +922,10 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) | |||
838 | if (unexpected) | 922 | if (unexpected) |
839 | unexpected = 0; | 923 | unexpected = 0; |
840 | 924 | ||
841 | if (unlikely(istat & ~infinipath_i_bitsextant)) | 925 | if (unlikely(istat & ~dd->ipath_i_bitsextant)) |
842 | ipath_dev_err(dd, | 926 | ipath_dev_err(dd, |
843 | "interrupt with unknown interrupts %x set\n", | 927 | "interrupt with unknown interrupts %x set\n", |
844 | istat & (u32) ~ infinipath_i_bitsextant); | 928 | istat & (u32) ~ dd->ipath_i_bitsextant); |
845 | else | 929 | else |
846 | ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat); | 930 | ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat); |
847 | 931 | ||
@@ -867,26 +951,80 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) | |||
867 | 951 | ||
868 | if (istat & INFINIPATH_I_GPIO) { | 952 | if (istat & INFINIPATH_I_GPIO) { |
869 | /* | 953 | /* |
870 | * Packets are available in the port 0 rcv queue. | 954 | * GPIO interrupts fall in two broad classes: |
871 | * Eventually this needs to be generalized to check | 955 | * GPIO_2 indicates (on some HT4xx boards) that a packet |
872 | * IPATH_GPIO_INTR, and the specific GPIO bit, if | 956 | * has arrived for Port 0. Checking for this |
873 | * GPIO interrupts are used for anything else. | 957 | * is controlled by flag IPATH_GPIO_INTR. |
958 | * GPIO_3..5 on IBA6120 Rev2 chips indicate errors | ||
959 | * that we need to count. Checking for this | ||
960 | * is controlled by flag IPATH_GPIO_ERRINTRS. | ||
874 | */ | 961 | */ |
875 | if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) { | 962 | u32 gpiostatus; |
876 | u32 gpiostatus; | 963 | u32 to_clear = 0; |
877 | gpiostatus = ipath_read_kreg32( | 964 | |
878 | dd, dd->ipath_kregs->kr_gpio_status); | 965 | gpiostatus = ipath_read_kreg32( |
879 | ipath_dbg("Unexpected GPIO interrupt bits %x\n", | 966 | dd, dd->ipath_kregs->kr_gpio_status); |
880 | gpiostatus); | 967 | /* First the error-counter case. |
881 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear, | 968 | */ |
882 | gpiostatus); | 969 | if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) && |
970 | (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) { | ||
971 | /* want to clear the bits we see asserted. */ | ||
972 | to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK); | ||
973 | |||
974 | /* | ||
975 | * Count appropriately, clear bits out of our copy, | ||
976 | * as they have been "handled". | ||
977 | */ | ||
978 | if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) { | ||
979 | ipath_dbg("FlowCtl on UnsupVL\n"); | ||
980 | dd->ipath_rxfc_unsupvl_errs++; | ||
981 | } | ||
982 | if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) { | ||
983 | ipath_dbg("Overrun Threshold exceeded\n"); | ||
984 | dd->ipath_overrun_thresh_errs++; | ||
985 | } | ||
986 | if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) { | ||
987 | ipath_dbg("Local Link Integrity error\n"); | ||
988 | dd->ipath_lli_errs++; | ||
989 | } | ||
990 | gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK; | ||
883 | } | 991 | } |
884 | else { | 992 | /* Now the Port0 Receive case */ |
885 | /* Clear GPIO status bit 2 */ | 993 | if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) && |
886 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear, | 994 | (dd->ipath_flags & IPATH_GPIO_INTR)) { |
887 | (u64) (1 << 2)); | 995 | /* |
996 | * GPIO status bit 2 is set, and we expected it. | ||
997 | * Clear it and flag the port 0 receive. | ||
998 | * This probably only happens if a Port0 pkt | ||
999 | * arrives at _just_ the wrong time, and we | ||
1000 | * handle that by setting chk0rcv; | ||
1001 | */ | ||
1002 | to_clear |= (1 << IPATH_GPIO_PORT0_BIT); | ||
1003 | gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT); | ||
888 | chk0rcv = 1; | 1004 | chk0rcv = 1; |
889 | } | 1005 | } |
1006 | if (unlikely(gpiostatus)) { | ||
1007 | /* | ||
1008 | * Some unexpected bits remain. If they could have | ||
1009 | * caused the interrupt, complain and clear. | ||
1010 | * MEA: this is almost certainly non-ideal. | ||
1011 | * We should look into auto-disable of unexpected | ||
1012 | * GPIO interrupts, possibly on a "three strikes" | ||
1013 | * basis. | ||
1014 | */ | ||
1015 | u32 mask; | ||
1016 | mask = ipath_read_kreg32( | ||
1017 | dd, dd->ipath_kregs->kr_gpio_mask); | ||
1018 | if (mask & gpiostatus) { | ||
1019 | ipath_dbg("Unexpected GPIO IRQ bits %x\n", | ||
1020 | gpiostatus & mask); | ||
1021 | to_clear |= (gpiostatus & mask); | ||
1022 | } | ||
1023 | } | ||
1024 | if (to_clear) { | ||
1025 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear, | ||
1026 | (u64) to_clear); | ||
1027 | } | ||
890 | } | 1028 | } |
891 | chk0rcv |= istat & port0rbits; | 1029 | chk0rcv |= istat & port0rbits; |
892 | 1030 | ||
@@ -911,9 +1049,9 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) | |||
911 | istat &= ~port0rbits; | 1049 | istat &= ~port0rbits; |
912 | } | 1050 | } |
913 | 1051 | ||
914 | if (istat & ((infinipath_i_rcvavail_mask << | 1052 | if (istat & ((dd->ipath_i_rcvavail_mask << |
915 | INFINIPATH_I_RCVAVAIL_SHIFT) | 1053 | INFINIPATH_I_RCVAVAIL_SHIFT) |
916 | | (infinipath_i_rcvurg_mask << | 1054 | | (dd->ipath_i_rcvurg_mask << |
917 | INFINIPATH_I_RCVURG_SHIFT))) | 1055 | INFINIPATH_I_RCVURG_SHIFT))) |
918 | handle_urcv(dd, istat); | 1056 | handle_urcv(dd, istat); |
919 | 1057 | ||
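The reworked GPIO branch accumulates everything it recognizes into to_clear and issues a single write to kr_gpio_clear at the end. A compressed sketch of that accumulate-then-clear flow; count_errs() and write_clear() are stand-ins for the driver's counters and register write, and the unexpected-bit case is simplified (the driver additionally consults kr_gpio_mask before complaining):

    #include <stdint.h>

    #define GPIO_PORT0_BIT  2
    #define GPIO_ERR_MASK   0x38u   /* bits 3..5: RXUVL, OVRUN, LLI */

    static void handle_gpio(uint32_t status,
                            void (*count_errs)(uint32_t errbits),
                            void (*write_clear)(uint32_t bits))
    {
            uint32_t to_clear = 0;

            if (status & GPIO_ERR_MASK) {           /* error-counter bits */
                    to_clear |= status & GPIO_ERR_MASK;
                    count_errs(status & GPIO_ERR_MASK);
                    status &= ~GPIO_ERR_MASK;
            }
            if (status & (1u << GPIO_PORT0_BIT)) {  /* port-0 packet arrived */
                    to_clear |= 1u << GPIO_PORT0_BIT;
                    status &= ~(1u << GPIO_PORT0_BIT);
            }
            to_clear |= status;                     /* unexpected leftovers */
            if (to_clear)
                    write_clear(to_clear);          /* single register write */
    }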
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index a8a56276ff1d..d7540b71b451 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -39,6 +39,8 @@ | |||
39 | */ | 39 | */ |
40 | 40 | ||
41 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
42 | #include <linux/pci.h> | ||
43 | #include <linux/dma-mapping.h> | ||
42 | #include <asm/io.h> | 44 | #include <asm/io.h> |
43 | 45 | ||
44 | #include "ipath_common.h" | 46 | #include "ipath_common.h" |
@@ -62,7 +64,7 @@ struct ipath_portdata { | |||
62 | /* rcvhdrq base, needs mmap before useful */ | 64 | /* rcvhdrq base, needs mmap before useful */ |
63 | void *port_rcvhdrq; | 65 | void *port_rcvhdrq; |
64 | /* kernel virtual address where hdrqtail is updated */ | 66 | /* kernel virtual address where hdrqtail is updated */ |
65 | volatile __le64 *port_rcvhdrtail_kvaddr; | 67 | void *port_rcvhdrtail_kvaddr; |
66 | /* | 68 | /* |
67 | * temp buffer for expected send setup, allocated at open, instead | 69 | * temp buffer for expected send setup, allocated at open, instead |
68 | * of each setup call | 70 | * of each setup call |
@@ -79,8 +81,8 @@ struct ipath_portdata { | |||
79 | dma_addr_t port_rcvhdrq_phys; | 81 | dma_addr_t port_rcvhdrq_phys; |
80 | dma_addr_t port_rcvhdrqtailaddr_phys; | 82 | dma_addr_t port_rcvhdrqtailaddr_phys; |
81 | /* | 83 | /* |
82 | * number of opens on this instance (0 or 1; ignoring forks, dup, | 84 | * number of opens (including slave subports) on this instance |
83 | * etc. for now) | 85 | * (ignoring forks, dup, etc. for now) |
84 | */ | 86 | */ |
85 | int port_cnt; | 87 | int port_cnt; |
86 | /* | 88 | /* |
@@ -89,6 +91,10 @@ struct ipath_portdata { | |||
89 | */ | 91 | */ |
90 | /* instead of calculating it */ | 92 | /* instead of calculating it */ |
91 | unsigned port_port; | 93 | unsigned port_port; |
94 | /* non-zero if port is being shared. */ | ||
95 | u16 port_subport_cnt; | ||
96 | /* id of this subport within a shared port. */ | ||
97 | u16 port_subport_id; | ||
92 | /* chip offset of PIO buffers for this port */ | 98 | /* chip offset of PIO buffers for this port */ |
93 | u32 port_piobufs; | 99 | u32 port_piobufs; |
94 | /* how many alloc_pages() chunks in port_rcvegrbuf_pages */ | 100 | /* how many alloc_pages() chunks in port_rcvegrbuf_pages */ |
@@ -121,6 +127,16 @@ struct ipath_portdata { | |||
121 | u16 port_pkeys[4]; | 127 | u16 port_pkeys[4]; |
122 | /* so file ops can get at unit */ | 128 | /* so file ops can get at unit */ |
123 | struct ipath_devdata *port_dd; | 129 | struct ipath_devdata *port_dd; |
130 | /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */ | ||
131 | void *subport_uregbase; | ||
132 | /* An array of pages for the eager receive buffers * N */ | ||
133 | void *subport_rcvegrbuf; | ||
134 | /* An array of pages for the eager header queue entries * N */ | ||
135 | void *subport_rcvhdr_base; | ||
136 | /* The version of the library which opened this port */ | ||
137 | u32 userversion; | ||
138 | /* Bitmask of active slaves */ | ||
139 | u32 active_slaves; | ||
124 | }; | 140 | }; |
125 | 141 | ||
126 | struct sk_buff; | 142 | struct sk_buff; |
@@ -132,6 +148,11 @@ struct _ipath_layer { | |||
132 | void *l_arg; | 148 | void *l_arg; |
133 | }; | 149 | }; |
134 | 150 | ||
151 | struct ipath_skbinfo { | ||
152 | struct sk_buff *skb; | ||
153 | dma_addr_t phys; | ||
154 | }; | ||
155 | |||
135 | struct ipath_devdata { | 156 | struct ipath_devdata { |
136 | struct list_head ipath_list; | 157 | struct list_head ipath_list; |
137 | 158 | ||
@@ -154,7 +175,7 @@ struct ipath_devdata { | |||
154 | /* ipath_cfgports pointers */ | 175 | /* ipath_cfgports pointers */ |
155 | struct ipath_portdata **ipath_pd; | 176 | struct ipath_portdata **ipath_pd; |
156 | /* sk_buffs used by port 0 eager receive queue */ | 177 | /* sk_buffs used by port 0 eager receive queue */ |
157 | struct sk_buff **ipath_port0_skbs; | 178 | struct ipath_skbinfo *ipath_port0_skbinfo; |
158 | /* kvirt address of 1st 2k pio buffer */ | 179 | /* kvirt address of 1st 2k pio buffer */ |
159 | void __iomem *ipath_pio2kbase; | 180 | void __iomem *ipath_pio2kbase; |
160 | /* kvirt address of 1st 4k pio buffer */ | 181 | /* kvirt address of 1st 4k pio buffer */ |
@@ -315,12 +336,16 @@ struct ipath_devdata { | |||
315 | u8 ipath_ht_slave_off; | 336 | u8 ipath_ht_slave_off; |
316 | /* for write combining settings */ | 337 | /* for write combining settings */ |
317 | unsigned long ipath_wc_cookie; | 338 | unsigned long ipath_wc_cookie; |
339 | unsigned long ipath_wc_base; | ||
340 | unsigned long ipath_wc_len; | ||
318 | /* ref count for each pkey */ | 341 | /* ref count for each pkey */ |
319 | atomic_t ipath_pkeyrefs[4]; | 342 | atomic_t ipath_pkeyrefs[4]; |
320 | /* shadow copy of all exptids physaddr; used only by funcsim */ | 343 | /* shadow copy of all exptids physaddr; used only by funcsim */ |
321 | u64 *ipath_tidsimshadow; | 344 | u64 *ipath_tidsimshadow; |
322 | /* shadow copy of struct page *'s for exp tid pages */ | 345 | /* shadow copy of struct page *'s for exp tid pages */ |
323 | struct page **ipath_pageshadow; | 346 | struct page **ipath_pageshadow; |
347 | /* shadow copy of dma handles for exp tid pages */ | ||
348 | dma_addr_t *ipath_physshadow; | ||
324 | /* lock to workaround chip bug 9437 */ | 349 | /* lock to workaround chip bug 9437 */ |
325 | spinlock_t ipath_tid_lock; | 350 | spinlock_t ipath_tid_lock; |
326 | 351 | ||
@@ -402,6 +427,9 @@ struct ipath_devdata { | |||
402 | unsigned long ipath_rcvctrl; | 427 | unsigned long ipath_rcvctrl; |
403 | /* shadow kr_sendctrl */ | 428 | /* shadow kr_sendctrl */ |
404 | unsigned long ipath_sendctrl; | 429 | unsigned long ipath_sendctrl; |
430 | /* ports waiting for PIOavail intr */ | ||
431 | unsigned long ipath_portpiowait; | ||
432 | unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */ | ||
405 | 433 | ||
406 | /* value we put in kr_rcvhdrcnt */ | 434 | /* value we put in kr_rcvhdrcnt */ |
407 | u32 ipath_rcvhdrcnt; | 435 | u32 ipath_rcvhdrcnt; |
@@ -465,8 +493,6 @@ struct ipath_devdata { | |||
465 | u32 ipath_htwidth; | 493 | u32 ipath_htwidth; |
466 | /* HT speed (200,400,800,1000) from HT config */ | 494 | /* HT speed (200,400,800,1000) from HT config */ |
467 | u32 ipath_htspeed; | 495 | u32 ipath_htspeed; |
468 | /* ports waiting for PIOavail intr */ | ||
469 | unsigned long ipath_portpiowait; | ||
470 | /* | 496 | /* |
471 | * number of sequential ibcstatus change for polling active/quiet | 497 | * number of sequential ibcstatus change for polling active/quiet |
472 | * (i.e., link not coming up). | 498 | * (i.e., link not coming up). |
@@ -510,8 +536,47 @@ struct ipath_devdata { | |||
510 | u32 ipath_lli_counter; | 536 | u32 ipath_lli_counter; |
511 | /* local link integrity errors */ | 537 | /* local link integrity errors */ |
512 | u32 ipath_lli_errors; | 538 | u32 ipath_lli_errors; |
539 | /* | ||
540 | * Above counts only cases where _successive_ LocalLinkIntegrity | ||
541 | * errors were seen in the receive headers of kern-packets. | ||
542 | * Below are the three (monotonically increasing) counters | ||
543 | * maintained via GPIO interrupts on iba6120-rev2. | ||
544 | */ | ||
545 | u32 ipath_rxfc_unsupvl_errs; | ||
546 | u32 ipath_overrun_thresh_errs; | ||
547 | u32 ipath_lli_errs; | ||
548 | |||
549 | /* | ||
550 | * Not all devices managed by a driver instance are the same | ||
551 | * type, so these fields must be per-device. | ||
552 | */ | ||
553 | u64 ipath_i_bitsextant; | ||
554 | ipath_err_t ipath_e_bitsextant; | ||
555 | ipath_err_t ipath_hwe_bitsextant; | ||
556 | |||
557 | /* | ||
558 | * Below should be computable from number of ports, | ||
559 | * since they are never modified. | ||
560 | */ | ||
561 | u32 ipath_i_rcvavail_mask; | ||
562 | u32 ipath_i_rcvurg_mask; | ||
563 | |||
564 | /* | ||
565 | * Register bits for selecting i2c direction and values, used for | ||
566 | * I2C serial flash. | ||
567 | */ | ||
568 | u16 ipath_gpio_sda_num; | ||
569 | u16 ipath_gpio_scl_num; | ||
570 | u64 ipath_gpio_sda; | ||
571 | u64 ipath_gpio_scl; | ||
513 | }; | 572 | }; |
514 | 573 | ||
574 | /* Private data for file operations */ | ||
575 | struct ipath_filedata { | ||
576 | struct ipath_portdata *pd; | ||
577 | unsigned subport; | ||
578 | unsigned tidcursor; | ||
579 | }; | ||
515 | extern struct list_head ipath_dev_list; | 580 | extern struct list_head ipath_dev_list; |
516 | extern spinlock_t ipath_devs_lock; | 581 | extern spinlock_t ipath_devs_lock; |
517 | extern struct ipath_devdata *ipath_lookup(int unit); | 582 | extern struct ipath_devdata *ipath_lookup(int unit); |
@@ -521,6 +586,7 @@ int ipath_enable_wc(struct ipath_devdata *dd); | |||
521 | void ipath_disable_wc(struct ipath_devdata *dd); | 586 | void ipath_disable_wc(struct ipath_devdata *dd); |
522 | int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); | 587 | int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); |
523 | void ipath_shutdown_device(struct ipath_devdata *); | 588 | void ipath_shutdown_device(struct ipath_devdata *); |
589 | void ipath_disarm_senderrbufs(struct ipath_devdata *); | ||
524 | 590 | ||
525 | struct file_operations; | 591 | struct file_operations; |
526 | int ipath_cdev_init(int minor, char *name, struct file_operations *fops, | 592 | int ipath_cdev_init(int minor, char *name, struct file_operations *fops, |
@@ -572,7 +638,11 @@ int ipath_set_lid(struct ipath_devdata *, u32, u8); | |||
572 | int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); | 638 | int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); |
573 | 639 | ||
574 | /* for use in system calls, where we want to know device type, etc. */ | 640 | /* for use in system calls, where we want to know device type, etc. */ |
575 | #define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data) | 641 | #define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd |
642 | #define subport_fp(fp) \ | ||
643 | ((struct ipath_filedata *)(fp)->private_data)->subport | ||
644 | #define tidcursor_fp(fp) \ | ||
645 | ((struct ipath_filedata *)(fp)->private_data)->tidcursor | ||
576 | 646 | ||
577 | /* | 647 | /* |
578 | * values for ipath_flags | 648 | * values for ipath_flags |
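With per-open state now carried in struct ipath_filedata, the three macros above just reach through file->private_data. A minimal sketch of the same shape with stand-in types, plus one consumer:

    struct my_portdata { unsigned port; };

    /* Mirrors the shape of struct ipath_filedata behind private_data. */
    struct my_filedata {
            struct my_portdata *pd;
            unsigned subport;
            unsigned tidcursor;
    };

    struct my_file { void *private_data; };

    #define my_port_fp(fp)    (((struct my_filedata *)(fp)->private_data)->pd)
    #define my_subport_fp(fp) (((struct my_filedata *)(fp)->private_data)->subport)

    /* Example consumer: a file-op handler picks out its own subport. */
    static unsigned which_subport(struct my_file *fp)
    {
            return my_subport_fp(fp);
    }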
@@ -612,6 +682,15 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); | |||
612 | /* can miss port0 rx interrupts */ | 682 | /* can miss port0 rx interrupts */ |
613 | #define IPATH_POLL_RX_INTR 0x40000 | 683 | #define IPATH_POLL_RX_INTR 0x40000 |
614 | #define IPATH_DISABLED 0x80000 /* administratively disabled */ | 684 | #define IPATH_DISABLED 0x80000 /* administratively disabled */ |
685 | /* Use GPIO interrupts for new counters */ | ||
686 | #define IPATH_GPIO_ERRINTRS 0x100000 | ||
687 | |||
688 | /* Bits in GPIO for the added interrupts */ | ||
689 | #define IPATH_GPIO_PORT0_BIT 2 | ||
690 | #define IPATH_GPIO_RXUVL_BIT 3 | ||
691 | #define IPATH_GPIO_OVRUN_BIT 4 | ||
692 | #define IPATH_GPIO_LLI_BIT 5 | ||
693 | #define IPATH_GPIO_ERRINTR_MASK 0x38 | ||
615 | 694 | ||
616 | /* portdata flag bit offsets */ | 695 | /* portdata flag bit offsets */ |
617 | /* waiting for a packet to arrive */ | 696 | /* waiting for a packet to arrive */ |
@@ -799,6 +878,13 @@ int ipathfs_add_device(struct ipath_devdata *); | |||
799 | int ipathfs_remove_device(struct ipath_devdata *); | 878 | int ipathfs_remove_device(struct ipath_devdata *); |
800 | 879 | ||
801 | /* | 880 | /* |
881 | * dma_addr wrappers - all 0's invalid for hw | ||
882 | */ | ||
883 | dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long, | ||
884 | size_t, int); | ||
885 | dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int); | ||
886 | |||
887 | /* | ||
802 | * Flush write combining store buffers (if present) and perform a write | 888 | * Flush write combining store buffers (if present) and perform a write |
803 | * barrier. | 889 | * barrier. |
804 | */ | 890 | */ |
@@ -855,4 +941,20 @@ extern struct mutex ipath_mutex; | |||
855 | 941 | ||
856 | #endif /* _IPATH_DEBUGGING */ | 942 | #endif /* _IPATH_DEBUGGING */ |
857 | 943 | ||
944 | /* | ||
945 | * this is used for formatting hw error messages... | ||
946 | */ | ||
947 | struct ipath_hwerror_msgs { | ||
948 | u64 mask; | ||
949 | const char *msg; | ||
950 | }; | ||
951 | |||
952 | #define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b } | ||
953 | |||
954 | /* in ipath_intr.c... */ | ||
955 | void ipath_format_hwerrors(u64 hwerrs, | ||
956 | const struct ipath_hwerror_msgs *hwerrmsgs, | ||
957 | size_t nhwerrmsgs, | ||
958 | char *msg, size_t msgl); | ||
959 | |||
858 | #endif /* _IPATH_KERNEL_H */ | 960 | #endif /* _IPATH_KERNEL_H */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index ba1b93226caa..9a6cbd05adcd 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c | |||
@@ -118,9 +118,10 @@ void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey) | |||
118 | * Check the IB SGE for validity and initialize our internal version | 118 | * Check the IB SGE for validity and initialize our internal version |
119 | * of it. | 119 | * of it. |
120 | */ | 120 | */ |
121 | int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge, | 121 | int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, |
122 | struct ib_sge *sge, int acc) | 122 | struct ib_sge *sge, int acc) |
123 | { | 123 | { |
124 | struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; | ||
124 | struct ipath_mregion *mr; | 125 | struct ipath_mregion *mr; |
125 | unsigned n, m; | 126 | unsigned n, m; |
126 | size_t off; | 127 | size_t off; |
@@ -140,7 +141,8 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge, | |||
140 | goto bail; | 141 | goto bail; |
141 | } | 142 | } |
142 | mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; | 143 | mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; |
143 | if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { | 144 | if (unlikely(mr == NULL || mr->lkey != sge->lkey || |
145 | qp->ibqp.pd != mr->pd)) { | ||
144 | ret = 0; | 146 | ret = 0; |
145 | goto bail; | 147 | goto bail; |
146 | } | 148 | } |
@@ -188,9 +190,10 @@ bail: | |||
188 | * | 190 | * |
189 | * Return 1 if successful, otherwise 0. | 191 | * Return 1 if successful, otherwise 0. |
190 | */ | 192 | */ |
191 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | 193 | int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, |
192 | u32 len, u64 vaddr, u32 rkey, int acc) | 194 | u32 len, u64 vaddr, u32 rkey, int acc) |
193 | { | 195 | { |
196 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
194 | struct ipath_lkey_table *rkt = &dev->lk_table; | 197 | struct ipath_lkey_table *rkt = &dev->lk_table; |
195 | struct ipath_sge *sge = &ss->sge; | 198 | struct ipath_sge *sge = &ss->sge; |
196 | struct ipath_mregion *mr; | 199 | struct ipath_mregion *mr; |
@@ -214,7 +217,8 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | |||
214 | } | 217 | } |
215 | 218 | ||
216 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; | 219 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; |
217 | if (unlikely(mr == NULL || mr->lkey != rkey)) { | 220 | if (unlikely(mr == NULL || mr->lkey != rkey || |
221 | qp->ibqp.pd != mr->pd)) { | ||
218 | ret = 0; | 222 | ret = 0; |
219 | goto bail; | 223 | goto bail; |
220 | } | 224 | } |
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index 72d1db89db8f..25908b02fbe5 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c | |||
@@ -87,7 +87,8 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp, | |||
87 | struct ipath_devdata *dd = to_idev(ibdev)->dd; | 87 | struct ipath_devdata *dd = to_idev(ibdev)->dd; |
88 | u32 vendor, majrev, minrev; | 88 | u32 vendor, majrev, minrev; |
89 | 89 | ||
90 | if (smp->attr_mod) | 90 | /* GUID 0 is illegal */ |
91 | if (smp->attr_mod || (dd->ipath_guid == 0)) | ||
91 | smp->status |= IB_SMP_INVALID_FIELD; | 92 | smp->status |= IB_SMP_INVALID_FIELD; |
92 | 93 | ||
93 | nip->base_version = 1; | 94 | nip->base_version = 1; |
@@ -131,10 +132,15 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp, | |||
131 | * We only support one GUID for now. If this changes, the | 132 | * We only support one GUID for now. If this changes, the |
132 | * portinfo.guid_cap field needs to be updated too. | 133 | * portinfo.guid_cap field needs to be updated too. |
133 | */ | 134 | */ |
134 | if (startgx == 0) | 135 | if (startgx == 0) { |
135 | /* The first is a copy of the read-only HW GUID. */ | 136 | __be64 g = to_idev(ibdev)->dd->ipath_guid; |
136 | *p = to_idev(ibdev)->dd->ipath_guid; | 137 | if (g == 0) |
137 | else | 138 | /* GUID 0 is illegal */ |
139 | smp->status |= IB_SMP_INVALID_FIELD; | ||
140 | else | ||
141 | /* The first is a copy of the read-only HW GUID. */ | ||
142 | *p = g; | ||
143 | } else | ||
138 | smp->status |= IB_SMP_INVALID_FIELD; | 144 | smp->status |= IB_SMP_INVALID_FIELD; |
139 | 145 | ||
140 | return reply(smp); | 146 | return reply(smp); |
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index b36f6fb3e37a..a0673c1eef71 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c | |||
@@ -138,6 +138,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd, | |||
138 | goto bail; | 138 | goto bail; |
139 | } | 139 | } |
140 | 140 | ||
141 | mr->mr.pd = pd; | ||
141 | mr->mr.user_base = *iova_start; | 142 | mr->mr.user_base = *iova_start; |
142 | mr->mr.iova = *iova_start; | 143 | mr->mr.iova = *iova_start; |
143 | mr->mr.length = 0; | 144 | mr->mr.length = 0; |
@@ -197,6 +198,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | |||
197 | goto bail; | 198 | goto bail; |
198 | } | 199 | } |
199 | 200 | ||
201 | mr->mr.pd = pd; | ||
200 | mr->mr.user_base = region->user_base; | 202 | mr->mr.user_base = region->user_base; |
201 | mr->mr.iova = region->virt_base; | 203 | mr->mr.iova = region->virt_base; |
202 | mr->mr.length = region->length; | 204 | mr->mr.length = region->length; |
@@ -289,6 +291,7 @@ struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |||
289 | * Resources are allocated but no valid mapping (RKEY can't be | 291 | * Resources are allocated but no valid mapping (RKEY can't be |
290 | * used). | 292 | * used). |
291 | */ | 293 | */ |
294 | fmr->mr.pd = pd; | ||
292 | fmr->mr.user_base = 0; | 295 | fmr->mr.user_base = 0; |
293 | fmr->mr.iova = 0; | 296 | fmr->mr.iova = 0; |
294 | fmr->mr.length = 0; | 297 | fmr->mr.length = 0; |
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 224b0f40767f..46c1c89bf6ae 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c | |||
@@ -335,6 +335,7 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
335 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | 335 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; |
336 | qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | 336 | qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; |
337 | qp->r_nak_state = 0; | 337 | qp->r_nak_state = 0; |
338 | qp->r_wrid_valid = 0; | ||
338 | qp->s_rnr_timeout = 0; | 339 | qp->s_rnr_timeout = 0; |
339 | qp->s_head = 0; | 340 | qp->s_head = 0; |
340 | qp->s_tail = 0; | 341 | qp->s_tail = 0; |
@@ -342,6 +343,7 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
342 | qp->s_last = 0; | 343 | qp->s_last = 0; |
343 | qp->s_ssn = 1; | 344 | qp->s_ssn = 1; |
344 | qp->s_lsn = 0; | 345 | qp->s_lsn = 0; |
346 | qp->s_wait_credit = 0; | ||
345 | if (qp->r_rq.wq) { | 347 | if (qp->r_rq.wq) { |
346 | qp->r_rq.wq->head = 0; | 348 | qp->r_rq.wq->head = 0; |
347 | qp->r_rq.wq->tail = 0; | 349 | qp->r_rq.wq->tail = 0; |
@@ -352,12 +354,13 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
352 | /** | 354 | /** |
353 | * ipath_error_qp - put a QP into an error state | 355 | * ipath_error_qp - put a QP into an error state |
354 | * @qp: the QP to put into an error state | 356 | * @qp: the QP to put into an error state |
357 | * @err: the receive completion error to signal if a RWQE is active | ||
355 | * | 358 | * |
356 | * Flushes both send and receive work queues. | 359 | * Flushes both send and receive work queues. |
357 | * QP s_lock should be held and interrupts disabled. | 360 | * QP s_lock should be held and interrupts disabled. |
358 | */ | 361 | */ |
359 | 362 | ||
360 | void ipath_error_qp(struct ipath_qp *qp) | 363 | void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) |
361 | { | 364 | { |
362 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | 365 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); |
363 | struct ib_wc wc; | 366 | struct ib_wc wc; |
@@ -373,7 +376,6 @@ void ipath_error_qp(struct ipath_qp *qp) | |||
373 | list_del_init(&qp->piowait); | 376 | list_del_init(&qp->piowait); |
374 | spin_unlock(&dev->pending_lock); | 377 | spin_unlock(&dev->pending_lock); |
375 | 378 | ||
376 | wc.status = IB_WC_WR_FLUSH_ERR; | ||
377 | wc.vendor_err = 0; | 379 | wc.vendor_err = 0; |
378 | wc.byte_len = 0; | 380 | wc.byte_len = 0; |
379 | wc.imm_data = 0; | 381 | wc.imm_data = 0; |
@@ -385,6 +387,12 @@ void ipath_error_qp(struct ipath_qp *qp) | |||
385 | wc.sl = 0; | 387 | wc.sl = 0; |
386 | wc.dlid_path_bits = 0; | 388 | wc.dlid_path_bits = 0; |
387 | wc.port_num = 0; | 389 | wc.port_num = 0; |
390 | if (qp->r_wrid_valid) { | ||
391 | qp->r_wrid_valid = 0; | ||
392 | wc.status = err; | ||
393 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); | ||
394 | } | ||
395 | wc.status = IB_WC_WR_FLUSH_ERR; | ||
388 | 396 | ||
389 | while (qp->s_last != qp->s_head) { | 397 | while (qp->s_last != qp->s_head) { |
390 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | 398 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); |
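r_wrid_valid marks a receive WQE that has been claimed off the queue but not yet completed; on the transition to the error state, that one WQE is completed with the caller-supplied status before everything else is flushed. A minimal sketch of the claim/complete handshake, with a hypothetical post_completion() standing in for ipath_cq_enter():

    #include <stdint.h>

    struct my_qp {
            int r_wrid_valid;   /* a receive WQE is claimed, not completed */
            uint64_t r_wr_id;
    };

    /* Receive path: claim the next WQE (as ipath_get_rwqe sets the flag). */
    static void claim_rwqe(struct my_qp *qp, uint64_t wr_id)
    {
            qp->r_wr_id = wr_id;
            qp->r_wrid_valid = 1;
    }

    /* Error path: complete the claimed WQE once, with the real error. */
    static void error_qp(struct my_qp *qp, int err,
                         void (*post_completion)(uint64_t wr_id, int status))
    {
            if (qp->r_wrid_valid) {
                    qp->r_wrid_valid = 0;
                    post_completion(qp->r_wr_id, err);
            }
            /* remaining queued WQEs are then flushed with WR_FLUSH_ERR */
    }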
@@ -501,7 +509,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
501 | break; | 509 | break; |
502 | 510 | ||
503 | case IB_QPS_ERR: | 511 | case IB_QPS_ERR: |
504 | ipath_error_qp(qp); | 512 | ipath_error_qp(qp, IB_WC_GENERAL_ERR); |
505 | break; | 513 | break; |
506 | 514 | ||
507 | default: | 515 | default: |
@@ -516,7 +524,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
516 | qp->remote_qpn = attr->dest_qp_num; | 524 | qp->remote_qpn = attr->dest_qp_num; |
517 | 525 | ||
518 | if (attr_mask & IB_QP_SQ_PSN) { | 526 | if (attr_mask & IB_QP_SQ_PSN) { |
519 | qp->s_next_psn = attr->sq_psn; | 527 | qp->s_psn = qp->s_next_psn = attr->sq_psn; |
520 | qp->s_last_psn = qp->s_next_psn - 1; | 528 | qp->s_last_psn = qp->s_next_psn - 1; |
521 | } | 529 | } |
522 | 530 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index a08654042c03..a504cf67f272 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c | |||
@@ -201,6 +201,18 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
201 | qp->s_rnr_timeout) | 201 | qp->s_rnr_timeout) |
202 | goto done; | 202 | goto done; |
203 | 203 | ||
204 | /* Limit the number of packets sent without an ACK. */ | ||
205 | if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) { | ||
206 | qp->s_wait_credit = 1; | ||
207 | dev->n_rc_stalls++; | ||
208 | spin_lock(&dev->pending_lock); | ||
209 | if (list_empty(&qp->timerwait)) | ||
210 | list_add_tail(&qp->timerwait, | ||
211 | &dev->pending[dev->pending_index]); | ||
212 | spin_unlock(&dev->pending_lock); | ||
213 | goto done; | ||
214 | } | ||
215 | |||
204 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ | 216 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ |
205 | hwords = 5; | 217 | hwords = 5; |
206 | bth0 = 0; | 218 | bth0 = 0; |
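The stall test above depends on a circular 24-bit PSN comparison: the sender stops once s_psn runs more than IPATH_PSN_CREDIT packets ahead of the last acknowledged PSN. A minimal sketch of one common way to write the compare and the window test; the credit value here is purely illustrative, and the shift trick assumes two's-complement arithmetic right shift, as kernel code does:

    #include <stdint.h>

    #define PSN_CREDIT 512  /* illustrative; the real constant lives elsewhere */

    /* Circular compare of 24-bit PSNs: sign-extend the 24-bit difference. */
    static int cmp24(uint32_t a, uint32_t b)
    {
            return ((int32_t)((a - b) << 8)) >> 8;
    }

    /* True once the sender is more than PSN_CREDIT packets ahead of the ACKs. */
    static int need_credit(uint32_t s_psn, uint32_t s_last_psn)
    {
            return cmp24(s_psn, s_last_psn + PSN_CREDIT) > 0;
    }

The matching release is update_last_psn() further down: advancing s_last_psn clears s_wait_credit and reschedules the send tasklet, so every ACK reopens the window.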
@@ -221,7 +233,7 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
221 | /* Check if send work queue is empty. */ | 233 | /* Check if send work queue is empty. */ |
222 | if (qp->s_tail == qp->s_head) | 234 | if (qp->s_tail == qp->s_head) |
223 | goto done; | 235 | goto done; |
224 | qp->s_psn = wqe->psn = qp->s_next_psn; | 236 | wqe->psn = qp->s_next_psn; |
225 | newreq = 1; | 237 | newreq = 1; |
226 | } | 238 | } |
227 | /* | 239 | /* |
@@ -393,12 +405,6 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
393 | ss = &qp->s_sge; | 405 | ss = &qp->s_sge; |
394 | len = qp->s_len; | 406 | len = qp->s_len; |
395 | if (len > pmtu) { | 407 | if (len > pmtu) { |
396 | /* | ||
397 | * Request an ACK every 1/2 MB to avoid retransmit | ||
398 | * timeouts. | ||
399 | */ | ||
400 | if (((wqe->length - len) % (512 * 1024)) == 0) | ||
401 | bth2 |= 1 << 31; | ||
402 | len = pmtu; | 408 | len = pmtu; |
403 | break; | 409 | break; |
404 | } | 410 | } |
@@ -435,12 +441,6 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
435 | ss = &qp->s_sge; | 441 | ss = &qp->s_sge; |
436 | len = qp->s_len; | 442 | len = qp->s_len; |
437 | if (len > pmtu) { | 443 | if (len > pmtu) { |
438 | /* | ||
439 | * Request an ACK every 1/2 MB to avoid retransmit | ||
440 | * timeouts. | ||
441 | */ | ||
442 | if (((wqe->length - len) % (512 * 1024)) == 0) | ||
443 | bth2 |= 1 << 31; | ||
444 | len = pmtu; | 444 | len = pmtu; |
445 | break; | 445 | break; |
446 | } | 446 | } |
@@ -498,6 +498,8 @@ int ipath_make_rc_req(struct ipath_qp *qp, | |||
498 | */ | 498 | */ |
499 | goto done; | 499 | goto done; |
500 | } | 500 | } |
501 | if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0) | ||
502 | bth2 |= 1 << 31; /* Request ACK. */ | ||
501 | qp->s_len -= len; | 503 | qp->s_len -= len; |
502 | qp->s_hdrwords = hwords; | 504 | qp->s_hdrwords = hwords; |
503 | qp->s_cur_sge = ss; | 505 | qp->s_cur_sge = ss; |
@@ -737,6 +739,15 @@ bail: | |||
737 | return; | 739 | return; |
738 | } | 740 | } |
739 | 741 | ||
742 | static inline void update_last_psn(struct ipath_qp *qp, u32 psn) | ||
743 | { | ||
744 | if (qp->s_wait_credit) { | ||
745 | qp->s_wait_credit = 0; | ||
746 | tasklet_hi_schedule(&qp->s_task); | ||
747 | } | ||
748 | qp->s_last_psn = psn; | ||
749 | } | ||
750 | |||
740 | /** | 751 | /** |
741 | * do_rc_ack - process an incoming RC ACK | 752 | * do_rc_ack - process an incoming RC ACK |
742 | * @qp: the QP the ACK came in on | 753 | * @qp: the QP the ACK came in on |
@@ -805,7 +816,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
805 | * The last valid PSN seen is the previous | 816 | * The last valid PSN seen is the previous |
806 | * request's. | 817 | * request's. |
807 | */ | 818 | */ |
808 | qp->s_last_psn = wqe->psn - 1; | 819 | update_last_psn(qp, wqe->psn - 1); |
809 | /* Retry this request. */ | 820 | /* Retry this request. */ |
810 | ipath_restart_rc(qp, wqe->psn, &wc); | 821 | ipath_restart_rc(qp, wqe->psn, &wc); |
811 | /* | 822 | /* |
@@ -864,7 +875,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
864 | ipath_get_credit(qp, aeth); | 875 | ipath_get_credit(qp, aeth); |
865 | qp->s_rnr_retry = qp->s_rnr_retry_cnt; | 876 | qp->s_rnr_retry = qp->s_rnr_retry_cnt; |
866 | qp->s_retry = qp->s_retry_cnt; | 877 | qp->s_retry = qp->s_retry_cnt; |
867 | qp->s_last_psn = psn; | 878 | update_last_psn(qp, psn); |
868 | ret = 1; | 879 | ret = 1; |
869 | goto bail; | 880 | goto bail; |
870 | 881 | ||
@@ -883,7 +894,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
883 | goto bail; | 894 | goto bail; |
884 | 895 | ||
885 | /* The last valid PSN is the previous PSN. */ | 896 | /* The last valid PSN is the previous PSN. */ |
886 | qp->s_last_psn = psn - 1; | 897 | update_last_psn(qp, psn - 1); |
887 | 898 | ||
888 | dev->n_rc_resends += (int)qp->s_psn - (int)psn; | 899 | dev->n_rc_resends += (int)qp->s_psn - (int)psn; |
889 | 900 | ||
@@ -898,7 +909,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
898 | case 3: /* NAK */ | 909 | case 3: /* NAK */ |
899 | /* The last valid PSN seen is the previous request's. */ | 910 | /* The last valid PSN seen is the previous request's. */ |
900 | if (qp->s_last != qp->s_tail) | 911 | if (qp->s_last != qp->s_tail) |
901 | qp->s_last_psn = wqe->psn - 1; | 912 | update_last_psn(qp, wqe->psn - 1); |
902 | switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) & | 913 | switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) & |
903 | IPATH_AETH_CREDIT_MASK) { | 914 | IPATH_AETH_CREDIT_MASK) { |
904 | case 0: /* PSN sequence error */ | 915 | case 0: /* PSN sequence error */ |
@@ -1071,7 +1082,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1071 | * since we don't want s_sge modified. | 1082 | * since we don't want s_sge modified. |
1072 | */ | 1083 | */ |
1073 | qp->s_len -= pmtu; | 1084 | qp->s_len -= pmtu; |
1074 | qp->s_last_psn = psn; | 1085 | update_last_psn(qp, psn); |
1075 | spin_unlock_irqrestore(&qp->s_lock, flags); | 1086 | spin_unlock_irqrestore(&qp->s_lock, flags); |
1076 | ipath_copy_sge(&qp->s_sge, data, pmtu); | 1087 | ipath_copy_sge(&qp->s_sge, data, pmtu); |
1077 | goto bail; | 1088 | goto bail; |
@@ -1223,7 +1234,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev, | |||
1223 | * Address range must be a subset of the original | 1234 | * Address range must be a subset of the original |
1224 | * request and start on pmtu boundaries. | 1235 | * request and start on pmtu boundaries. |
1225 | */ | 1236 | */ |
1226 | ok = ipath_rkey_ok(dev, &qp->s_rdma_sge, | 1237 | ok = ipath_rkey_ok(qp, &qp->s_rdma_sge, |
1227 | qp->s_rdma_len, vaddr, rkey, | 1238 | qp->s_rdma_len, vaddr, rkey, |
1228 | IB_ACCESS_REMOTE_READ); | 1239 | IB_ACCESS_REMOTE_READ); |
1229 | if (unlikely(!ok)) { | 1240 | if (unlikely(!ok)) { |
@@ -1282,6 +1293,14 @@ done: | |||
1282 | return 1; | 1293 | return 1; |
1283 | } | 1294 | } |
1284 | 1295 | ||
1296 | static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) | ||
1297 | { | ||
1298 | spin_lock_irq(&qp->s_lock); | ||
1299 | qp->state = IB_QPS_ERR; | ||
1300 | ipath_error_qp(qp, err); | ||
1301 | spin_unlock_irq(&qp->s_lock); | ||
1302 | } | ||
1303 | |||
1285 | /** | 1304 | /** |
1286 | * ipath_rc_rcv - process an incoming RC packet | 1305 | * ipath_rc_rcv - process an incoming RC packet |
1287 | * @dev: the device this packet came in on | 1306 | * @dev: the device this packet came in on |
@@ -1309,6 +1328,10 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1309 | struct ib_reth *reth; | 1328 | struct ib_reth *reth; |
1310 | int header_in_data; | 1329 | int header_in_data; |
1311 | 1330 | ||
1331 | /* Validate the SLID. See Ch. 9.6.1.5 */ | ||
1332 | if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) | ||
1333 | goto done; | ||
1334 | |||
1312 | /* Check for GRH */ | 1335 | /* Check for GRH */ |
1313 | if (!has_grh) { | 1336 | if (!has_grh) { |
1314 | ohdr = &hdr->u.oth; | 1337 | ohdr = &hdr->u.oth; |
@@ -1370,8 +1393,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1370 | */ | 1393 | */ |
1371 | if (qp->r_ack_state >= OP(COMPARE_SWAP)) | 1394 | if (qp->r_ack_state >= OP(COMPARE_SWAP)) |
1372 | goto send_ack; | 1395 | goto send_ack; |
1373 | /* XXX Flush WQEs */ | 1396 | ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR); |
1374 | qp->state = IB_QPS_ERR; | ||
1375 | qp->r_ack_state = OP(SEND_ONLY); | 1397 | qp->r_ack_state = OP(SEND_ONLY); |
1376 | qp->r_nak_state = IB_NAK_INVALID_REQUEST; | 1398 | qp->r_nak_state = IB_NAK_INVALID_REQUEST; |
1377 | qp->r_ack_psn = qp->r_psn; | 1399 | qp->r_ack_psn = qp->r_psn; |
@@ -1477,9 +1499,9 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1477 | goto nack_inv; | 1499 | goto nack_inv; |
1478 | ipath_copy_sge(&qp->r_sge, data, tlen); | 1500 | ipath_copy_sge(&qp->r_sge, data, tlen); |
1479 | qp->r_msn++; | 1501 | qp->r_msn++; |
1480 | if (opcode == OP(RDMA_WRITE_LAST) || | 1502 | if (!qp->r_wrid_valid) |
1481 | opcode == OP(RDMA_WRITE_ONLY)) | ||
1482 | break; | 1503 | break; |
1504 | qp->r_wrid_valid = 0; | ||
1483 | wc.wr_id = qp->r_wr_id; | 1505 | wc.wr_id = qp->r_wr_id; |
1484 | wc.status = IB_WC_SUCCESS; | 1506 | wc.status = IB_WC_SUCCESS; |
1485 | wc.opcode = IB_WC_RECV; | 1507 | wc.opcode = IB_WC_RECV; |
@@ -1517,7 +1539,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1517 | int ok; | 1539 | int ok; |
1518 | 1540 | ||
1519 | /* Check rkey & NAK */ | 1541 | /* Check rkey & NAK */ |
1520 | ok = ipath_rkey_ok(dev, &qp->r_sge, | 1542 | ok = ipath_rkey_ok(qp, &qp->r_sge, |
1521 | qp->r_len, vaddr, rkey, | 1543 | qp->r_len, vaddr, rkey, |
1522 | IB_ACCESS_REMOTE_WRITE); | 1544 | IB_ACCESS_REMOTE_WRITE); |
1523 | if (unlikely(!ok)) | 1545 | if (unlikely(!ok)) |
@@ -1559,7 +1581,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1559 | int ok; | 1581 | int ok; |
1560 | 1582 | ||
1561 | /* Check rkey & NAK */ | 1583 | /* Check rkey & NAK */ |
1562 | ok = ipath_rkey_ok(dev, &qp->s_rdma_sge, | 1584 | ok = ipath_rkey_ok(qp, &qp->s_rdma_sge, |
1563 | qp->s_rdma_len, vaddr, rkey, | 1585 | qp->s_rdma_len, vaddr, rkey, |
1564 | IB_ACCESS_REMOTE_READ); | 1586 | IB_ACCESS_REMOTE_READ); |
1565 | if (unlikely(!ok)) { | 1587 | if (unlikely(!ok)) { |
@@ -1618,7 +1640,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1618 | goto nack_inv; | 1640 | goto nack_inv; |
1619 | rkey = be32_to_cpu(ateth->rkey); | 1641 | rkey = be32_to_cpu(ateth->rkey); |
1620 | /* Check rkey & NAK */ | 1642 | /* Check rkey & NAK */ |
1621 | if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, | 1643 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, |
1622 | sizeof(u64), vaddr, rkey, | 1644 | sizeof(u64), vaddr, rkey, |
1623 | IB_ACCESS_REMOTE_ATOMIC))) | 1645 | IB_ACCESS_REMOTE_ATOMIC))) |
1624 | goto nack_acc; | 1646 | goto nack_acc; |
@@ -1670,8 +1692,7 @@ nack_acc: | |||
1670 | * is pending though. | 1692 | * is pending though. |
1671 | */ | 1693 | */ |
1672 | if (qp->r_ack_state < OP(COMPARE_SWAP)) { | 1694 | if (qp->r_ack_state < OP(COMPARE_SWAP)) { |
1673 | /* XXX Flush WQEs */ | 1695 | ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR); |
1674 | qp->state = IB_QPS_ERR; | ||
1675 | qp->r_ack_state = OP(RDMA_WRITE_ONLY); | 1696 | qp->r_ack_state = OP(RDMA_WRITE_ONLY); |
1676 | qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; | 1697 | qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; |
1677 | qp->r_ack_psn = qp->r_psn; | 1698 | qp->r_ack_psn = qp->r_psn; |
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h index 6e23b3d632b8..dffc76016d3c 100644 --- a/drivers/infiniband/hw/ipath/ipath_registers.h +++ b/drivers/infiniband/hw/ipath/ipath_registers.h | |||
@@ -134,10 +134,24 @@ | |||
134 | #define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40 | 134 | #define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40 |
135 | #define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL | 135 | #define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL |
136 | #define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44 | 136 | #define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44 |
137 | #define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL | ||
138 | #define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL | ||
139 | #define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL | 137 | #define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL |
140 | #define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL | 138 | #define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL |
139 | /* txe mem parity errors (shift by INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) */ | ||
140 | #define INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF 0x1ULL | ||
141 | #define INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC 0x2ULL | ||
142 | #define INFINIPATH_HWE_TXEMEMPARITYERR_PIOLAUNCHFIFO 0x4ULL | ||
143 | /* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */ | ||
144 | #define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL | ||
145 | #define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL | ||
146 | #define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x04ULL | ||
147 | #define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x08ULL | ||
148 | #define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL | ||
149 | #define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL | ||
150 | #define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL | ||
151 | /* waldo specific -- find the rest in ipath_6110.c */ | ||
152 | #define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL | ||
153 | /* monty specific -- find the rest in ipath_6120.c */ | ||
154 | #define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL | ||
141 | 155 | ||
142 | /* kr_hwdiagctrl bits */ | 156 | /* kr_hwdiagctrl bits */ |
143 | #define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL | 157 | #define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL |
@@ -209,9 +223,9 @@ | |||
209 | 223 | ||
210 | /* combination link status states that we use with some frequency */ | 224 | /* combination link status states that we use with some frequency */ |
211 | #define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \ | 225 | #define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \ |
212 | << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \ | 226 | << INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | \ |
213 | (INFINIPATH_IBCS_LINKSTATE_MASK \ | 227 | (INFINIPATH_IBCS_LINKSTATE_MASK \ |
214 | <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)) | 228 | <<INFINIPATH_IBCS_LINKSTATE_SHIFT)) |
215 | #define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \ | 229 | #define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \ |
216 | << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \ | 230 | << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \ |
217 | (INFINIPATH_IBCS_LT_STATE_LINKUP \ | 231 | (INFINIPATH_IBCS_LT_STATE_LINKUP \ |
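The fix above repairs a swapped pairing: the old macro shifted the training-state mask by the link-state shift and vice versa, so IPATH_IBSTATE_MASK covered the wrong bits. A minimal sketch of the corrected composition, with illustrative field positions (the real ones are defined earlier in this header):

    /* Illustrative field positions, not the real register layout. */
    #define LINKSTATE_MASK          0xFULL
    #define LINKSTATE_SHIFT         4
    #define LINKTRAIN_MASK          0xFULL
    #define LINKTRAIN_SHIFT         0

    /* Each field's mask is shifted by that same field's shift. */
    #define IBSTATE_MASK    ((LINKTRAIN_MASK << LINKTRAIN_SHIFT) | \
                             (LINKSTATE_MASK << LINKSTATE_SHIFT))

    static unsigned long long masked_ibstate(unsigned long long ibcstatus)
    {
            return ibcstatus & IBSTATE_MASK;    /* both fields, nothing else */
    }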
@@ -302,6 +316,17 @@ | |||
302 | 316 | ||
303 | typedef u64 ipath_err_t; | 317 | typedef u64 ipath_err_t; |
304 | 318 | ||
319 | /* The following change with the type of device, so | ||
320 | * need to be part of the ipath_devdata struct, or | ||
321 | * we could have problems plugging in devices of | ||
322 | * different types (e.g. one HT, one PCIE) | ||
323 | * in one system, to be managed by one driver. | ||
324 | * On the other hand, this file may also be included | ||
325 | * by other code, so leave the declarations here | ||
326 | * temporarily. Minor footprint issue if common-model | ||
327 | * linker used, none if C89+ linker used. | ||
328 | */ | ||
329 | |||
305 | /* mask of defined bits for various registers */ | 330 | /* mask of defined bits for various registers */ |
306 | extern u64 infinipath_i_bitsextant; | 331 | extern u64 infinipath_i_bitsextant; |
307 | extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant; | 332 | extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant; |
@@ -310,13 +335,6 @@ extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant; | |||
310 | extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask; | 335 | extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask; |
311 | 336 | ||
312 | /* | 337 | /* |
313 | * register bits for selecting i2c direction and values, used for I2C serial | ||
314 | * flash | ||
315 | */ | ||
316 | extern u16 ipath_gpio_sda_num, ipath_gpio_scl_num; | ||
317 | extern u64 ipath_gpio_sda, ipath_gpio_scl; | ||
318 | |||
319 | /* | ||
320 | * These are the infinipath general register numbers (not offsets). | 338 | * These are the infinipath general register numbers (not offsets). |
321 | * The kernel registers are used directly, those beyond the kernel | 339 | * The kernel registers are used directly, those beyond the kernel |
322 | * registers are calculated from one of the base registers. The use of | 340 | * registers are calculated from one of the base registers. The use of |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index 5c1da2d25e03..f7530512045d 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c | |||
@@ -108,7 +108,6 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp) | |||
108 | 108 | ||
109 | static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe) | 109 | static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe) |
110 | { | 110 | { |
111 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
112 | int user = to_ipd(qp->ibqp.pd)->user; | 111 | int user = to_ipd(qp->ibqp.pd)->user; |
113 | int i, j, ret; | 112 | int i, j, ret; |
114 | struct ib_wc wc; | 113 | struct ib_wc wc; |
@@ -119,8 +118,7 @@ static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe) | |||
119 | continue; | 118 | continue; |
120 | /* Check LKEY */ | 119 | /* Check LKEY */ |
121 | if ((user && wqe->sg_list[i].lkey == 0) || | 120 | if ((user && wqe->sg_list[i].lkey == 0) || |
122 | !ipath_lkey_ok(&dev->lk_table, | 121 | !ipath_lkey_ok(qp, &qp->r_sg_list[j], &wqe->sg_list[i], |
123 | &qp->r_sg_list[j], &wqe->sg_list[i], | ||
124 | IB_ACCESS_LOCAL_WRITE)) | 122 | IB_ACCESS_LOCAL_WRITE)) |
125 | goto bad_lkey; | 123 | goto bad_lkey; |
126 | qp->r_len += wqe->sg_list[i].length; | 124 | qp->r_len += wqe->sg_list[i].length; |
@@ -231,6 +229,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) | |||
231 | } | 229 | } |
232 | } | 230 | } |
233 | spin_unlock_irqrestore(&rq->lock, flags); | 231 | spin_unlock_irqrestore(&rq->lock, flags); |
232 | qp->r_wrid_valid = 1; | ||
234 | 233 | ||
235 | bail: | 234 | bail: |
236 | return ret; | 235 | return ret; |
@@ -326,7 +325,7 @@ again: | |||
326 | case IB_WR_RDMA_WRITE: | 325 | case IB_WR_RDMA_WRITE: |
327 | if (wqe->length == 0) | 326 | if (wqe->length == 0) |
328 | break; | 327 | break; |
329 | if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length, | 328 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length, |
330 | wqe->wr.wr.rdma.remote_addr, | 329 | wqe->wr.wr.rdma.remote_addr, |
331 | wqe->wr.wr.rdma.rkey, | 330 | wqe->wr.wr.rdma.rkey, |
332 | IB_ACCESS_REMOTE_WRITE))) { | 331 | IB_ACCESS_REMOTE_WRITE))) { |
@@ -350,7 +349,7 @@ again: | |||
350 | break; | 349 | break; |
351 | 350 | ||
352 | case IB_WR_RDMA_READ: | 351 | case IB_WR_RDMA_READ: |
353 | if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length, | 352 | if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, |
354 | wqe->wr.wr.rdma.remote_addr, | 353 | wqe->wr.wr.rdma.remote_addr, |
355 | wqe->wr.wr.rdma.rkey, | 354 | wqe->wr.wr.rdma.rkey, |
356 | IB_ACCESS_REMOTE_READ))) | 355 | IB_ACCESS_REMOTE_READ))) |
@@ -365,7 +364,7 @@ again: | |||
365 | 364 | ||
366 | case IB_WR_ATOMIC_CMP_AND_SWP: | 365 | case IB_WR_ATOMIC_CMP_AND_SWP: |
367 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 366 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
368 | if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64), | 367 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), |
369 | wqe->wr.wr.rdma.remote_addr, | 368 | wqe->wr.wr.rdma.remote_addr, |
370 | wqe->wr.wr.rdma.rkey, | 369 | wqe->wr.wr.rdma.rkey, |
371 | IB_ACCESS_REMOTE_ATOMIC))) | 370 | IB_ACCESS_REMOTE_ATOMIC))) |
@@ -575,8 +574,7 @@ int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
575 | } | 574 | } |
576 | if (wr->sg_list[i].length == 0) | 575 | if (wr->sg_list[i].length == 0) |
577 | continue; | 576 | continue; |
578 | if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table, | 577 | if (!ipath_lkey_ok(qp, &wqe->sg_list[j], &wr->sg_list[i], |
579 | &wqe->sg_list[j], &wr->sg_list[i], | ||
580 | acc)) { | 578 | acc)) { |
581 | spin_unlock_irqrestore(&qp->s_lock, flags); | 579 | spin_unlock_irqrestore(&qp->s_lock, flags); |
582 | ret = -EINVAL; | 580 | ret = -EINVAL; |
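Every call site above swaps the device's lkey table (or the ibdev) for the QP itself. With the QP in hand, the helpers can enforce the "Ensure that PD of MR matches PD of QP" check from the changelog; a sketch of the idea, assuming the pd field added to struct ipath_mregion later in this diff and the driver's usual table indexing:

	struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct ipath_mregion *mr;

	mr = rkt->table[sge->lkey >> (32 - ib_ipath_lkey_table_size)];
	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
		     mr->pd != qp->ibqp.pd))
		return 0;	/* bad key, or MR belongs to another PD */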
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c index 941e866d9517..94033503400c 100644 --- a/drivers/infiniband/hw/ipath/ipath_srq.c +++ b/drivers/infiniband/hw/ipath/ipath_srq.c | |||
@@ -104,11 +104,6 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, | |||
104 | u32 sz; | 104 | u32 sz; |
105 | struct ib_srq *ret; | 105 | struct ib_srq *ret; |
106 | 106 | ||
107 | if (dev->n_srqs_allocated == ib_ipath_max_srqs) { | ||
108 | ret = ERR_PTR(-ENOMEM); | ||
109 | goto done; | ||
110 | } | ||
111 | |||
112 | if (srq_init_attr->attr.max_wr == 0) { | 107 | if (srq_init_attr->attr.max_wr == 0) { |
113 | ret = ERR_PTR(-EINVAL); | 108 | ret = ERR_PTR(-EINVAL); |
114 | goto done; | 109 | goto done; |
@@ -180,10 +175,17 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, | |||
180 | spin_lock_init(&srq->rq.lock); | 175 | spin_lock_init(&srq->rq.lock); |
181 | srq->rq.wq->head = 0; | 176 | srq->rq.wq->head = 0; |
182 | srq->rq.wq->tail = 0; | 177 | srq->rq.wq->tail = 0; |
183 | srq->rq.max_sge = srq_init_attr->attr.max_sge; | ||
184 | srq->limit = srq_init_attr->attr.srq_limit; | 178 | srq->limit = srq_init_attr->attr.srq_limit; |
185 | 179 | ||
186 | dev->n_srqs_allocated++; | 180 | spin_lock(&dev->n_srqs_lock); |
181 | if (dev->n_srqs_allocated == ib_ipath_max_srqs) { | ||
182 | spin_unlock(&dev->n_srqs_lock); | ||
183 | ret = ERR_PTR(-ENOMEM); | ||
184 | goto bail_wq; | ||
185 | } | ||
186 | |||
187 | dev->n_srqs_allocated++; | ||
188 | spin_unlock(&dev->n_srqs_lock); | ||
187 | 189 | ||
188 | ret = &srq->ibsrq; | 190 | ret = &srq->ibsrq; |
189 | goto done; | 191 | goto done; |
@@ -351,8 +353,13 @@ int ipath_destroy_srq(struct ib_srq *ibsrq) | |||
351 | struct ipath_srq *srq = to_isrq(ibsrq); | 353 | struct ipath_srq *srq = to_isrq(ibsrq); |
352 | struct ipath_ibdev *dev = to_idev(ibsrq->device); | 354 | struct ipath_ibdev *dev = to_idev(ibsrq->device); |
353 | 355 | ||
356 | spin_lock(&dev->n_srqs_lock); | ||
354 | dev->n_srqs_allocated--; | 357 | dev->n_srqs_allocated--; |
355 | vfree(srq->rq.wq); | 358 | spin_unlock(&dev->n_srqs_lock); |
359 | if (srq->ip) | ||
360 | kref_put(&srq->ip->ref, ipath_release_mmap_info); | ||
361 | else | ||
362 | vfree(srq->rq.wq); | ||
356 | kfree(srq); | 363 | kfree(srq); |
357 | 364 | ||
358 | return 0; | 365 | return 0; |
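Two separate fixes land in this file. The allocation limit check moves under the new n_srqs_lock and is redone only after the receive queue has been built, so two racing creates can no longer both slip past an unlocked test; condensed from the hunk above:

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_wq;		/* undo the queue allocation */
	}
	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

And on destroy, a queue that was mmap()ed to userspace is now released through its kref (ipath_release_mmap_info frees it once the last mapping goes away) instead of being vfree()d out from under a live mapping.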
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c index e299148c4b68..182de34f9f47 100644 --- a/drivers/infiniband/hw/ipath/ipath_sysfs.c +++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c | |||
@@ -257,7 +257,7 @@ static ssize_t store_guid(struct device *dev, | |||
257 | struct ipath_devdata *dd = dev_get_drvdata(dev); | 257 | struct ipath_devdata *dd = dev_get_drvdata(dev); |
258 | ssize_t ret; | 258 | ssize_t ret; |
259 | unsigned short guid[8]; | 259 | unsigned short guid[8]; |
260 | __be64 nguid; | 260 | __be64 new_guid; |
261 | u8 *ng; | 261 | u8 *ng; |
262 | int i; | 262 | int i; |
263 | 263 | ||
@@ -266,7 +266,7 @@ static ssize_t store_guid(struct device *dev, | |||
266 | &guid[4], &guid[5], &guid[6], &guid[7]) != 8) | 266 | &guid[4], &guid[5], &guid[6], &guid[7]) != 8) |
267 | goto invalid; | 267 | goto invalid; |
268 | 268 | ||
269 | ng = (u8 *) &nguid; | 269 | ng = (u8 *) &new_guid; |
270 | 270 | ||
271 | for (i = 0; i < 8; i++) { | 271 | for (i = 0; i < 8; i++) { |
272 | if (guid[i] > 0xff) | 272 | if (guid[i] > 0xff) |
@@ -274,7 +274,10 @@ static ssize_t store_guid(struct device *dev, | |||
274 | ng[i] = guid[i]; | 274 | ng[i] = guid[i]; |
275 | } | 275 | } |
276 | 276 | ||
277 | dd->ipath_guid = nguid; | 277 | if (new_guid == 0) |
278 | goto invalid; | ||
279 | |||
280 | dd->ipath_guid = new_guid; | ||
278 | dd->ipath_nguid = 1; | 281 | dd->ipath_nguid = 1; |
279 | 282 | ||
280 | ret = strlen(buf); | 283 | ret = strlen(buf); |
@@ -297,6 +300,16 @@ static ssize_t show_nguid(struct device *dev, | |||
297 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid); | 300 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid); |
298 | } | 301 | } |
299 | 302 | ||
303 | static ssize_t show_nports(struct device *dev, | ||
304 | struct device_attribute *attr, | ||
305 | char *buf) | ||
306 | { | ||
307 | struct ipath_devdata *dd = dev_get_drvdata(dev); | ||
308 | |||
309 | /* Return the number of user ports available. */ | ||
310 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1); | ||
311 | } | ||
312 | |||
300 | static ssize_t show_serial(struct device *dev, | 313 | static ssize_t show_serial(struct device *dev, |
301 | struct device_attribute *attr, | 314 | struct device_attribute *attr, |
302 | char *buf) | 315 | char *buf) |
@@ -608,6 +621,7 @@ static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid); | |||
608 | static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu); | 621 | static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu); |
609 | static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled); | 622 | static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled); |
610 | static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL); | 623 | static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL); |
624 | static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL); | ||
611 | static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset); | 625 | static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset); |
612 | static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); | 626 | static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); |
613 | static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); | 627 | static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); |
@@ -623,6 +637,7 @@ static struct attribute *dev_attributes[] = { | |||
623 | &dev_attr_mlid.attr, | 637 | &dev_attr_mlid.attr, |
624 | &dev_attr_mtu.attr, | 638 | &dev_attr_mtu.attr, |
625 | &dev_attr_nguid.attr, | 639 | &dev_attr_nguid.attr, |
640 | &dev_attr_nports.attr, | ||
626 | &dev_attr_serial.attr, | 641 | &dev_attr_serial.attr, |
627 | &dev_attr_status.attr, | 642 | &dev_attr_status.attr, |
628 | &dev_attr_status_str.attr, | 643 | &dev_attr_status_str.attr, |
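The nports attribute follows the standard three-piece sysfs recipe visible in the hunks above; the -1 drops the port the kernel itself uses (port 0), leaving only user ports, which matches the comment in the hunk. The pieces brought together:

	/* 1: the show routine (format one value into buf) */
	static ssize_t show_nports(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		struct ipath_devdata *dd = dev_get_drvdata(dev);

		return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1);
	}
	/* 2: the attribute object, read-only (no store routine) */
	static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL);
	/* 3: &dev_attr_nports.attr added to dev_attributes[] */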
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c index 0fd3cded16ba..e636cfd67a82 100644 --- a/drivers/infiniband/hw/ipath/ipath_uc.c +++ b/drivers/infiniband/hw/ipath/ipath_uc.c | |||
@@ -246,6 +246,10 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
246 | struct ib_reth *reth; | 246 | struct ib_reth *reth; |
247 | int header_in_data; | 247 | int header_in_data; |
248 | 248 | ||
249 | /* Validate the SLID. See Ch. 9.6.1.5 */ | ||
250 | if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) | ||
251 | goto done; | ||
252 | |||
249 | /* Check for GRH */ | 253 | /* Check for GRH */ |
250 | if (!has_grh) { | 254 | if (!has_grh) { |
251 | ohdr = &hdr->u.oth; | 255 | ohdr = &hdr->u.oth; |
@@ -440,7 +444,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
440 | int ok; | 444 | int ok; |
441 | 445 | ||
442 | /* Check rkey */ | 446 | /* Check rkey */ |
443 | ok = ipath_rkey_ok(dev, &qp->r_sge, qp->r_len, | 447 | ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len, |
444 | vaddr, rkey, | 448 | vaddr, rkey, |
445 | IB_ACCESS_REMOTE_WRITE); | 449 | IB_ACCESS_REMOTE_WRITE); |
446 | if (unlikely(!ok)) { | 450 | if (unlikely(!ok)) { |
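The new test at the top of ipath_uc_rcv() implements the "RC and UC should validate SLID" item: a connected QP should only accept packets from the peer it was set up with, so the source LID must equal the LID we address our own sends to. The LRH is four big-endian 16-bit words, with the DLID in word 1 and the SLID in word 3, hence:

	/* Validate the SLID. See Ch. 9.6.1.5 */
	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
		goto done;	/* not from our peer: drop silently */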
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 6991d1d74e3c..49f1102af8b3 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -39,7 +39,6 @@ | |||
39 | static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, | 39 | static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, |
40 | u32 *lengthp, struct ipath_sge_state *ss) | 40 | u32 *lengthp, struct ipath_sge_state *ss) |
41 | { | 41 | { |
42 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
43 | int user = to_ipd(qp->ibqp.pd)->user; | 42 | int user = to_ipd(qp->ibqp.pd)->user; |
44 | int i, j, ret; | 43 | int i, j, ret; |
45 | struct ib_wc wc; | 44 | struct ib_wc wc; |
@@ -50,8 +49,7 @@ static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, | |||
50 | continue; | 49 | continue; |
51 | /* Check LKEY */ | 50 | /* Check LKEY */ |
52 | if ((user && wqe->sg_list[i].lkey == 0) || | 51 | if ((user && wqe->sg_list[i].lkey == 0) || |
53 | !ipath_lkey_ok(&dev->lk_table, | 52 | !ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge, |
54 | j ? &ss->sg_list[j - 1] : &ss->sge, | ||
55 | &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) | 53 | &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) |
56 | goto bad_lkey; | 54 | goto bad_lkey; |
57 | *lengthp += wqe->sg_list[i].length; | 55 | *lengthp += wqe->sg_list[i].length; |
@@ -343,7 +341,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
343 | 341 | ||
344 | if (wr->sg_list[i].length == 0) | 342 | if (wr->sg_list[i].length == 0) |
345 | continue; | 343 | continue; |
346 | if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ? | 344 | if (!ipath_lkey_ok(qp, ss.num_sge ? |
347 | sg_list + ss.num_sge - 1 : &ss.sge, | 345 | sg_list + ss.num_sge - 1 : &ss.sge, |
348 | &wr->sg_list[i], 0)) { | 346 | &wr->sg_list[i], 0)) { |
349 | ret = -EINVAL; | 347 | ret = -EINVAL; |
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index e32fca9faf80..413754b1d8a2 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
@@ -90,6 +90,62 @@ bail: | |||
90 | } | 90 | } |
91 | 91 | ||
92 | /** | 92 | /** |
93 | * ipath_map_page - a safety wrapper around pci_map_page() | ||
94 | * | ||
95 | * A dma_addr of all 0's is interpreted by the chip as "disabled". | ||
96 | * Unfortunately, it can also be a valid dma_addr returned on some | ||
97 | * architectures. | ||
98 | * | ||
99 | * The powerpc iommu assigns dma_addrs in ascending order, so we don't | ||
100 | * have to bother with retries or mapping a dummy page to ensure we | ||
101 | * don't just get the same mapping again. | ||
102 | * | ||
103 | * I'm sure we won't be so lucky with other IOMMUs, so FIXME. | ||
104 | */ | ||
105 | dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page, | ||
106 | unsigned long offset, size_t size, int direction) | ||
107 | { | ||
108 | dma_addr_t phys; | ||
109 | |||
110 | phys = pci_map_page(hwdev, page, offset, size, direction); | ||
111 | |||
112 | if (phys == 0) { | ||
113 | pci_unmap_page(hwdev, phys, size, direction); | ||
114 | phys = pci_map_page(hwdev, page, offset, size, direction); | ||
115 | /* | ||
116 | * FIXME: If we get 0 again, we should keep this page, | ||
117 | * map another, then free the 0 page. | ||
118 | */ | ||
119 | } | ||
120 | |||
121 | return phys; | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * ipath_map_single - a safety wrapper around pci_map_single() | ||
126 | * | ||
127 | * Same idea as ipath_map_page(). | ||
128 | */ | ||
129 | dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | ||
130 | int direction) | ||
131 | { | ||
132 | dma_addr_t phys; | ||
133 | |||
134 | phys = pci_map_single(hwdev, ptr, size, direction); | ||
135 | |||
136 | if (phys == 0) { | ||
137 | pci_unmap_single(hwdev, phys, size, direction); | ||
138 | phys = pci_map_single(hwdev, ptr, size, direction); | ||
139 | /* | ||
140 | * FIXME: If we get 0 again, we should keep this page, | ||
141 | * map another, then free the 0 page. | ||
142 | */ | ||
143 | } | ||
144 | |||
145 | return phys; | ||
146 | } | ||
147 | |||
148 | /** | ||
93 | * ipath_get_user_pages - lock user pages into memory | 149 | * ipath_get_user_pages - lock user pages into memory |
94 | * @start_page: the start page | 150 | * @start_page: the start page |
95 | * @num_pages: the number of pages | 151 | * @num_pages: the number of pages |
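From a caller's point of view, the wrappers make 0 double as the error value: if the retry still yields 0, the mapping is unusable, since the chip would read it as "disabled". A hypothetical caller (the surrounding error handling is an assumption):

	dma_addr_t addr;

	addr = ipath_map_page(dd->pcidev, page, 0, PAGE_SIZE,
			      PCI_DMA_FROMDEVICE);
	if (!addr)		/* still 0 after the one retry */
		return -ENOMEM;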
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index b8381c5e72bd..42eaed88c281 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -898,7 +898,8 @@ int ipath_get_counters(struct ipath_devdata *dd, | |||
898 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) + | 898 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) + |
899 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) + | 899 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) + |
900 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) + | 900 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) + |
901 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt); | 901 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt) + |
902 | dd->ipath_rxfc_unsupvl_errs; | ||
902 | cntrs->port_rcv_remphys_errors = | 903 | cntrs->port_rcv_remphys_errors = |
903 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt); | 904 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt); |
904 | cntrs->port_xmit_discards = | 905 | cntrs->port_xmit_discards = |
@@ -911,8 +912,10 @@ int ipath_get_counters(struct ipath_devdata *dd, | |||
911 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); | 912 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); |
912 | cntrs->port_rcv_packets = | 913 | cntrs->port_rcv_packets = |
913 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); | 914 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); |
914 | cntrs->local_link_integrity_errors = dd->ipath_lli_errors; | 915 | cntrs->local_link_integrity_errors = |
915 | cntrs->excessive_buffer_overrun_errors = 0; /* XXX */ | 916 | (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ? |
917 | dd->ipath_lli_errs : dd->ipath_lli_errors; | ||
918 | cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs; | ||
916 | 919 | ||
917 | ret = 0; | 920 | ret = 0; |
918 | 921 | ||
@@ -1199,6 +1202,7 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, | |||
1199 | struct ipath_ah *ah; | 1202 | struct ipath_ah *ah; |
1200 | struct ib_ah *ret; | 1203 | struct ib_ah *ret; |
1201 | struct ipath_ibdev *dev = to_idev(pd->device); | 1204 | struct ipath_ibdev *dev = to_idev(pd->device); |
1205 | unsigned long flags; | ||
1202 | 1206 | ||
1203 | /* A multicast address requires a GRH (see ch. 8.4.1). */ | 1207 | /* A multicast address requires a GRH (see ch. 8.4.1). */ |
1204 | if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && | 1208 | if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && |
@@ -1225,16 +1229,16 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, | |||
1225 | goto bail; | 1229 | goto bail; |
1226 | } | 1230 | } |
1227 | 1231 | ||
1228 | spin_lock(&dev->n_ahs_lock); | 1232 | spin_lock_irqsave(&dev->n_ahs_lock, flags); |
1229 | if (dev->n_ahs_allocated == ib_ipath_max_ahs) { | 1233 | if (dev->n_ahs_allocated == ib_ipath_max_ahs) { |
1230 | spin_unlock(&dev->n_ahs_lock); | 1234 | spin_unlock_irqrestore(&dev->n_ahs_lock, flags); |
1231 | kfree(ah); | 1235 | kfree(ah); |
1232 | ret = ERR_PTR(-ENOMEM); | 1236 | ret = ERR_PTR(-ENOMEM); |
1233 | goto bail; | 1237 | goto bail; |
1234 | } | 1238 | } |
1235 | 1239 | ||
1236 | dev->n_ahs_allocated++; | 1240 | dev->n_ahs_allocated++; |
1237 | spin_unlock(&dev->n_ahs_lock); | 1241 | spin_unlock_irqrestore(&dev->n_ahs_lock, flags); |
1238 | 1242 | ||
1239 | /* ib_create_ah() will initialize ah->ibah. */ | 1243 | /* ib_create_ah() will initialize ah->ibah. */ |
1240 | ah->attr = *ah_attr; | 1244 | ah->attr = *ah_attr; |
@@ -1255,10 +1259,11 @@ static int ipath_destroy_ah(struct ib_ah *ibah) | |||
1255 | { | 1259 | { |
1256 | struct ipath_ibdev *dev = to_idev(ibah->device); | 1260 | struct ipath_ibdev *dev = to_idev(ibah->device); |
1257 | struct ipath_ah *ah = to_iah(ibah); | 1261 | struct ipath_ah *ah = to_iah(ibah); |
1262 | unsigned long flags; | ||
1258 | 1263 | ||
1259 | spin_lock(&dev->n_ahs_lock); | 1264 | spin_lock_irqsave(&dev->n_ahs_lock, flags); |
1260 | dev->n_ahs_allocated--; | 1265 | dev->n_ahs_allocated--; |
1261 | spin_unlock(&dev->n_ahs_lock); | 1266 | spin_unlock_irqrestore(&dev->n_ahs_lock, flags); |
1262 | 1267 | ||
1263 | kfree(ah); | 1268 | kfree(ah); |
1264 | 1269 | ||
@@ -1380,11 +1385,13 @@ static int enable_timer(struct ipath_devdata *dd) | |||
1380 | * processing. | 1385 | * processing. |
1381 | */ | 1386 | */ |
1382 | if (dd->ipath_flags & IPATH_GPIO_INTR) { | 1387 | if (dd->ipath_flags & IPATH_GPIO_INTR) { |
1388 | u64 val; | ||
1383 | ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, | 1389 | ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, |
1384 | 0x2074076542310ULL); | 1390 | 0x2074076542310ULL); |
1385 | /* Enable GPIO bit 2 interrupt */ | 1391 | /* Enable GPIO bit 2 interrupt */ |
1386 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, | 1392 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); |
1387 | (u64) (1 << 2)); | 1393 | val |= (u64) (1 << IPATH_GPIO_PORT0_BIT); |
1394 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, val); | ||
1388 | } | 1395 | } |
1389 | 1396 | ||
1390 | init_timer(&dd->verbs_timer); | 1397 | init_timer(&dd->verbs_timer); |
@@ -1399,8 +1406,17 @@ static int enable_timer(struct ipath_devdata *dd) | |||
1399 | static int disable_timer(struct ipath_devdata *dd) | 1406 | static int disable_timer(struct ipath_devdata *dd) |
1400 | { | 1407 | { |
1401 | /* Disable GPIO bit 2 interrupt */ | 1408 | /* Disable GPIO bit 2 interrupt */ |
1402 | if (dd->ipath_flags & IPATH_GPIO_INTR) | 1409 | if (dd->ipath_flags & IPATH_GPIO_INTR) { |
1403 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0); | 1410 | u64 val; |
1411 | /* Disable GPIO bit 2 interrupt */ | ||
1412 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); | ||
1413 | val &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT)); | ||
1414 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, val); | ||
1415 | /* | ||
1416 | * We might want to undo changes to debugportselect, | ||
1417 | * but how? | ||
1418 | */ | ||
1419 | } | ||
1404 | 1420 | ||
1405 | del_timer_sync(&dd->verbs_timer); | 1421 | del_timer_sync(&dd->verbs_timer); |
1406 | 1422 | ||
@@ -1683,6 +1699,7 @@ static ssize_t show_stats(struct class_device *cdev, char *buf) | |||
1683 | "RC OTH NAKs %d\n" | 1699 | "RC OTH NAKs %d\n" |
1684 | "RC timeouts %d\n" | 1700 | "RC timeouts %d\n" |
1685 | "RC RDMA dup %d\n" | 1701 | "RC RDMA dup %d\n" |
1702 | "RC stalls %d\n" | ||
1686 | "piobuf wait %d\n" | 1703 | "piobuf wait %d\n" |
1687 | "no piobuf %d\n" | 1704 | "no piobuf %d\n" |
1688 | "PKT drops %d\n" | 1705 | "PKT drops %d\n" |
@@ -1690,7 +1707,7 @@ static ssize_t show_stats(struct class_device *cdev, char *buf) | |||
1690 | dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, | 1707 | dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, |
1691 | dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, | 1708 | dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, |
1692 | dev->n_other_naks, dev->n_timeouts, | 1709 | dev->n_other_naks, dev->n_timeouts, |
1693 | dev->n_rdma_dup_busy, dev->n_piowait, | 1710 | dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait, |
1694 | dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); | 1711 | dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); |
1695 | for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { | 1712 | for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { |
1696 | const struct ipath_opcode_stats *si = &dev->opstats[i]; | 1713 | const struct ipath_opcode_stats *si = &dev->opstats[i]; |
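Note the locking change around the AH counter: spin_lock() becomes spin_lock_irqsave(), which is required once a lock may also be taken from interrupt context or with interrupts disabled (presumably the motivation here). The failure mode the irqsave form prevents:

	/* With plain spin_lock(), an interrupt arriving on this CPU while
	 * n_ahs_lock is held, whose handler also takes n_ahs_lock, would
	 * self-deadlock. irqsave masks local interrupts across the
	 * critical section and restores the saved state afterwards: */
	unsigned long flags;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

The GPIO mask updates get a similar safety treatment: rather than overwriting kr_gpio_mask with a constant, enable_timer() and disable_timer() now read-modify-write it, so bits owned by other users of the GPIO interrupt (e.g. the error-interrupt bits behind the new counters above) survive.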
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 09bbb3f9a217..8039f6e5f0c8 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h | |||
@@ -220,6 +220,7 @@ struct ipath_segarray { | |||
220 | }; | 220 | }; |
221 | 221 | ||
222 | struct ipath_mregion { | 222 | struct ipath_mregion { |
223 | struct ib_pd *pd; /* shares refcnt of ibmr.pd */ | ||
223 | u64 user_base; /* User's address for this region */ | 224 | u64 user_base; /* User's address for this region */ |
224 | u64 iova; /* IB start address of this region */ | 225 | u64 iova; /* IB start address of this region */ |
225 | size_t length; | 226 | size_t length; |
@@ -364,12 +365,14 @@ struct ipath_qp { | |||
364 | u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ | 365 | u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ |
365 | u8 r_reuse_sge; /* for UC receive errors */ | 366 | u8 r_reuse_sge; /* for UC receive errors */ |
366 | u8 r_sge_inx; /* current index into sg_list */ | 367 | u8 r_sge_inx; /* current index into sg_list */ |
368 | u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */ | ||
367 | u8 qp_access_flags; | 369 | u8 qp_access_flags; |
368 | u8 s_max_sge; /* size of s_wq->sg_list */ | 370 | u8 s_max_sge; /* size of s_wq->sg_list */ |
369 | u8 s_retry_cnt; /* number of times to retry */ | 371 | u8 s_retry_cnt; /* number of times to retry */ |
370 | u8 s_rnr_retry_cnt; | 372 | u8 s_rnr_retry_cnt; |
371 | u8 s_retry; /* requester retry counter */ | 373 | u8 s_retry; /* requester retry counter */ |
372 | u8 s_rnr_retry; /* requester RNR retry counter */ | 374 | u8 s_rnr_retry; /* requester RNR retry counter */ |
375 | u8 s_wait_credit; /* limit number of unacked packets sent */ | ||
373 | u8 s_pkey_index; /* PKEY index to use */ | 376 | u8 s_pkey_index; /* PKEY index to use */ |
374 | u8 timeout; /* Timeout for this QP */ | 377 | u8 timeout; /* Timeout for this QP */ |
375 | enum ib_mtu path_mtu; | 378 | enum ib_mtu path_mtu; |
@@ -393,6 +396,8 @@ struct ipath_qp { | |||
393 | #define IPATH_S_BUSY 0 | 396 | #define IPATH_S_BUSY 0 |
394 | #define IPATH_S_SIGNAL_REQ_WR 1 | 397 | #define IPATH_S_SIGNAL_REQ_WR 1 |
395 | 398 | ||
399 | #define IPATH_PSN_CREDIT 2048 | ||
400 | |||
396 | /* | 401 | /* |
397 | * Since struct ipath_swqe is not a fixed size, we can't simply index into | 402 | * Since struct ipath_swqe is not a fixed size, we can't simply index into |
398 | * struct ipath_qp.s_wq. This function does the array index computation. | 403 | * struct ipath_qp.s_wq. This function does the array index computation. |
@@ -521,6 +526,7 @@ struct ipath_ibdev { | |||
521 | u32 n_rnr_naks; | 526 | u32 n_rnr_naks; |
522 | u32 n_other_naks; | 527 | u32 n_other_naks; |
523 | u32 n_timeouts; | 528 | u32 n_timeouts; |
529 | u32 n_rc_stalls; | ||
524 | u32 n_pkt_drops; | 530 | u32 n_pkt_drops; |
525 | u32 n_vl15_dropped; | 531 | u32 n_vl15_dropped; |
526 | u32 n_wqe_errs; | 532 | u32 n_wqe_errs; |
@@ -634,6 +640,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
634 | 640 | ||
635 | int ipath_destroy_qp(struct ib_qp *ibqp); | 641 | int ipath_destroy_qp(struct ib_qp *ibqp); |
636 | 642 | ||
643 | void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err); | ||
644 | |||
637 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 645 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
638 | int attr_mask, struct ib_udata *udata); | 646 | int attr_mask, struct ib_udata *udata); |
639 | 647 | ||
@@ -653,12 +661,6 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | |||
653 | 661 | ||
654 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); | 662 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); |
655 | 663 | ||
656 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | ||
657 | u32 len, u64 vaddr, u32 rkey, int acc); | ||
658 | |||
659 | int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge, | ||
660 | struct ib_sge *sge, int acc); | ||
661 | |||
662 | void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length); | 664 | void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length); |
663 | 665 | ||
664 | void ipath_skip_sge(struct ipath_sge_state *ss, u32 length); | 666 | void ipath_skip_sge(struct ipath_sge_state *ss, u32 length); |
@@ -683,10 +685,10 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, | |||
683 | 685 | ||
684 | void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey); | 686 | void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey); |
685 | 687 | ||
686 | int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge, | 688 | int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, |
687 | struct ib_sge *sge, int acc); | 689 | struct ib_sge *sge, int acc); |
688 | 690 | ||
689 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | 691 | int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, |
690 | u32 len, u64 vaddr, u32 rkey, int acc); | 692 | u32 len, u64 vaddr, u32 rkey, int acc); |
691 | 693 | ||
692 | int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | 694 | int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, |
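This is the header side of the RC flow-control change: s_wait_credit flags a QP whose sender has stalled, n_rc_stalls counts those events (it feeds the "RC stalls" line added to show_stats() above), and IPATH_PSN_CREDIT bounds how many packets may be outstanding without an ACK. A sketch of the send-path use, assuming the driver's 24-bit PSN comparison helper:

	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) >= 0) {
		qp->s_wait_credit = 1;	/* resume once an ACK advances s_last_psn */
		dev->n_rc_stalls++;
		goto bail;
	}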
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c index 036fde662aa9..0095bb70f34e 100644 --- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c +++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c | |||
@@ -38,13 +38,23 @@ | |||
38 | #include "ipath_kernel.h" | 38 | #include "ipath_kernel.h" |
39 | 39 | ||
40 | /** | 40 | /** |
41 | * ipath_unordered_wc - indicate whether write combining is ordered | 41 | * ipath_enable_wc - enable write combining for MMIO writes to the device |
42 | * @dd: infinipath device | ||
42 | * | 43 | * |
43 | * PowerPC systems (at least those in the 970 processor family) | 44 | * Nothing to do on PowerPC, so just return without error. |
44 | * write partially filled store buffers in address order, but will write | 45 | */ |
45 | * completely filled store buffers in "random" order, and therefore must | 46 | int ipath_enable_wc(struct ipath_devdata *dd) |
46 | * have serialization for correctness with current InfiniPath chips. | 47 | { |
48 | return 0; | ||
49 | } | ||
50 | |||
51 | /** | ||
52 | * ipath_unordered_wc - indicate whether write combining is unordered | ||
47 | * | 53 | * |
54 | * Because our performance depends on our ability to do write | ||
55 | * combining mmio writes in the most efficient way, we need to | ||
56 | * know if we are on a processor that may reorder stores when | ||
57 | * write combining. | ||
48 | */ | 58 | */ |
49 | int ipath_unordered_wc(void) | 59 | int ipath_unordered_wc(void) |
50 | { | 60 | { |
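With ipath_enable_wc() now stubbed out on PowerPC, both architectures export the same pair of entry points, and generic code can key off ipath_unordered_wc() to serialize write-combined stores where the CPU may reorder them. A sketch of such a caller (the barrier placement is an assumption):

	/* ... copy packet words into the write-combined PIO buffer ... */
	if (ipath_unordered_wc())
		mb();	/* e.g. ppc970: drain WC store buffers, in order,
			 * before the store that triggers the send */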
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c index f8f9e2e8cbdd..04696e62da87 100644 --- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c +++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c | |||
@@ -123,6 +123,8 @@ int ipath_enable_wc(struct ipath_devdata *dd) | |||
123 | ipath_cdbg(VERBOSE, "Set mtrr for chip to WC, " | 123 | ipath_cdbg(VERBOSE, "Set mtrr for chip to WC, " |
124 | "cookie is %d\n", cookie); | 124 | "cookie is %d\n", cookie); |
125 | dd->ipath_wc_cookie = cookie; | 125 | dd->ipath_wc_cookie = cookie; |
126 | dd->ipath_wc_base = (unsigned long) pioaddr; | ||
127 | dd->ipath_wc_len = (unsigned long) piolen; | ||
126 | } | 128 | } |
127 | } | 129 | } |
128 | 130 | ||
@@ -136,9 +138,16 @@ int ipath_enable_wc(struct ipath_devdata *dd) | |||
136 | void ipath_disable_wc(struct ipath_devdata *dd) | 138 | void ipath_disable_wc(struct ipath_devdata *dd) |
137 | { | 139 | { |
138 | if (dd->ipath_wc_cookie) { | 140 | if (dd->ipath_wc_cookie) { |
141 | int r; | ||
139 | ipath_cdbg(VERBOSE, "undoing WCCOMB on pio buffers\n"); | 142 | ipath_cdbg(VERBOSE, "undoing WCCOMB on pio buffers\n"); |
140 | mtrr_del(dd->ipath_wc_cookie, 0, 0); | 143 | r = mtrr_del(dd->ipath_wc_cookie, dd->ipath_wc_base, |
141 | dd->ipath_wc_cookie = 0; | 144 | dd->ipath_wc_len); |
145 | if (r < 0) | ||
146 | dev_info(&dd->pcidev->dev, | ||
147 | "mtrr_del(%lx, %lx, %lx) failed: %d\n", | ||
148 | dd->ipath_wc_cookie, dd->ipath_wc_base, | ||
149 | dd->ipath_wc_len, r); | ||
150 | dd->ipath_wc_cookie = 0; /* even on failure */ | ||
142 | } | 151 | } |
143 | } | 152 | } |
144 | 153 | ||
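mtrr_del() must be handed the same (reg, base, size) triple that mtrr_add() returned a cookie for, so the enable path now squirrels away the base and length; failures are reported rather than ignored, and the cookie is cleared either way so a stale one is never reused. The paired calls, condensed (the mtrr_add line sits just above the shown hunk, so treat its exact form as an assumption):

	cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
	if (cookie >= 0) {
		dd->ipath_wc_cookie = cookie;
		dd->ipath_wc_base = (unsigned long) pioaddr;
		dd->ipath_wc_len = (unsigned long) piolen;
	}
	...
	mtrr_del(dd->ipath_wc_cookie, dd->ipath_wc_base, dd->ipath_wc_len);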
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig index 365a1b5f19e0..aecbb9083f0c 100644 --- a/drivers/infiniband/ulp/iser/Kconfig +++ b/drivers/infiniband/ulp/iser/Kconfig | |||
@@ -1,11 +1,12 @@ | |||
1 | config INFINIBAND_ISER | 1 | config INFINIBAND_ISER |
2 | tristate "ISCSI RDMA Protocol" | 2 | tristate "iSCSI Extensions for RDMA (iSER)" |
3 | depends on INFINIBAND && SCSI && INET | 3 | depends on INFINIBAND && SCSI && INET |
4 | select SCSI_ISCSI_ATTRS | 4 | select SCSI_ISCSI_ATTRS |
5 | ---help--- | 5 | ---help--- |
6 | Support for the ISCSI RDMA Protocol over InfiniBand. This | 6 | Support for the iSCSI Extensions for RDMA (iSER) Protocol |
7 | allows you to access storage devices that speak ISER/ISCSI | 7 | over InfiniBand. This allows you to access storage devices |
8 | over InfiniBand. | 8 | that speak iSCSI over iSER over InfiniBand. |
9 | 9 | ||
10 | The ISER protocol is defined by IETF. | 10 | The iSER protocol is defined by the IETF. |
11 | See <http://www.ietf.org/>. | 11 | See <http://www.ietf.org/internet-drafts/draft-ietf-ips-iser-05.txt> |
12 | and <http://www.infinibandta.org/members/spec/iser_annex_060418.pdf> | ||
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 2a14fe2e3226..eb6f98d82289 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -317,6 +317,8 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) | |||
317 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 317 | struct iscsi_iser_conn *iser_conn = conn->dd_data; |
318 | 318 | ||
319 | iscsi_conn_teardown(cls_conn); | 319 | iscsi_conn_teardown(cls_conn); |
320 | if (iser_conn->ib_conn) | ||
321 | iser_conn->ib_conn->iser_conn = NULL; | ||
320 | kfree(iser_conn); | 322 | kfree(iser_conn); |
321 | } | 323 | } |
322 | 324 | ||
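The iscsi_iser_conn and iser_conn objects hold back-pointers to each other and can be destroyed in either order; this hunk, together with its mirror image in iser_verbs.c below, makes whichever object dies first reach into the survivor and clear the now-dangling pointer:

	/* in iscsi_iser_conn_destroy(): */
	if (iser_conn->ib_conn)
		iser_conn->ib_conn->iser_conn = NULL;

	/* in iser_conn_release(): */
	if (ib_conn->iser_conn)
		ib_conn->iser_conn->ib_conn = NULL;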
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 2cf9ae0def1c..9c53916f28c2 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -192,7 +192,7 @@ struct iser_regd_buf { | |||
192 | 192 | ||
193 | struct iser_dto { | 193 | struct iser_dto { |
194 | struct iscsi_iser_cmd_task *ctask; | 194 | struct iscsi_iser_cmd_task *ctask; |
195 | struct iscsi_iser_conn *conn; | 195 | struct iser_conn *ib_conn; |
196 | int notify_enable; | 196 | int notify_enable; |
197 | 197 | ||
198 | /* vector of registered buffers */ | 198 | /* vector of registered buffers */ |
@@ -355,4 +355,11 @@ int iser_post_send(struct iser_desc *tx_desc); | |||
355 | 355 | ||
356 | int iser_conn_state_comp(struct iser_conn *ib_conn, | 356 | int iser_conn_state_comp(struct iser_conn *ib_conn, |
357 | enum iser_ib_conn_state comp); | 357 | enum iser_ib_conn_state comp); |
358 | |||
359 | int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, | ||
360 | struct iser_data_buf *data, | ||
361 | enum iser_data_dir iser_dir, | ||
362 | enum dma_data_direction dma_dir); | ||
363 | |||
364 | void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask); | ||
358 | #endif | 365 | #endif |
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index ccf56f6f7236..9b3d79c796c8 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -66,42 +66,6 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto, | |||
66 | dto->regd_vector_len++; | 66 | dto->regd_vector_len++; |
67 | } | 67 | } |
68 | 68 | ||
69 | static int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, | ||
70 | struct iser_data_buf *data, | ||
71 | enum iser_data_dir iser_dir, | ||
72 | enum dma_data_direction dma_dir) | ||
73 | { | ||
74 | struct device *dma_device; | ||
75 | |||
76 | iser_ctask->dir[iser_dir] = 1; | ||
77 | dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; | ||
78 | |||
79 | data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir); | ||
80 | if (data->dma_nents == 0) { | ||
81 | iser_err("dma_map_sg failed!!!\n"); | ||
82 | return -EINVAL; | ||
83 | } | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) | ||
88 | { | ||
89 | struct device *dma_device; | ||
90 | struct iser_data_buf *data; | ||
91 | |||
92 | dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; | ||
93 | |||
94 | if (iser_ctask->dir[ISER_DIR_IN]) { | ||
95 | data = &iser_ctask->data[ISER_DIR_IN]; | ||
96 | dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE); | ||
97 | } | ||
98 | |||
99 | if (iser_ctask->dir[ISER_DIR_OUT]) { | ||
100 | data = &iser_ctask->data[ISER_DIR_OUT]; | ||
101 | dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE); | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /* Register user buffer memory and initialize passive rdma | 69 | /* Register user buffer memory and initialize passive rdma |
106 | * dto descriptor. Total data size is stored in | 70 | * dto descriptor. Total data size is stored in |
107 | * iser_ctask->data[ISER_DIR_IN].data_len | 71 | * iser_ctask->data[ISER_DIR_IN].data_len |
@@ -249,7 +213,7 @@ static int iser_post_receive_control(struct iscsi_conn *conn) | |||
249 | } | 213 | } |
250 | 214 | ||
251 | recv_dto = &rx_desc->dto; | 215 | recv_dto = &rx_desc->dto; |
252 | recv_dto->conn = iser_conn; | 216 | recv_dto->ib_conn = iser_conn->ib_conn; |
253 | recv_dto->regd_vector_len = 0; | 217 | recv_dto->regd_vector_len = 0; |
254 | 218 | ||
255 | regd_hdr = &rx_desc->hdr_regd_buf; | 219 | regd_hdr = &rx_desc->hdr_regd_buf; |
@@ -296,7 +260,7 @@ static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn, | |||
296 | regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */ | 260 | regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */ |
297 | regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; | 261 | regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; |
298 | 262 | ||
299 | send_dto->conn = iser_conn; | 263 | send_dto->ib_conn = iser_conn->ib_conn; |
300 | send_dto->notify_enable = 1; | 264 | send_dto->notify_enable = 1; |
301 | send_dto->regd_vector_len = 0; | 265 | send_dto->regd_vector_len = 0; |
302 | 266 | ||
@@ -588,7 +552,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc, | |||
588 | unsigned long dto_xfer_len) | 552 | unsigned long dto_xfer_len) |
589 | { | 553 | { |
590 | struct iser_dto *dto = &rx_desc->dto; | 554 | struct iser_dto *dto = &rx_desc->dto; |
591 | struct iscsi_iser_conn *conn = dto->conn; | 555 | struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn; |
592 | struct iscsi_session *session = conn->iscsi_conn->session; | 556 | struct iscsi_session *session = conn->iscsi_conn->session; |
593 | struct iscsi_cmd_task *ctask; | 557 | struct iscsi_cmd_task *ctask; |
594 | struct iscsi_iser_cmd_task *iser_ctask; | 558 | struct iscsi_iser_cmd_task *iser_ctask; |
@@ -641,7 +605,8 @@ void iser_rcv_completion(struct iser_desc *rx_desc, | |||
641 | void iser_snd_completion(struct iser_desc *tx_desc) | 605 | void iser_snd_completion(struct iser_desc *tx_desc) |
642 | { | 606 | { |
643 | struct iser_dto *dto = &tx_desc->dto; | 607 | struct iser_dto *dto = &tx_desc->dto; |
644 | struct iscsi_iser_conn *iser_conn = dto->conn; | 608 | struct iser_conn *ib_conn = dto->ib_conn; |
609 | struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; | ||
645 | struct iscsi_conn *conn = iser_conn->iscsi_conn; | 610 | struct iscsi_conn *conn = iser_conn->iscsi_conn; |
646 | struct iscsi_mgmt_task *mtask; | 611 | struct iscsi_mgmt_task *mtask; |
647 | 612 | ||
@@ -652,7 +617,7 @@ void iser_snd_completion(struct iser_desc *tx_desc) | |||
652 | if (tx_desc->type == ISCSI_TX_DATAOUT) | 617 | if (tx_desc->type == ISCSI_TX_DATAOUT) |
653 | kmem_cache_free(ig.desc_cache, tx_desc); | 618 | kmem_cache_free(ig.desc_cache, tx_desc); |
654 | 619 | ||
655 | atomic_dec(&iser_conn->ib_conn->post_send_buf_count); | 620 | atomic_dec(&ib_conn->post_send_buf_count); |
656 | 621 | ||
657 | write_lock(conn->recv_lock); | 622 | write_lock(conn->recv_lock); |
658 | if (conn->suspend_tx) { | 623 | if (conn->suspend_tx) { |
@@ -698,14 +663,19 @@ void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask) | |||
698 | void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) | 663 | void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) |
699 | { | 664 | { |
700 | int deferred; | 665 | int deferred; |
666 | int is_rdma_aligned = 1; | ||
701 | 667 | ||
702 | /* if we were reading, copy back to unaligned sglist, | 668 | /* if we were reading, copy back to unaligned sglist, |
703 | * anyway dma_unmap and free the copy | 669 | * anyway dma_unmap and free the copy |
704 | */ | 670 | */ |
705 | if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) | 671 | if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) { |
672 | is_rdma_aligned = 0; | ||
706 | iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN); | 673 | iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN); |
707 | if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) | 674 | } |
675 | if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) { | ||
676 | is_rdma_aligned = 0; | ||
708 | iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT); | 677 | iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT); |
678 | } | ||
709 | 679 | ||
710 | if (iser_ctask->dir[ISER_DIR_IN]) { | 680 | if (iser_ctask->dir[ISER_DIR_IN]) { |
711 | deferred = iser_regd_buff_release | 681 | deferred = iser_regd_buff_release |
@@ -725,7 +695,9 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) | |||
725 | } | 695 | } |
726 | } | 696 | } |
727 | 697 | ||
728 | iser_dma_unmap_task_data(iser_ctask); | 698 | /* if the data was unaligned, it was already unmapped and then copied */ |
699 | if (is_rdma_aligned) | ||
700 | iser_dma_unmap_task_data(iser_ctask); | ||
729 | } | 701 | } |
730 | 702 | ||
731 | void iser_dto_buffs_release(struct iser_dto *dto) | 703 | void iser_dto_buffs_release(struct iser_dto *dto) |
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index d0b03f426581..0606744c3f84 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -369,6 +369,44 @@ static void iser_page_vec_build(struct iser_data_buf *data, | |||
369 | } | 369 | } |
370 | } | 370 | } |
371 | 371 | ||
372 | int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, | ||
373 | struct iser_data_buf *data, | ||
374 | enum iser_data_dir iser_dir, | ||
375 | enum dma_data_direction dma_dir) | ||
376 | { | ||
377 | struct device *dma_device; | ||
378 | |||
379 | iser_ctask->dir[iser_dir] = 1; | ||
380 | dma_device = | ||
381 | iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; | ||
382 | |||
383 | data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir); | ||
384 | if (data->dma_nents == 0) { | ||
385 | iser_err("dma_map_sg failed!!!\n"); | ||
386 | return -EINVAL; | ||
387 | } | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) | ||
392 | { | ||
393 | struct device *dma_device; | ||
394 | struct iser_data_buf *data; | ||
395 | |||
396 | dma_device = | ||
397 | iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; | ||
398 | |||
399 | if (iser_ctask->dir[ISER_DIR_IN]) { | ||
400 | data = &iser_ctask->data[ISER_DIR_IN]; | ||
401 | dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE); | ||
402 | } | ||
403 | |||
404 | if (iser_ctask->dir[ISER_DIR_OUT]) { | ||
405 | data = &iser_ctask->data[ISER_DIR_OUT]; | ||
406 | dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE); | ||
407 | } | ||
408 | } | ||
409 | |||
372 | /** | 410 | /** |
373 | * iser_reg_rdma_mem - Registers memory intended for RDMA, | 411 | * iser_reg_rdma_mem - Registers memory intended for RDMA, |
374 | * obtaining rkey and va | 412 | * obtaining rkey and va |
@@ -394,6 +432,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, | |||
394 | iser_err("rdma alignment violation %d/%d aligned\n", | 432 | iser_err("rdma alignment violation %d/%d aligned\n", |
395 | aligned_len, mem->size); | 433 | aligned_len, mem->size); |
396 | iser_data_buf_dump(mem); | 434 | iser_data_buf_dump(mem); |
435 | |||
436 | /* unmap the command data before accessing it */ | ||
437 | iser_dma_unmap_task_data(iser_ctask); | ||
438 | |||
397 | /* allocate copy buf, if we are writing, copy the */ | 439 | /* allocate copy buf, if we are writing, copy the */ |
398 | /* unaligned scatterlist, dma map the copy */ | 440 | /* unaligned scatterlist, dma map the copy */ |
399 | if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0) | 441 | if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0) |
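Together with the iser_initiator.c hunks above, this gives the unaligned-SG path a balanced map/unmap life cycle: map the data; if it cannot be registered as one contiguous region, unmap it before the CPU touches it and bounce it through a copy buffer that is mapped in its place; at command finalize, only whichever mapping is still live gets unmapped. In outline (condensed from the hunks, not verbatim):

	/* iser_reg_rdma_mem(): */
	if (aligned_len != mem->size) {
		iser_dma_unmap_task_data(iser_ctask);	/* before CPU access */
		iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir);	/* copy + remap */
	}

	/* iser_ctask_rdma_finalize(): the bounce path already unmapped */
	if (is_rdma_aligned)
		iser_dma_unmap_task_data(iser_ctask);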
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ecdca7fc1e4c..18a000034996 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -571,6 +571,8 @@ void iser_conn_release(struct iser_conn *ib_conn) | |||
571 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ | 571 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ |
572 | if (device != NULL) | 572 | if (device != NULL) |
573 | iser_device_try_release(device); | 573 | iser_device_try_release(device); |
574 | if (ib_conn->iser_conn) | ||
575 | ib_conn->iser_conn->ib_conn = NULL; | ||
574 | kfree(ib_conn); | 576 | kfree(ib_conn); |
575 | } | 577 | } |
576 | 578 | ||
@@ -694,7 +696,7 @@ int iser_post_recv(struct iser_desc *rx_desc) | |||
694 | struct iser_dto *recv_dto = &rx_desc->dto; | 696 | struct iser_dto *recv_dto = &rx_desc->dto; |
695 | 697 | ||
696 | /* Retrieve conn */ | 698 | /* Retrieve conn */ |
697 | ib_conn = recv_dto->conn->ib_conn; | 699 | ib_conn = recv_dto->ib_conn; |
698 | 700 | ||
699 | iser_dto_to_iov(recv_dto, iov, 2); | 701 | iser_dto_to_iov(recv_dto, iov, 2); |
700 | 702 | ||
@@ -727,7 +729,7 @@ int iser_post_send(struct iser_desc *tx_desc) | |||
727 | struct iser_conn *ib_conn; | 729 | struct iser_conn *ib_conn; |
728 | struct iser_dto *dto = &tx_desc->dto; | 730 | struct iser_dto *dto = &tx_desc->dto; |
729 | 731 | ||
730 | ib_conn = dto->conn->ib_conn; | 732 | ib_conn = dto->ib_conn; |
731 | 733 | ||
732 | iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN); | 734 | iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN); |
733 | 735 | ||
@@ -774,7 +776,7 @@ static void iser_comp_error_worker(void *data) | |||
774 | static void iser_handle_comp_error(struct iser_desc *desc) | 776 | static void iser_handle_comp_error(struct iser_desc *desc) |
775 | { | 777 | { |
776 | struct iser_dto *dto = &desc->dto; | 778 | struct iser_dto *dto = &desc->dto; |
777 | struct iser_conn *ib_conn = dto->conn->ib_conn; | 779 | struct iser_conn *ib_conn = dto->ib_conn; |
778 | 780 | ||
779 | iser_dto_buffs_release(dto); | 781 | iser_dto_buffs_release(dto); |
780 | 782 | ||