path: root/drivers/infiniband/hw/ipath/ipath_verbs.h
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_verbs.h')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h  |  64
1 file changed, 52 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 6514aa8306cd..9d12ae8a778e 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -74,6 +74,11 @@
 #define IPATH_POST_RECV_OK		0x02
 #define IPATH_PROCESS_RECV_OK		0x04
 #define IPATH_PROCESS_SEND_OK		0x08
+#define IPATH_PROCESS_NEXT_SEND_OK	0x10
+#define IPATH_FLUSH_SEND		0x20
+#define IPATH_FLUSH_RECV		0x40
+#define IPATH_PROCESS_OR_FLUSH_SEND \
+	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
 
 /* IB Performance Manager status values */
 #define IB_PMA_SAMPLE_STATUS_DONE	0x00
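The composite IPATH_PROCESS_OR_FLUSH_SEND mask lets a single test cover both the normal-send and flush-on-error cases. A minimal sketch of how such a state check might look, assuming this header is included; the helper name and the state_ops argument are illustrative, not part of this patch:

	/*
	 * Sketch only: the driver keeps a per-QP-state flag word (not shown
	 * in this header); a caller would test it before building or
	 * flushing send work requests.
	 */
	static void example_handle_send(struct ipath_qp *qp, u32 state_ops)
	{
		if (state_ops & IPATH_PROCESS_SEND_OK) {
			/* normal path: build and post the next packet */
		} else if (state_ops & IPATH_FLUSH_SEND) {
			/* error path: complete the SWQE with a flush status */
		}
		/* (state_ops & IPATH_PROCESS_OR_FLUSH_SEND) tests both at once. */
	}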
@@ -353,12 +358,14 @@ struct ipath_qp {
 	struct ib_qp ibqp;
 	struct ipath_qp *next;		/* link list for QPN hash table */
 	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
+	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
 	struct list_head piowait;	/* link for wait PIO buf */
 	struct list_head timerwait;	/* link for waiting for timeouts */
 	struct ib_ah_attr remote_ah_attr;
 	struct ipath_ib_header s_hdr;	/* next packet header to send */
 	atomic_t refcount;
 	wait_queue_head_t wait;
+	wait_queue_head_t wait_dma;
 	struct tasklet_struct s_task;
 	struct ipath_mmap_info *ip;
 	struct ipath_sge_state *s_cur_sge;
@@ -369,7 +376,7 @@ struct ipath_qp {
 	struct ipath_sge_state s_rdma_read_sge;
 	struct ipath_sge_state r_sge;	/* current receive data */
 	spinlock_t s_lock;
-	unsigned long s_busy;
+	atomic_t s_dma_busy;
 	u16 s_pkt_delay;
 	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
 	u32 s_cur_size;		/* size of send packet in bytes */
@@ -383,6 +390,7 @@ struct ipath_qp {
 	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
 	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
 	u64 r_wr_id;		/* ID for current receive WQE */
+	unsigned long r_aflags;
 	u32 r_len;		/* total length of r_sge */
 	u32 r_rcv_len;		/* receive data len processed */
 	u32 r_psn;		/* expected rcv packet sequence number */
@@ -394,8 +402,7 @@ struct ipath_qp {
 	u8 r_state;		/* opcode of last packet received */
 	u8 r_nak_state;		/* non-zero if NAK is pending */
 	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
-	u8 r_reuse_sge;		/* for UC receive errors */
-	u8 r_wrid_valid;	/* r_wrid set but CQ entry not yet made */
+	u8 r_flags;
 	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
 	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
 	u8 qp_access_flags;
@@ -404,13 +411,13 @@ struct ipath_qp {
 	u8 s_rnr_retry_cnt;
 	u8 s_retry;		/* requester retry counter */
 	u8 s_rnr_retry;		/* requester RNR retry counter */
-	u8 s_wait_credit;	/* limit number of unacked packets sent */
 	u8 s_pkey_index;	/* PKEY index to use */
 	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
 	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
 	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
 	u8 s_flags;
 	u8 s_dmult;
+	u8 s_draining;
 	u8 timeout;		/* Timeout for this QP */
 	enum ib_mtu path_mtu;
 	u32 remote_qpn;
@@ -428,16 +435,40 @@ struct ipath_qp {
 	struct ipath_sge r_sg_list[0];	/* verified SGEs */
 };
 
-/* Bit definition for s_busy. */
-#define IPATH_S_BUSY		0
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define IPATH_R_WRID_VALID	0
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define IPATH_R_REUSE_SGE	0x01
+#define IPATH_R_RDMAR_SEQ	0x02
 
 /*
  * Bit definitions for s_flags.
+ *
+ * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
+ *			   before processing the next SWQE
+ * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
+ *			   before processing the next SWQE
+ * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
+ * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ *		      next send completion entry not via send DMA.
  */
 #define IPATH_S_SIGNAL_REQ_WR	0x01
 #define IPATH_S_FENCE_PENDING	0x02
 #define IPATH_S_RDMAR_PENDING	0x04
 #define IPATH_S_ACK_PENDING	0x08
+#define IPATH_S_BUSY		0x10
+#define IPATH_S_WAITING		0x20
+#define IPATH_S_WAIT_SSN_CREDIT	0x40
+#define IPATH_S_WAIT_DMA	0x80
+
+#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
+	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
 
 #define IPATH_PSN_CREDIT	512
 
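r_aflags is sized for the kernel's atomic bit helpers, while the plain r_flags byte holds bits updated under a lock. A hedged sketch of the intended IPATH_R_WRID_VALID pattern, carried over from the removed r_wrid_valid field ("r_wrid set but CQ entry not yet made"); the surrounding completion logic is illustrative only, not from this patch:

	/*
	 * Sketch only: mark r_wr_id as valid when the receive WQE is
	 * consumed, and clear it exactly once when the CQ entry is made.
	 */
	static void example_recv_path(struct ipath_qp *qp)
	{
		set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);

		/* ... later, when generating the receive completion ... */
		if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
			/* post a completion for qp->r_wr_id */
		}
	}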
@@ -573,13 +604,11 @@ struct ipath_ibdev {
 	u32 n_rnr_naks;
 	u32 n_other_naks;
 	u32 n_timeouts;
-	u32 n_rc_stalls;
 	u32 n_pkt_drops;
 	u32 n_vl15_dropped;
 	u32 n_wqe_errs;
 	u32 n_rdma_dup_busy;
 	u32 n_piowait;
-	u32 n_no_piobuf;
 	u32 n_unaligned;
 	u32 port_cap_flags;
 	u32 pma_sample_start;
@@ -657,6 +686,17 @@ static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
 	return container_of(ibdev, struct ipath_ibdev, ibdev);
 }
 
+/*
+ * This must be called with s_lock held.
+ */
+static inline void ipath_schedule_send(struct ipath_qp *qp)
+{
+	if (qp->s_flags & IPATH_S_ANY_WAIT)
+		qp->s_flags &= ~IPATH_S_ANY_WAIT;
+	if (!(qp->s_flags & IPATH_S_BUSY))
+		tasklet_hi_schedule(&qp->s_task);
+}
+
 int ipath_process_mad(struct ib_device *ibdev,
 		      int mad_flags,
 		      u8 port_num,
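ipath_schedule_send() clears any pending wait bits and kicks the send tasklet unless the QP is already marked busy; per its comment it must be called with s_lock held. A hedged sketch of a typical call site (the wrapper function is illustrative, not from this patch):

	/*
	 * Sketch only: a caller that has just satisfied a wait condition
	 * (credits arrived, RNR timer expired, DMA queue drained) takes
	 * s_lock and reschedules the send engine.
	 */
	static void example_resume_send(struct ipath_qp *qp)
	{
		unsigned long flags;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & IPATH_S_ANY_WAIT)
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}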
@@ -706,12 +746,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		   int attr_mask, struct ib_qp_init_attr *init_attr);
 
-void ipath_free_all_qps(struct ipath_qp_table *qpt);
+unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
 
 int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
 
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
-
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
 unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
@@ -729,7 +767,9 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
 
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
+
+void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
 
 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
 