path: root/drivers/infiniband/hw/cxgb4/device.c
author    Hariprasad Shenai <hariprasad@chelsio.com>    2014-07-14 12:04:54 -0400
committer David S. Miller <davem@davemloft.net>         2014-07-15 19:25:16 -0400
commit    7730b4c7e32c0ab4d7db746a9c3a84cf715161fa (patch)
tree      91a71211347fda589fb5670d5eeb790aeb53fc10    /drivers/infiniband/hw/cxgb4/device.c
parent    031cf4769bc4504d046074274d1ecd70d89d20b8 (diff)
cxgb4/iw_cxgb4: work request logging feature
This commit enhances the iwarp driver to optionally keep a log of RDMA work request timing data for kernel mode QPs. If the iw_cxgb4 module option c4iw_wr_log is set to non-zero, each work request is tracked and its timing data maintained in a rolling log that is 4096 entries deep by default. The module option c4iw_wr_log_size_order allows specifying a log2 size to use instead of the default order of 12 (4096 entries).

Both module options are read-only and must be passed in at module load time to set them, e.g.:

modprobe iw_cxgb4 c4iw_wr_log=1 c4iw_wr_log_size_order=10

The timing data is viewable via the iw_cxgb4 debugfs file "wr_log". Writing anything to this file will clear all the timing data.

Data tracked includes:

- The host time when the work request was posted, just before ringing the doorbell, and the host time when the completion was polled by the application (which is also when the log entry is created). The delta of these two times is the amount of time spent processing the work request.

- The qid of the EQ used to post the work request.

- The work request opcode.

- The cqe wr_id field. For SQ completions this is the swsqe index. For recv completions this is the MSN of the ingress SEND. This value can be used to match log entries from this log with firmware flowc event entries.

- The sge timestamp value just before ringing the doorbell when posting, the sge timestamp value just after polling the completion, and the CQE.timestamp field from the completion itself. With these three timestamps we can track the latency from post to poll, and the amount of time the completion resided in the CQ before being reaped by the application. With debug firmware, the sge timestamp is also logged by firmware in its flowc history, so we can compute the latency from posting the work request until the firmware sees it.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/device.c')
-rw-r--r--    drivers/infiniband/hw/cxgb4/device.c    137
1 file changed, 137 insertions, 0 deletions
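The rolling log described in the commit message is sized as a power of two (1 << c4iw_wr_log_size_order) so that wrap-around is just a mask of an atomic counter rather than a modulo. Below is a minimal standalone C sketch of that indexing scheme; it is not code from this patch, and all names in it are made up for illustration.

/*
 * Illustrative sketch only (not part of this patch): a rolling log
 * whose entry count is a power of two, so the writer wraps an atomic
 * index with a mask.
 */
#include <stdatomic.h>

#define SKETCH_LOG_SIZE_ORDER 12                 /* log2(entries), default order */
#define SKETCH_LOG_SIZE (1u << SKETCH_LOG_SIZE_ORDER)

struct sketch_entry {
	unsigned long long poll_sge_ts;
	int valid;
};

static struct sketch_entry sketch_log[SKETCH_LOG_SIZE];
static atomic_uint sketch_idx;

static void sketch_log_wr(struct sketch_entry e)
{
	/* atomic_fetch_add() returns the pre-increment value, the same
	 * idea as the atomic_inc_return(...) - 1 idiom in the patch. */
	unsigned int idx = atomic_fetch_add(&sketch_idx, 1) &
			   (SKETCH_LOG_SIZE - 1);

	e.valid = 1;
	sketch_log[idx] = e;
}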
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8386678f1159..df1f1b52c7ec 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -55,6 +55,15 @@ module_param(allow_db_coalescing_on_t5, int, 0644);
 MODULE_PARM_DESC(allow_db_coalescing_on_t5,
 		 "Allow DB Coalescing on T5 (default = 0)");
 
+int c4iw_wr_log = 0;
+module_param(c4iw_wr_log, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
+
+int c4iw_wr_log_size_order = 12;
+module_param(c4iw_wr_log_size_order, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log_size_order,
+		 "Number of entries (log2) in the work request timing log.");
+
 struct uld_ctx {
 	struct list_head entry;
 	struct cxgb4_lld_info lldi;
@@ -103,6 +112,117 @@ static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
 	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
 }
 
+void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
+{
+	struct wr_log_entry le;
+	int idx;
+
+	if (!wq->rdev->wr_log)
+		return;
+
+	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
+		(wq->rdev->wr_log_size - 1);
+	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
+	getnstimeofday(&le.poll_host_ts);
+	le.valid = 1;
+	le.cqe_sge_ts = CQE_TS(cqe);
+	if (SQ_TYPE(cqe)) {
+		le.qid = wq->sq.qid;
+		le.opcode = CQE_OPCODE(cqe);
+		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
+		le.wr_id = CQE_WRID_SQ_IDX(cqe);
+	} else {
+		le.qid = wq->rq.qid;
+		le.opcode = FW_RI_RECEIVE;
+		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
+		le.wr_id = CQE_WRID_MSN(cqe);
+	}
+	wq->rdev->wr_log[idx] = le;
+}
+
+static int wr_log_show(struct seq_file *seq, void *v)
+{
+	struct c4iw_dev *dev = seq->private;
+	struct timespec prev_ts = {0, 0};
+	struct wr_log_entry *lep;
+	int prev_ts_set = 0;
+	int idx, end;
+
+#define ts2ns(ts) ((ts) * dev->rdev.lldi.cclk_ps / 1000)
+
+	idx = atomic_read(&dev->rdev.wr_log_idx) &
+		(dev->rdev.wr_log_size - 1);
+	end = idx - 1;
+	if (end < 0)
+		end = dev->rdev.wr_log_size - 1;
+	lep = &dev->rdev.wr_log[idx];
+	while (idx != end) {
+		if (lep->valid) {
+			if (!prev_ts_set) {
+				prev_ts_set = 1;
+				prev_ts = lep->poll_host_ts;
+			}
+			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
+				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
+				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
+				   "cqe_poll_delta_ns %llu\n",
+				   idx,
+				   timespec_sub(lep->poll_host_ts,
+						prev_ts).tv_sec,
+				   timespec_sub(lep->poll_host_ts,
+						prev_ts).tv_nsec,
+				   lep->qid, lep->opcode,
+				   lep->opcode == FW_RI_RECEIVE ?
+							"msn" : "wrid",
+				   lep->wr_id,
+				   timespec_sub(lep->poll_host_ts,
+						lep->post_host_ts).tv_sec,
+				   timespec_sub(lep->poll_host_ts,
+						lep->post_host_ts).tv_nsec,
+				   lep->post_sge_ts, lep->cqe_sge_ts,
+				   lep->poll_sge_ts,
+				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
+				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
+			prev_ts = lep->poll_host_ts;
+		}
+		idx++;
+		if (idx > (dev->rdev.wr_log_size - 1))
+			idx = 0;
+		lep = &dev->rdev.wr_log[idx];
+	}
+#undef ts2ns
+	return 0;
+}
+
+static int wr_log_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wr_log_show, inode->i_private);
+}
+
+static ssize_t wr_log_clear(struct file *file, const char __user *buf,
+			    size_t count, loff_t *pos)
+{
+	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+	int i;
+
+	if (dev->rdev.wr_log)
+		for (i = 0; i < dev->rdev.wr_log_size; i++)
+			dev->rdev.wr_log[i].valid = 0;
+	return count;
+}
+
+static const struct file_operations wr_log_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = wr_log_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = wr_log_clear,
+};
+
 static int dump_qp(int id, void *p, void *data)
 {
 	struct c4iw_qp *qp = p;
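The ts2ns() macro in wr_log_show() above converts sge timestamp deltas (core-clock ticks) to nanoseconds using the adapter clock period in picoseconds (lldi.cclk_ps). The following self-contained C snippet illustrates the same arithmetic; it is not part of the patch, and the 4000 ps clock period is an assumed value purely for the example.

/*
 * Standalone illustration of the ts2ns() conversion used above:
 * ticks * clock-period-in-picoseconds / 1000 = nanoseconds.
 * The 4000 ps period is assumed for this example only.
 */
#include <stdio.h>

static unsigned long long ticks_to_ns(unsigned long long ticks,
				      unsigned int cclk_ps)
{
	return ticks * cclk_ps / 1000;
}

int main(void)
{
	unsigned long long post_sge_ts = 1000, poll_sge_ts = 1250;

	/* 250 ticks at an assumed 4000 ps per tick -> 1000 ns */
	printf("post_poll_delta_ns %llu\n",
	       ticks_to_ns(poll_sge_ts - post_sge_ts, 4000));
	return 0;
}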
@@ -604,6 +724,12 @@ static int setup_debugfs(struct c4iw_dev *devp)
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
 
+	if (c4iw_wr_log) {
+		de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root,
+					 (void *)devp, &wr_log_debugfs_fops);
+		if (de && de->d_inode)
+			de->d_inode->i_size = 4096;
+	}
 	return 0;
 }
 
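As the commit message notes, the timing data is exposed through the "wr_log" debugfs file created above, and any write to it clears the log (see wr_log_clear() earlier in this patch). Below is a hedged userspace C sketch of reading and then clearing the log; the debugfs path is an assumption, since the actual directory name depends on where iw_cxgb4 creates its per-device debugfs root.

/*
 * Userspace sketch (not part of this patch).  The path is a placeholder;
 * substitute the device's actual debugfs directory.  Reading dumps the
 * log entries; any write clears them.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/iw_cxgb4/<device>/wr_log";
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	f = fopen(path, "w");	/* any write clears the log */
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}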
@@ -717,6 +843,16 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		pr_err(MOD "error allocating status page\n");
 		goto err4;
 	}
+	if (c4iw_wr_log) {
+		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
+				       sizeof(*rdev->wr_log), GFP_KERNEL);
+		if (rdev->wr_log) {
+			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
+			atomic_set(&rdev->wr_log_idx, 0);
+		} else {
+			pr_err(MOD "error allocating wr_log. Logging disabled\n");
+		}
+	}
 	return 0;
 err4:
 	c4iw_rqtpool_destroy(rdev);
@@ -730,6 +866,7 @@ err1:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+	kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);