author	Selvin Xavier <selvin.xavier@emulex.com>	2014-02-04 01:27:07 -0500
committer	Roland Dreier <roland@purestorage.com>	2014-04-03 11:30:05 -0400
commit	a51f06e1679e2abac2e8a817884e60edc18c5c86 (patch)
tree	633ac6e242bb3cd9ea3dba6ff6b6834565002807
parent	bbc5ec524eecf8af95b81c3c1d15cbc672568b4e (diff)
RDMA/ocrdma: Query controller information
Issue mailbox commands to query ocrdma controller information and phy
information, and print them while adding the ocrdma device.

Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--  drivers/infiniband/hw/ocrdma/Makefile         2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h        61
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c    148
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h      2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c   14
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h   233
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c 623
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.h  54
8 files changed, 1135 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
index 06a5bed12e43..d1bfd4f4cdde 100644
--- a/drivers/infiniband/hw/ocrdma/Makefile
+++ b/drivers/infiniband/hw/ocrdma/Makefile
@@ -2,4 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/emulex/benet
 
 obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
 
-ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
+ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 4ea8496c7107..3042c87a74ba 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -44,10 +44,17 @@
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
 
+#define OC_NAME_SH	OCRDMA_NODE_DESC "(Skyhawk)"
+#define OC_NAME_UNKNOWN	OCRDMA_NODE_DESC "(Unknown)"
+
+#define OC_SKH_DEVICE_PF	0x720
+#define OC_SKH_DEVICE_VF	0x728
 #define OCRDMA_MAX_AH 512
 
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
+#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+
 struct ocrdma_dev_attr {
 	u8 fw_ver[32];
 	u32 vendor_id;
@@ -86,6 +93,12 @@ struct ocrdma_dev_attr {
 	u8 num_ird_pages;
 };
 
+struct ocrdma_dma_mem {
+	void *va;
+	dma_addr_t pa;
+	u32 size;
+};
+
 struct ocrdma_pbl {
 	void *va;
 	dma_addr_t pa;
@@ -151,6 +164,26 @@ struct ocrdma_mr {
 	struct ocrdma_hw_mr hwmr;
 };
 
+struct ocrdma_stats {
+	u8 type;
+	struct ocrdma_dev *dev;
+};
+
+struct stats_mem {
+	struct ocrdma_mqe mqe;
+	void *va;
+	dma_addr_t pa;
+	u32 size;
+	char *debugfs_mem;
+};
+
+struct phy_info {
+	u16 auto_speeds_supported;
+	u16 fixed_speeds_supported;
+	u16 phy_type;
+	u16 interface_type;
+};
+
 struct ocrdma_dev {
 	struct ib_device ibdev;
 	struct ocrdma_dev_attr attr;
@@ -194,6 +227,9 @@ struct ocrdma_dev {
 	struct mqe_ctx mqe_ctx;
 
 	struct be_dev_info nic_info;
+	struct phy_info phy;
+	char model_number[32];
+	u32 hba_port_num;
 
 	struct list_head entry;
 	struct rcu_head rcu;
@@ -201,6 +237,20 @@ struct ocrdma_dev {
 	struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
 	u16 pvid;
 	u32 asic_id;
+
+	ulong last_stats_time;
+	struct mutex stats_lock; /* provide synch for debugfs operations */
+	struct stats_mem stats_mem;
+	struct ocrdma_stats rsrc_stats;
+	struct ocrdma_stats rx_stats;
+	struct ocrdma_stats wqe_stats;
+	struct ocrdma_stats tx_stats;
+	struct ocrdma_stats db_err_stats;
+	struct ocrdma_stats tx_qp_err_stats;
+	struct ocrdma_stats rx_qp_err_stats;
+	struct ocrdma_stats tx_dbg_stats;
+	struct ocrdma_stats rx_dbg_stats;
+	struct dentry *dir;
 };
 
 struct ocrdma_cq {
@@ -434,6 +484,17 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
 	return 0;
 }
 
+static inline char *hca_name(struct ocrdma_dev *dev)
+{
+	switch (dev->nic_info.pdev->device) {
+	case OC_SKH_DEVICE_PF:
+	case OC_SKH_DEVICE_VF:
+		return OC_NAME_SH;
+	default:
+		return OC_NAME_UNKNOWN;
+	}
+}
+
 static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
 					    int eqid)
 {
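
Editor's note: the convert_to_64bit() macro added above is how the driver folds the _lo/_hi counter word pairs reported by firmware into a single 64-bit value. Below is a minimal stand-alone sketch of the same reassembly, not part of the patch; it uses user-space stdint types instead of kernel u32/u64, and the counter words are made-up placeholder values.

```c
#include <stdint.h>
#include <stdio.h>

/* Same operation as convert_to_64bit(lo, hi): ((u64)hi << 32 | (u64)lo) */
static uint64_t combine_lo_hi(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | (uint64_t)lo;
}

int main(void)
{
	/* hypothetical counter words, e.g. roce_frames_lo / roce_frames_hi */
	uint32_t lo = 0x89abcdef, hi = 0x01234567;

	/* prints 0x0123456789abcdef */
	printf("64-bit counter = 0x%016llx\n",
	       (unsigned long long)combine_lo_hi(lo, hi));
	return 0;
}
```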
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index e6797ff4c99b..5d3485846d87 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -243,6 +243,23 @@ static int ocrdma_get_mbx_errno(u32 status)
 	return err_num;
 }
 
+char *port_speed_string(struct ocrdma_dev *dev)
+{
+	char *str = "";
+	u16 speeds_supported;
+
+	speeds_supported = dev->phy.fixed_speeds_supported |
+				dev->phy.auto_speeds_supported;
+	if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
+		str = "40Gbps ";
+	else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
+		str = "10Gbps ";
+	else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
+		str = "1Gbps ";
+
+	return str;
+}
+
 static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
 {
 	int err_num = -EINVAL;
@@ -332,6 +349,11 @@ static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
 	return mqe;
 }
 
+static void *ocrdma_alloc_mqe(void)
+{
+	return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
+}
+
 static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
 {
 	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
@@ -1154,6 +1176,96 @@ mbx_err:
 	return status;
 }
 
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
+{
+	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
+	struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
+	struct ocrdma_rdma_stats_resp *old_stats = NULL;
+	int status;
+
+	old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
+	if (old_stats == NULL)
+		return -ENOMEM;
+
+	memset(mqe, 0, sizeof(*mqe));
+	mqe->hdr.pyld_len = dev->stats_mem.size;
+	mqe->hdr.spcl_sge_cnt_emb |=
+			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+				OCRDMA_MQE_HDR_SGE_CNT_MASK;
+	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
+	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
+	mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
+
+	/* Cache the old stats */
+	memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
+	memset(req, 0, dev->stats_mem.size);
+
+	ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
+			OCRDMA_CMD_GET_RDMA_STATS,
+			OCRDMA_SUBSYS_ROCE,
+			dev->stats_mem.size);
+	if (reset)
+		req->reset_stats = reset;
+
+	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
+	if (status)
+		/* Copy from cache, if mbox fails */
+		memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
+	else
+		ocrdma_le32_to_cpu(req, dev->stats_mem.size);
+
+	kfree(old_stats);
+	return status;
+}
+
+static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	struct ocrdma_dma_mem dma;
+	struct ocrdma_mqe *mqe;
+	struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
+	struct mgmt_hba_attribs *hba_attribs;
+
+	mqe = ocrdma_alloc_mqe();
+	if (!mqe)
+		return status;
+	memset(mqe, 0, sizeof(*mqe));
+
+	dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
+	dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
+					dma.size, &dma.pa, GFP_KERNEL);
+	if (!dma.va)
+		goto free_mqe;
+
+	mqe->hdr.pyld_len = dma.size;
+	mqe->hdr.spcl_sge_cnt_emb |=
+			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+			OCRDMA_MQE_HDR_SGE_CNT_MASK;
+	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
+	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
+	mqe->u.nonemb_req.sge[0].len = dma.size;
+
+	memset(dma.va, 0, dma.size);
+	ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
+			OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
+			OCRDMA_SUBSYS_COMMON,
+			dma.size);
+
+	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
+	if (!status) {
+		ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
+		hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
+
+		dev->hba_port_num = hba_attribs->phy_port;
+		strncpy(dev->model_number,
+			hba_attribs->controller_model_number, 31);
+	}
+	dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
+free_mqe:
+	kfree(mqe);
+	return status;
+}
+
 static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
 {
 	int status = -ENOMEM;
@@ -1201,6 +1313,35 @@ mbx_err:
 	return status;
 }
 
+static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	struct ocrdma_mqe *cmd;
+	struct ocrdma_get_phy_info_rsp *rsp;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+			OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
+			sizeof(*cmd));
+
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+
+	rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
+	dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
+	dev->phy.auto_speeds_supported =
+			le16_to_cpu(rsp->auto_speeds_supported);
+	dev->phy.fixed_speeds_supported =
+			le16_to_cpu(rsp->fixed_speeds_supported);
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
 int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
 {
 	int status = -ENOMEM;
@@ -2570,6 +2711,13 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
 	status = ocrdma_mbx_create_ah_tbl(dev);
 	if (status)
 		goto conf_err;
+	status = ocrdma_mbx_get_phy_info(dev);
+	if (status)
+		goto conf_err;
+	status = ocrdma_mbx_get_ctrl_attribs(dev);
+	if (status)
+		goto conf_err;
+
 	return 0;
 
 conf_err:
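
Editor's note: both new mailbox commands (GET_RDMA_STATS and GET_CTRL_ATTRIBUTES) use the non-embedded MQE form, where the payload lives in a separate DMA buffer and sge[0] carries its bus address split into two 32-bit words plus a length. Here is a small stand-alone sketch of that address split, not part of the patch; the struct is a hypothetical mirror of the sge fields used above, with user-space stdint types.

```c
#include <stdint.h>

/* hypothetical mirror of the sge[0] fields filled in by the mailbox helpers */
struct sge_example {
	uint32_t pa_lo;
	uint32_t pa_hi;
	uint32_t len;
};

/* Split a 64-bit DMA address the way the new helpers do: low word via a
 * mask, high word via a 32-bit shift (what upper_32_bits() does in-kernel). */
static void fill_sge(struct sge_example *sge, uint64_t dma_pa, uint32_t size)
{
	sge->pa_lo = (uint32_t)(dma_pa & 0xffffffff);
	sge->pa_hi = (uint32_t)(dma_pa >> 32);
	sge->len = size;
}
```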
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 77da536d5d96..e513f7293142 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -133,4 +133,6 @@ bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
 void ocrdma_flush_qp(struct ocrdma_qp *);
 int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
 
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
+char *port_speed_string(struct ocrdma_dev *dev);
 #endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index ae17a3670bcd..7d18b3ad21e7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -39,6 +39,7 @@
 #include "ocrdma_ah.h"
 #include "be_roce.h"
 #include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
 #include "ocrdma_abi.h"
 
 MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
@@ -372,6 +373,15 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
 	spin_lock(&ocrdma_devlist_lock);
 	list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
 	spin_unlock(&ocrdma_devlist_lock);
+	/* Init stats */
+	ocrdma_add_port_stats(dev);
+
+	pr_info("%s %s: %s \"%s\" port %d\n",
+		dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
+		port_speed_string(dev), dev->model_number,
+		dev->hba_port_num);
+	pr_info("%s ocrdma%d driver loaded successfully\n",
+		dev_name(&dev->nic_info.pdev->dev), dev->id);
 	return dev;
 
 alloc_err:
@@ -400,6 +410,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
 	/* first unregister with stack to stop all the active traffic
 	 * of the registered clients.
 	 */
+	ocrdma_rem_port_stats(dev);
 	ib_unregister_device(&dev->ibdev);
 
 	spin_lock(&ocrdma_devlist_lock);
@@ -492,6 +503,8 @@ static int __init ocrdma_init_module(void)
 {
 	int status;
 
+	ocrdma_init_debugfs();
+
 	status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
 	if (status)
 		return status;
@@ -513,6 +526,7 @@ static void __exit ocrdma_exit_module(void)
 {
 	be_roce_unregister_driver(&ocrdma_drv);
 	ocrdma_unregister_inet6addr_notifier();
+	ocrdma_rem_debugfs();
 }
 
 module_init(ocrdma_init_module);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index de4ebfc4e0e2..6e048b7283c4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -72,6 +72,7 @@ enum {
 
 	OCRDMA_CMD_ATTACH_MCAST,
 	OCRDMA_CMD_DETACH_MCAST,
+	OCRDMA_CMD_GET_RDMA_STATS,
 
 	OCRDMA_CMD_MAX
 };
@@ -82,12 +83,14 @@ enum {
 	OCRDMA_CMD_CREATE_CQ = 12,
 	OCRDMA_CMD_CREATE_EQ = 13,
 	OCRDMA_CMD_CREATE_MQ = 21,
+	OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
 	OCRDMA_CMD_GET_FW_VER = 35,
 	OCRDMA_CMD_DELETE_MQ = 53,
 	OCRDMA_CMD_DELETE_CQ = 54,
 	OCRDMA_CMD_DELETE_EQ = 55,
 	OCRDMA_CMD_GET_FW_CONFIG = 58,
-	OCRDMA_CMD_CREATE_MQ_EXT = 90
+	OCRDMA_CMD_CREATE_MQ_EXT = 90,
+	OCRDMA_CMD_PHY_DETAILS = 102
 };
 
 enum {
@@ -578,6 +581,30 @@ enum {
 	OCRDMA_FN_MODE_RDMA = 0x4
 };
 
+struct ocrdma_get_phy_info_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u16 phy_type;
+	u16 interface_type;
+	u32 misc_params;
+	u16 ext_phy_details;
+	u16 rsvd;
+	u16 auto_speeds_supported;
+	u16 fixed_speeds_supported;
+	u32 future_use[2];
+};
+
+enum {
+	OCRDMA_PHY_SPEED_ZERO = 0x0,
+	OCRDMA_PHY_SPEED_10MBPS = 0x1,
+	OCRDMA_PHY_SPEED_100MBPS = 0x2,
+	OCRDMA_PHY_SPEED_1GBPS = 0x4,
+	OCRDMA_PHY_SPEED_10GBPS = 0x8,
+	OCRDMA_PHY_SPEED_40GBPS = 0x20
+};
+
+
 struct ocrdma_get_link_speed_rsp {
 	struct ocrdma_mqe_hdr hdr;
 	struct ocrdma_mbx_rsp rsp;
@@ -1719,4 +1746,208 @@ struct ocrdma_av {
 	u32 valid;
 } __packed;
 
+struct ocrdma_rsrc_stats {
+	u32 dpp_pds;
+	u32 non_dpp_pds;
+	u32 rc_dpp_qps;
+	u32 uc_dpp_qps;
+	u32 ud_dpp_qps;
+	u32 rc_non_dpp_qps;
+	u32 rsvd;
+	u32 uc_non_dpp_qps;
+	u32 ud_non_dpp_qps;
+	u32 rsvd1;
+	u32 srqs;
+	u32 rbqs;
+	u32 r64K_nsmr;
+	u32 r64K_to_2M_nsmr;
+	u32 r2M_to_44M_nsmr;
+	u32 r44M_to_1G_nsmr;
+	u32 r1G_to_4G_nsmr;
+	u32 nsmr_count_4G_to_32G;
+	u32 r32G_to_64G_nsmr;
+	u32 r64G_to_128G_nsmr;
+	u32 r128G_to_higher_nsmr;
+	u32 embedded_nsmr;
+	u32 frmr;
+	u32 prefetch_qps;
+	u32 ondemand_qps;
+	u32 phy_mr;
+	u32 mw;
+	u32 rsvd2[7];
+};
+
+struct ocrdma_db_err_stats {
+	u32 sq_doorbell_errors;
+	u32 cq_doorbell_errors;
+	u32 rq_srq_doorbell_errors;
+	u32 cq_overflow_errors;
+	u32 rsvd[4];
+};
+
+struct ocrdma_wqe_stats {
+	u32 large_send_rc_wqes_lo;
+	u32 large_send_rc_wqes_hi;
+	u32 large_write_rc_wqes_lo;
+	u32 large_write_rc_wqes_hi;
+	u32 rsvd[4];
+	u32 read_wqes_lo;
+	u32 read_wqes_hi;
+	u32 frmr_wqes_lo;
+	u32 frmr_wqes_hi;
+	u32 mw_bind_wqes_lo;
+	u32 mw_bind_wqes_hi;
+	u32 invalidate_wqes_lo;
+	u32 invalidate_wqes_hi;
+	u32 rsvd1[2];
+	u32 dpp_wqe_drops;
+	u32 rsvd2[5];
+};
+
+struct ocrdma_tx_stats {
+	u32 send_pkts_lo;
+	u32 send_pkts_hi;
+	u32 write_pkts_lo;
+	u32 write_pkts_hi;
+	u32 read_pkts_lo;
+	u32 read_pkts_hi;
+	u32 read_rsp_pkts_lo;
+	u32 read_rsp_pkts_hi;
+	u32 ack_pkts_lo;
+	u32 ack_pkts_hi;
+	u32 send_bytes_lo;
+	u32 send_bytes_hi;
+	u32 write_bytes_lo;
+	u32 write_bytes_hi;
+	u32 read_req_bytes_lo;
+	u32 read_req_bytes_hi;
+	u32 read_rsp_bytes_lo;
+	u32 read_rsp_bytes_hi;
+	u32 ack_timeouts;
+	u32 rsvd[5];
+};
+
+
+struct ocrdma_tx_qp_err_stats {
+	u32 local_length_errors;
+	u32 local_protection_errors;
+	u32 local_qp_operation_errors;
+	u32 retry_count_exceeded_errors;
+	u32 rnr_retry_count_exceeded_errors;
+	u32 rsvd[3];
+};
+
+struct ocrdma_rx_stats {
+	u32 roce_frame_bytes_lo;
+	u32 roce_frame_bytes_hi;
+	u32 roce_frame_icrc_drops;
+	u32 roce_frame_payload_len_drops;
+	u32 ud_drops;
+	u32 qp1_drops;
+	u32 psn_error_request_packets;
+	u32 psn_error_resp_packets;
+	u32 rnr_nak_timeouts;
+	u32 rnr_nak_receives;
+	u32 roce_frame_rxmt_drops;
+	u32 nak_count_psn_sequence_errors;
+	u32 rc_drop_count_lookup_errors;
+	u32 rq_rnr_naks;
+	u32 srq_rnr_naks;
+	u32 roce_frames_lo;
+	u32 roce_frames_hi;
+	u32 rsvd;
+};
+
+struct ocrdma_rx_qp_err_stats {
+	u32 nak_invalid_requst_errors;
+	u32 nak_remote_operation_errors;
+	u32 nak_count_remote_access_errors;
+	u32 local_length_errors;
+	u32 local_protection_errors;
+	u32 local_qp_operation_errors;
+	u32 rsvd[2];
+};
+
+struct ocrdma_tx_dbg_stats {
+	u32 data[100];
+};
+
+struct ocrdma_rx_dbg_stats {
+	u32 data[200];
+};
+
+struct ocrdma_rdma_stats_req {
+	struct ocrdma_mbx_hdr hdr;
+	u8 reset_stats;
+	u8 rsvd[3];
+} __packed;
+
+struct ocrdma_rdma_stats_resp {
+	struct ocrdma_mbx_hdr hdr;
+	struct ocrdma_rsrc_stats act_rsrc_stats;
+	struct ocrdma_rsrc_stats th_rsrc_stats;
+	struct ocrdma_db_err_stats db_err_stats;
+	struct ocrdma_wqe_stats wqe_stats;
+	struct ocrdma_tx_stats tx_stats;
+	struct ocrdma_tx_qp_err_stats tx_qp_err_stats;
+	struct ocrdma_rx_stats rx_stats;
+	struct ocrdma_rx_qp_err_stats rx_qp_err_stats;
+	struct ocrdma_tx_dbg_stats tx_dbg_stats;
+	struct ocrdma_rx_dbg_stats rx_dbg_stats;
+} __packed;
+
+
+struct mgmt_hba_attribs {
+	u8 flashrom_version_string[32];
+	u8 manufacturer_name[32];
+	u32 supported_modes;
+	u32 rsvd0[3];
+	u8 ncsi_ver_string[12];
+	u32 default_extended_timeout;
+	u8 controller_model_number[32];
+	u8 controller_description[64];
+	u8 controller_serial_number[32];
+	u8 ip_version_string[32];
+	u8 firmware_version_string[32];
+	u8 bios_version_string[32];
+	u8 redboot_version_string[32];
+	u8 driver_version_string[32];
+	u8 fw_on_flash_version_string[32];
+	u32 functionalities_supported;
+	u16 max_cdblength;
+	u8 asic_revision;
+	u8 generational_guid[16];
+	u8 hba_port_count;
+	u16 default_link_down_timeout;
+	u8 iscsi_ver_min_max;
+	u8 multifunction_device;
+	u8 cache_valid;
+	u8 hba_status;
+	u8 max_domains_supported;
+	u8 phy_port;
+	u32 firmware_post_status;
+	u32 hba_mtu[8];
+	u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+	struct mgmt_hba_attribs hba_attribs;
+	u16 pci_vendor_id;
+	u16 pci_device_id;
+	u16 pci_sub_vendor_id;
+	u16 pci_sub_system_id;
+	u8 pci_bus_number;
+	u8 pci_device_number;
+	u8 pci_function_number;
+	u8 interface_type;
+	u64 unique_identifier;
+	u32 rsvd0[5];
+};
+
+struct ocrdma_get_ctrl_attribs_rsp {
+	struct ocrdma_mbx_hdr hdr;
+	struct mgmt_controller_attrib ctrl_attribs;
+};
+
+
 #endif /* __OCRDMA_SLI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
new file mode 100644
index 000000000000..6c54106f5e64
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -0,0 +1,623 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2014 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for   *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#include <rdma/ib_addr.h>
+#include "ocrdma_stats.h"
+
+static struct dentry *ocrdma_dbgfs_dir;
+
+static int ocrdma_add_stat(char *start, char *pcur,
+			   char *name, u64 count)
+{
+	char buff[128] = {0};
+	int cpy_len = 0;
+
+	snprintf(buff, 128, "%s: %llu\n", name, count);
+	cpy_len = strlen(buff);
+
+	if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
+		pr_err("%s: No space in stats buff\n", __func__);
+		return 0;
+	}
+
+	memcpy(pcur, buff, cpy_len);
+	return cpy_len;
+}
+
+static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
+{
+	struct stats_mem *mem = &dev->stats_mem;
+
+	/* Alloc mbox command mem*/
+	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
+			sizeof(struct ocrdma_rdma_stats_resp));
+
+	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+					 &mem->pa, GFP_KERNEL);
+	if (!mem->va) {
+		pr_err("%s: stats mbox allocation failed\n", __func__);
+		return false;
+	}
+
+	memset(mem->va, 0, mem->size);
+
+	/* Alloc debugfs mem */
+	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
+	if (!mem->debugfs_mem) {
+		pr_err("%s: stats debugfs mem allocation failed\n", __func__);
+		return false;
+	}
+
+	return true;
+}
+
+static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
+{
+	struct stats_mem *mem = &dev->stats_mem;
+
+	if (mem->va)
+		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
+				  mem->va, mem->pa);
+	kfree(mem->debugfs_mem);
+}
+
+static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+			(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
+				(u64)rsrc_stats->dpp_pds);
+	pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
+				(u64)rsrc_stats->non_dpp_pds);
+	pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
+				(u64)rsrc_stats->rc_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
+				(u64)rsrc_stats->uc_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
+				(u64)rsrc_stats->ud_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
+				(u64)rsrc_stats->rc_non_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
+				(u64)rsrc_stats->uc_non_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
+				(u64)rsrc_stats->ud_non_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
+				(u64)rsrc_stats->srqs);
+	pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
+				(u64)rsrc_stats->rbqs);
+	pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
+				(u64)rsrc_stats->r64K_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
+				(u64)rsrc_stats->r64K_to_2M_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
+				(u64)rsrc_stats->r2M_to_44M_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
+				(u64)rsrc_stats->r44M_to_1G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
+				(u64)rsrc_stats->r1G_to_4G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
+				(u64)rsrc_stats->nsmr_count_4G_to_32G);
+	pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
+				(u64)rsrc_stats->r32G_to_64G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
+				(u64)rsrc_stats->r64G_to_128G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
+				(u64)rsrc_stats->r128G_to_higher_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
+				(u64)rsrc_stats->embedded_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
+				(u64)rsrc_stats->frmr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
+				(u64)rsrc_stats->prefetch_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
+				(u64)rsrc_stats->ondemand_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
+				(u64)rsrc_stats->phy_mr);
+	pcur += ocrdma_add_stat(stats, pcur, "active_mw",
+				(u64)rsrc_stats->mw);
+
+	/* Print the threshold stats */
+	rsrc_stats = &rdma_stats->th_rsrc_stats;
+
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
+				(u64)rsrc_stats->dpp_pds);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
+				(u64)rsrc_stats->non_dpp_pds);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
+				(u64)rsrc_stats->rc_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
+				(u64)rsrc_stats->uc_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
+				(u64)rsrc_stats->ud_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
+				(u64)rsrc_stats->rc_non_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
+				(u64)rsrc_stats->uc_non_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
+				(u64)rsrc_stats->ud_non_dpp_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
+				(u64)rsrc_stats->srqs);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
+				(u64)rsrc_stats->rbqs);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
+				(u64)rsrc_stats->r64K_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
+				(u64)rsrc_stats->r64K_to_2M_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
+				(u64)rsrc_stats->r2M_to_44M_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
+				(u64)rsrc_stats->r44M_to_1G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
+				(u64)rsrc_stats->r1G_to_4G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
+				(u64)rsrc_stats->nsmr_count_4G_to_32G);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
+				(u64)rsrc_stats->r32G_to_64G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
+				(u64)rsrc_stats->r64G_to_128G_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
+				(u64)rsrc_stats->r128G_to_higher_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
+				(u64)rsrc_stats->embedded_nsmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
+				(u64)rsrc_stats->frmr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
+				(u64)rsrc_stats->prefetch_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
+				(u64)rsrc_stats->ondemand_qps);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
+				(u64)rsrc_stats->phy_mr);
+	pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
+				(u64)rsrc_stats->mw);
+	return stats;
+}
+
+static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat
+		(stats, pcur, "roce_frame_bytes",
+		 convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+		 rx_stats->roce_frame_bytes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
+				(u64)rx_stats->roce_frame_icrc_drops);
+	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
+				(u64)rx_stats->roce_frame_payload_len_drops);
+	pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
+				(u64)rx_stats->ud_drops);
+	pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
+				(u64)rx_stats->qp1_drops);
+	pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
+				(u64)rx_stats->psn_error_request_packets);
+	pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
+				(u64)rx_stats->psn_error_resp_packets);
+	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
+				(u64)rx_stats->rnr_nak_timeouts);
+	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
+				(u64)rx_stats->rnr_nak_receives);
+	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
+				(u64)rx_stats->roce_frame_rxmt_drops);
+	pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
+				(u64)rx_stats->nak_count_psn_sequence_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
+				(u64)rx_stats->rc_drop_count_lookup_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
+				(u64)rx_stats->rq_rnr_naks);
+	pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
+				(u64)rx_stats->srq_rnr_naks);
+	pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
+				convert_to_64bit(rx_stats->roce_frames_lo,
+						 rx_stats->roce_frames_hi));
+
+	return stats;
+}
+
+static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
+				convert_to_64bit(tx_stats->send_pkts_lo,
+						 tx_stats->send_pkts_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
+				convert_to_64bit(tx_stats->write_pkts_lo,
+						 tx_stats->write_pkts_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
+				convert_to_64bit(tx_stats->read_pkts_lo,
+						 tx_stats->read_pkts_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
+				convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+						 tx_stats->read_rsp_pkts_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
+				convert_to_64bit(tx_stats->ack_pkts_lo,
+						 tx_stats->ack_pkts_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
+				convert_to_64bit(tx_stats->send_bytes_lo,
+						 tx_stats->send_bytes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
+				convert_to_64bit(tx_stats->write_bytes_lo,
+						 tx_stats->write_bytes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
+				convert_to_64bit(tx_stats->read_req_bytes_lo,
+						 tx_stats->read_req_bytes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
+				convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+						 tx_stats->read_rsp_bytes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
+				(u64)tx_stats->ack_timeouts);
+
+	return stats;
+}
+
+static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_wqe_stats	*wqe_stats = &rdma_stats->wqe_stats;
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
+		convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
+				 wqe_stats->large_send_rc_wqes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
+		convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
+				 wqe_stats->large_write_rc_wqes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
+				convert_to_64bit(wqe_stats->read_wqes_lo,
+						 wqe_stats->read_wqes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
+				convert_to_64bit(wqe_stats->frmr_wqes_lo,
+						 wqe_stats->frmr_wqes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
+				convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
+						 wqe_stats->mw_bind_wqes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
+		convert_to_64bit(wqe_stats->invalidate_wqes_lo,
+				 wqe_stats->invalidate_wqes_hi));
+	pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
+				(u64)wqe_stats->dpp_wqe_drops);
+	return stats;
+}
+
+static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
+				(u64)db_err_stats->sq_doorbell_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
+				(u64)db_err_stats->cq_doorbell_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
+				(u64)db_err_stats->rq_srq_doorbell_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
+				(u64)db_err_stats->cq_overflow_errors);
+	return stats;
+}
+
+static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
+		 &rdma_stats->rx_qp_err_stats;
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
+			(u64)rx_qp_err_stats->nak_invalid_requst_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
+			(u64)rx_qp_err_stats->nak_remote_operation_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
+			(u64)rx_qp_err_stats->nak_count_remote_access_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+			(u64)rx_qp_err_stats->local_length_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+			(u64)rx_qp_err_stats->local_protection_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+			(u64)rx_qp_err_stats->local_qp_operation_errors);
+	return stats;
+}
+
+static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
+		&rdma_stats->tx_qp_err_stats;
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+			(u64)tx_qp_err_stats->local_length_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+			(u64)tx_qp_err_stats->local_protection_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+			(u64)tx_qp_err_stats->local_qp_operation_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
+			(u64)tx_qp_err_stats->retry_count_exceeded_errors);
+	pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
+			(u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
+	return stats;
+}
+
+static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
+{
+	int i;
+	char *pstats = dev->stats_mem.debugfs_mem;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_tx_dbg_stats *tx_dbg_stats =
+		&rdma_stats->tx_dbg_stats;
+
+	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	for (i = 0; i < 100; i++)
+		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+				 tx_dbg_stats->data[i]);
+
+	return dev->stats_mem.debugfs_mem;
+}
+
+static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
+{
+	int i;
+	char *pstats = dev->stats_mem.debugfs_mem;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_rx_dbg_stats *rx_dbg_stats =
+		&rdma_stats->rx_dbg_stats;
+
+	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	for (i = 0; i < 200; i++)
+		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+				 rx_dbg_stats->data[i]);
+
+	return dev->stats_mem.debugfs_mem;
+}
+
+static void ocrdma_update_stats(struct ocrdma_dev *dev)
+{
+	ulong now = jiffies, secs;
+	int status = 0;
+
+	secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
+	if (secs) {
+		/* update */
+		status = ocrdma_mbx_rdma_stats(dev, false);
+		if (status)
+			pr_err("%s: stats mbox failed with status = %d\n",
+			       __func__, status);
+		dev->last_stats_time = jiffies;
+	}
+}
+
+static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
+					size_t usr_buf_len, loff_t *ppos)
+{
+	struct ocrdma_stats *pstats = filp->private_data;
+	struct ocrdma_dev *dev = pstats->dev;
+	ssize_t status = 0;
+	char *data = NULL;
+
+	/* No partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	mutex_lock(&dev->stats_lock);
+
+	ocrdma_update_stats(dev);
+
+	switch (pstats->type) {
+	case OCRDMA_RSRC_STATS:
+		data = ocrdma_resource_stats(dev);
+		break;
+	case OCRDMA_RXSTATS:
+		data = ocrdma_rx_stats(dev);
+		break;
+	case OCRDMA_WQESTATS:
+		data = ocrdma_wqe_stats(dev);
+		break;
+	case OCRDMA_TXSTATS:
+		data = ocrdma_tx_stats(dev);
+		break;
+	case OCRDMA_DB_ERRSTATS:
+		data = ocrdma_db_errstats(dev);
+		break;
+	case OCRDMA_RXQP_ERRSTATS:
+		data = ocrdma_rxqp_errstats(dev);
+		break;
+	case OCRDMA_TXQP_ERRSTATS:
+		data = ocrdma_txqp_errstats(dev);
+		break;
+	case OCRDMA_TX_DBG_STATS:
+		data = ocrdma_tx_dbg_stats(dev);
+		break;
+	case OCRDMA_RX_DBG_STATS:
+		data = ocrdma_rx_dbg_stats(dev);
+		break;
+
+	default:
+		status = -EFAULT;
+		goto exit;
+	}
+
+	if (usr_buf_len < strlen(data)) {
+		status = -ENOSPC;
+		goto exit;
+	}
+
+	status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
+					 strlen(data));
+exit:
+	mutex_unlock(&dev->stats_lock);
+	return status;
+}
+
+static int ocrdma_debugfs_open(struct inode *inode, struct file *file)
+{
+	if (inode->i_private)
+		file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations ocrdma_dbg_ops = {
+	.owner = THIS_MODULE,
+	.open = ocrdma_debugfs_open,
+	.read = ocrdma_dbgfs_ops_read,
+};
+
+void ocrdma_add_port_stats(struct ocrdma_dev *dev)
+{
+	if (!ocrdma_dbgfs_dir)
+		return;
+
+	/* Create post stats base dir */
+	dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
+	if (!dev->dir)
+		goto err;
+
+	dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
+	dev->rsrc_stats.dev = dev;
+	if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
+				 &dev->rsrc_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	dev->rx_stats.type = OCRDMA_RXSTATS;
+	dev->rx_stats.dev = dev;
+	if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
+				 &dev->rx_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	dev->wqe_stats.type = OCRDMA_WQESTATS;
+	dev->wqe_stats.dev = dev;
+	if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
+				 &dev->wqe_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	dev->tx_stats.type = OCRDMA_TXSTATS;
+	dev->tx_stats.dev = dev;
+	if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
+				 &dev->tx_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
+	dev->db_err_stats.dev = dev;
+	if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
+				 &dev->db_err_stats, &ocrdma_dbg_ops))
+		goto err;
+
+
+	dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
+	dev->tx_qp_err_stats.dev = dev;
+	if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
+				 &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
+	dev->rx_qp_err_stats.dev = dev;
+	if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
+				 &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
+		goto err;
+
+
+	dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
+	dev->tx_dbg_stats.dev = dev;
+	if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
+				 &dev->tx_dbg_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
+	dev->rx_dbg_stats.dev = dev;
+	if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
+				 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	/* Now create dma_mem for stats mbx command */
+	if (!ocrdma_alloc_stats_mem(dev))
+		goto err;
+
+	mutex_init(&dev->stats_lock);
+
+	return;
+err:
+	ocrdma_release_stats_mem(dev);
+	debugfs_remove_recursive(dev->dir);
+	dev->dir = NULL;
+}
+
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
+{
+	if (!dev->dir)
+		return;
+	mutex_destroy(&dev->stats_lock);
+	ocrdma_release_stats_mem(dev);
+	debugfs_remove(dev->dir);
+}
+
+void ocrdma_init_debugfs(void)
+{
+	/* Create base dir in debugfs root dir */
+	ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
+}
+
+void ocrdma_rem_debugfs(void)
+{
+	debugfs_remove_recursive(ocrdma_dbgfs_dir);
+}
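
Editor's note: with this file in place, each registered device gets a directory under the debugfs "ocrdma" directory, named after the ibdev, containing the files created above (resource_stats, rx_stats, wqe_stats, tx_stats, db_err_stats, tx_qp_err_stats, rx_qp_err_stats, tx_dbg_stats, rx_dbg_stats). The sketch below is a user-space read of one of them and is not part of the patch; the device name "ocrdma0" and the /sys/kernel/debug mount point are assumptions, not taken from the patch.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed path: debugfs mounted at /sys/kernel/debug and a device
	 * registered as "ocrdma0". */
	const char *path = "/sys/kernel/debug/ocrdma/ocrdma0/rx_stats";
	char buf[4096];		/* matches OCRDMA_MAX_DBGFS_MEM in the patch */
	FILE *f = fopen(path, "r");
	size_t n;

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* The read handler rebuilds the whole snapshot on each read and
	 * refuses partial reads (*ppos != 0 returns 0), so one large read
	 * is the right model here. */
	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	printf("%s", buf);
	fclose(f);
	return EXIT_SUCCESS;
}
```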
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
new file mode 100644
index 000000000000..5f5e20c46d7c
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -0,0 +1,54 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2014 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for   *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_STATS_H__
+#define __OCRDMA_STATS_H__
+
+#include <linux/debugfs.h>
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+
+#define OCRDMA_MAX_DBGFS_MEM 4096
+
+enum OCRDMA_STATS_TYPE {
+	OCRDMA_RSRC_STATS,
+	OCRDMA_RXSTATS,
+	OCRDMA_WQESTATS,
+	OCRDMA_TXSTATS,
+	OCRDMA_DB_ERRSTATS,
+	OCRDMA_RXQP_ERRSTATS,
+	OCRDMA_TXQP_ERRSTATS,
+	OCRDMA_TX_DBG_STATS,
+	OCRDMA_RX_DBG_STATS
+};
+
+void ocrdma_rem_debugfs(void);
+void ocrdma_init_debugfs(void);
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
+void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+
+#endif	/* __OCRDMA_STATS_H__ */