 drivers/infiniband/hw/mlx5/cq.c                     | 2 +-
 drivers/infiniband/hw/mlx5/mad.c                    | 2 +-
 drivers/infiniband/hw/mlx5/main.c                   | 2 +-
 drivers/infiniband/hw/mlx5/mem.c                    | 2 +-
 drivers/infiniband/hw/mlx5/mlx5_ib.h                | 2 +-
 drivers/infiniband/hw/mlx5/qp.c                     | 4 ++--
 drivers/net/ethernet/mellanox/mlx5/core/alloc.c     | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c       | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/eq.c        | 3 ++-
 drivers/net/ethernet/mellanox/mlx5/core/mad.c       | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/main.c      | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/port.c      | 2 +-
 include/linux/mlx5/device.h                         | 4 ----
 include/linux/mlx5/driver.h                         | 8 ++++----
 15 files changed, 19 insertions(+), 22 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 3b4dc858cef9..e4056279166d 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -348,7 +348,7 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 			   u16 tail, u16 head)
 {
-	int idx;
+	u16 idx;
 
 	do {
 		idx = tail & (qp->sq.wqe_cnt - 1);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index e259e7393152..b514bbb5610f 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -41,7 +41,7 @@ enum {
 };
 
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
 		 void *in_mad, void *response_mad)
 {
 	u8 op_modifier = 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f2cfd363a705..166335a95c59 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -478,7 +478,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	int uuarn;
 	int err;
 	int i;
-	int reqlen;
+	size_t reqlen;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 8499aec94db6..a3e81444c825 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -148,7 +148,7 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
 	u64 off_mask;
 	u64 buf_off;
 
-	page_size = 1 << page_shift;
+	page_size = (u64)1 << page_shift;
 	page_mask = page_size - 1;
 	buf_off = addr & page_mask;
 	off_size = page_size >> 6;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a0e204ffe367..386780f0d1e1 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -461,7 +461,7 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
 		 void *in_mad, void *response_mad);
 struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
 			   struct mlx5_ib_ah *ah);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index b8bb6ad6350c..7efe6e3f3542 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2539,7 +2539,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 			set_raddr_seg(seg, wr->wr.rdma.remote_addr,
 				      wr->wr.rdma.rkey);
-			seg  += sizeof(struct mlx5_wqe_raddr_seg);
+			seg += sizeof(struct mlx5_wqe_raddr_seg);
 			size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
 			break;
 
@@ -2668,7 +2668,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_QPT_SMI:
 		case IB_QPT_GSI:
 			set_datagram_seg(seg, wr);
-			seg  += sizeof(struct mlx5_wqe_datagram_seg);
+			seg += sizeof(struct mlx5_wqe_datagram_seg);
 			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
 			if (unlikely((seg == qend)))
 				seg = mlx5_get_send_wqe(qp, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index b215742b842f..56779c1c7811 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -56,7 +56,7 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
 	if (size <= max_direct) {
 		buf->nbufs = 1;
 		buf->npages = 1;
-		buf->page_shift = get_order(size) + PAGE_SHIFT;
+		buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
 		buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
 						      size, &t, GFP_KERNEL);
 		if (!buf->direct.buf)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 87d1b018a9c3..4671747dd365 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -464,7 +464,7 @@ static void dump_command(struct mlx5_core_dev *dev,
 	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
 	struct mlx5_cmd_mailbox *next = msg->next;
 	int data_only;
-	int offset = 0;
+	u32 offset = 0;
 	int dump_len;
 
 	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 7f39ebcd6ad0..67cead2c079e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -252,7 +252,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
 			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
 			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
-				dev->event(dev, port_subtype_event(eqe->sub_type), &port);
+				if (dev->event)
+					dev->event(dev, port_subtype_event(eqe->sub_type), &port);
 				break;
 			default:
 				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index 18d6fd5dd90b..fd80ecfa7195 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -37,7 +37,7 @@
 #include "mlx5_core.h"
 
 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
-		      u16 opmod, int port)
+		      u16 opmod, u8 port)
 {
 	struct mlx5_mad_ifc_mbox_in *in = NULL;
 	struct mlx5_mad_ifc_mbox_out *out = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4b7f9da4bf11..fd782bf49dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -311,7 +311,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 
 	copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
 
-	if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
+	if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
 		set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 
 	flags = be64_to_cpu(query_out->hca_cap.flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index c2a953ef0e67..d476918ef269 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -51,7 +51,7 @@ enum {
 
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
-	u32 func_id;
+	u16 func_id;
 	s32 npages;
 	struct work_struct work;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 8c9ac870ecb1..313965853e10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -86,7 +86,7 @@ struct mlx5_reg_pcap {
 	__be32 caps_31_0;
 };
 
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps)
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
 {
 	struct mlx5_reg_pcap in;
 	struct mlx5_reg_pcap out;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 3406cfb1267a..334947151dfc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -456,9 +456,6 @@ struct mlx5_eqe_cq_err {
 	u8 syndrome;
 };
 
-struct mlx5_eqe_dropped_packet {
-};
-
 struct mlx5_eqe_port_state {
 	u8 reserved0[8];
 	u8 port;
@@ -498,7 +495,6 @@ union ev_data {
 	struct mlx5_eqe_comp comp;
 	struct mlx5_eqe_qp_srq qp_srq;
 	struct mlx5_eqe_cq_err cq_err;
-	struct mlx5_eqe_dropped_packet dp;
 	struct mlx5_eqe_port_state port;
 	struct mlx5_eqe_gpio gpio;
 	struct mlx5_eqe_congestion cong;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index d0cb5984a45f..76de0cc41640 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -381,8 +381,8 @@ struct mlx5_buf {
 	struct mlx5_buf_list *page_list;
 	int nbufs;
 	int npages;
-	int page_shift;
 	int size;
+	u8 page_shift;
 };
 
 struct mlx5_eq {
@@ -736,7 +736,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
-		      u16 opmod, int port);
+		      u16 opmod, u8 port);
 void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
@@ -769,7 +769,7 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 			 int size_in, void *data_out, int size_out,
 			 u16 reg_num, int arg, int write);
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -826,7 +826,7 @@ void mlx5_unregister_interface(struct mlx5_interface *intf);
 
 struct mlx5_profile {
 	u64 mask;
-	u32 log_max_qp;
+	u8 log_max_qp;
 	struct {
 		int size;
 		int limit;