author    Vipul Pandya <vipul@chelsio.com>    2013-01-07 08:12:00 -0500
committer Roland Dreier <roland@purestorage.com>    2013-02-14 18:51:58 -0500
commit    ef5d6355ed4bcf574e8473c3ce667cbf6c66a0ee (patch)
tree      194ade849c55b53f8189f7a46bdd3697ffdbdb0a
parent    b3de6cfebc6167761c40947f05f4c817531f37d5 (diff)
RDMA/cxgb4: Address sparse warnings
Fix the following types of sparse warnings:

- cast to pointer from integer of different size
- cast from pointer to integer of different size
- incorrect type in assignment (different base types)
- incorrect type in argument 1 (different base types)
- cast from restricted __be64
- cast from restricted __be32

Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
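Background note (not part of the patch): __be32/__be64 are sparse "__bitwise" types that the checker treats as incompatible with plain integers, and a __force cast tells sparse that a raw reinterpretation is intentional. The stand-alone sketch below only illustrates that mechanism; the type name be32, the struct hw_desc, and the use of userspace htonl() in place of cpu_to_be32() are illustrative stand-ins, not kernel code. For kernels of this era the endianness warnings generally appear only when sparse runs with endian checking enabled, roughly "make C=2 CF=-D__CHECK_ENDIAN__".

/* sparse_endian_demo.c -- minimal stand-alone sketch (illustrative only).
 * The macro fallbacks mirror what <linux/types.h> does when sparse is not
 * the checker, so this file also builds with plain gcc.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl(): userspace stand-in for cpu_to_be32() */

#ifdef __CHECKER__		/* defined only while sparse is checking */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise be32;	/* stand-in for the kernel's __be32 */

struct hw_desc {			/* hypothetical hardware descriptor */
	be32 opt2;			/* device expects big-endian bits here */
};

int main(void)
{
	struct hw_desc d;
	uint32_t host_val = 0x1234;

	/* Assigning host_val directly would draw
	 *   "incorrect type in assignment (different base types)"
	 * because a plain integer is not an annotated be32.  The __force
	 * cast states that the raw bit pattern is intended, in the same way
	 * the (__force __be32)/(__force __be64) casts do in this patch.
	 */
	d.opt2 = (__force be32) htonl(host_val);

	/* Converting back without __force would draw
	 *   "cast from restricted be32"
	 */
	printf("opt2 on the wire: %#x\n", (__force uint32_t) d.opt2);
	return 0;
}

With the two __force casts removed, sparse should flag both lines with the warning texts named in the commit message above; gcc itself stays silent either way.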
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c      68
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c   3
2 files changed, 40 insertions, 31 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 37ea2fcf3b10..dfca515cc933 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1492,11 +1492,11 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 			V_FW_OFLD_CONNECTION_WR_ASTID(atid));
 	req->tcb.cplrxdataack_cplpassacceptrpl =
 		htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
-	req->tcb.tx_max = jiffies;
+	req->tcb.tx_max = (__force __be32) jiffies;
 	req->tcb.rcv_adv = htons(1);
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
-	req->tcb.opt0 = TCAM_BYPASS(1) |
+	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
 		(nocong ? NO_CONG(1) : 0) |
 		KEEP_ALIVE(1) |
 		DELACK(1) |
@@ -1507,20 +1507,20 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 		SMAC_SEL(ep->smac_idx) |
 		DSCP(ep->tos) |
 		ULP_MODE(ULP_MODE_TCPDDP) |
-		RCV_BUFSIZ(rcv_win >> 10);
-	req->tcb.opt2 = PACE(1) |
+		RCV_BUFSIZ(rcv_win >> 10));
+	req->tcb.opt2 = (__force __be32) (PACE(1) |
 		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
 		RX_CHANNEL(0) |
 		CCTRL_ECN(enable_ecn) |
-		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
 	if (enable_tcp_timestamps)
-		req->tcb.opt2 |= TSTAMPS_EN(1);
+		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
 	if (enable_tcp_sack)
-		req->tcb.opt2 |= SACK_EN(1);
+		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
-		req->tcb.opt2 |= WND_SCALE_EN(1);
-	req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
-	req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
+	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
+	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
 	set_bit(ACT_OFLD_CONN, &ep->com.history);
 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -2773,7 +2773,8 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 	struct c4iw_ep *ep;
 	int atid = be32_to_cpu(req->tid);

-	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
+					   (__force u32) req->tid);
 	if (!ep)
 		return;

@@ -2817,7 +2818,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 	struct cpl_pass_accept_req *cpl;
 	int ret;

-	rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+	rpl_skb = (__force struct sk_buff *)cpu_to_be64(req->cookie);
 	BUG_ON(!rpl_skb);
 	if (req->retval) {
 		PDBG("%s passive open failure %d\n", __func__, req->retval);
@@ -2828,7 +2829,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 	} else {
 		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
 		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
-					htonl(req->tid)));
+					(__force u32) htonl(
+					(__force u32) req->tid)));
 		ret = pass_accept_req(dev, rpl_skb);
 		if (!ret)
 			kfree_skb(rpl_skb);
@@ -2874,10 +2876,10 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	struct tcp_options_received tmp_opt;

 	/* Store values from cpl_rx_pkt in temporary location. */
-	vlantag = cpl->vlan;
-	len = cpl->len;
-	l2info = cpl->l2info;
-	hdr_len = cpl->hdr_len;
+	vlantag = (__force u16) cpl->vlan;
+	len = (__force u16) cpl->len;
+	l2info = (__force u32) cpl->l2info;
+	hdr_len = (__force u16) cpl->hdr_len;
 	intf = cpl->iff;

 	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
@@ -2888,19 +2890,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	 */
 	memset(&tmp_opt, 0, sizeof(tmp_opt));
 	tcp_clear_options(&tmp_opt);
-	tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+	tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);

 	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
 	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
-			 V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+			 V_SYN_MAC_IDX(G_RX_MACIDX(
+			 (__force int) htonl(l2info))) |
 			 F_SYN_XACT_MATCH);
-	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
-			 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
-			 V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
-			 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
-	req->vlan = vlantag;
-	req->len = len;
+	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+			 (__force int) htonl(l2info))) |
+			 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+			 (__force int) htons(hdr_len))) |
+			 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+			 (__force int) htons(hdr_len))) |
+			 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
+			 (__force int) htonl(l2info))));
+	req->vlan = (__force __be16) vlantag;
+	req->len = (__force __be16) len;
 	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
 			PASS_OPEN_TOS(tos));
 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
@@ -2929,7 +2936,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
 	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
-	req->le.filter = filter;
+	req->le.filter = (__force __be32) filter;
 	req->le.lport = lport;
 	req->le.pport = rport;
 	req->le.u.ipv4.lip = laddr;
@@ -2955,7 +2962,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	 * TP will ignore any value > 0 for MSS index.
 	 */
 	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
-	req->cookie = cpu_to_be64((u64)skb);
+	req->cookie = (__force __u64) cpu_to_be64((u64)skb);

 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
 	cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3005,7 +3012,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
 	 */
-	stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
+					- dev->rdev.lldi.tids->sftid_base
 					+ dev->rdev.lldi.tids->nstids;

 	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
@@ -3066,10 +3074,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)

 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
-	window = htons(tcph->window);
+	window = (__force u16) htons((__force u16)tcph->window);

 	/* Calcuate filter portion for LE region. */
-	filter = cpu_to_be32(select_ntuple(dev, dst, e));
+	filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));

 	/*
 	 * Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ba11c76c0b5a..dc6adb213cd6 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -797,7 +797,8 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
797 "RSS %#llx, FL %#llx, len %u\n", 797 "RSS %#llx, FL %#llx, len %u\n",
798 pci_name(ctx->lldi.pdev), gl->va, 798 pci_name(ctx->lldi.pdev), gl->va,
799 (unsigned long long)be64_to_cpu(*rsp), 799 (unsigned long long)be64_to_cpu(*rsp),
800 (unsigned long long)be64_to_cpu(*(u64 *)gl->va), 800 (unsigned long long)be64_to_cpu(
801 *(__force __be64 *)gl->va),
801 gl->tot_len); 802 gl->tot_len);
802 803
803 return 0; 804 return 0;