author    Harvey Harrison <harvey.harrison@gmail.com>    2009-01-17 20:11:57 -0500
committer Roland Dreier <rolandd@cisco.com>              2009-01-17 20:11:57 -0500
commit    9c3da0991754d480328eeaa2b90cb231a1cea9b6 (patch)
tree      744f69e5b04af0e950658fb26ad95bd18868c8e8
parent    f3b8436ad9a8ad36b3c9fa1fe030c7f38e5d3d0b (diff)
IB: Remove __constant_{endian} uses
The base versions handle constant folding just fine, use them directly.
The replacements are OK in the include/ files as they are not exported
to userspace so we don't need the __ prefixed versions.

This patch does not affect code generation at all.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
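For context, a minimal user-space sketch of the idea the commit relies on
(illustrative only, not the kernel's byteorder headers; the demo_* names are
hypothetical): a single conversion macro can test __builtin_constant_p() and
fold constant arguments at compile time, so a separate __constant_* spelling
adds nothing.

    /*
     * Illustrative only -- not the kernel's implementation; all demo_* names
     * are made up.  One macro serves both constant and runtime arguments.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_CONST_SWAB16(x) \
            ((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) | \
                        (((uint16_t)(x) & 0xff00U) >> 8)))

    static uint16_t demo_swab16_rt(uint16_t x)      /* runtime fallback */
    {
            return DEMO_CONST_SWAB16(x);
    }

    /* Constant arguments fold at compile time; variables call the helper. */
    #define demo_swab16(x) (__builtin_constant_p((uint16_t)(x)) ? \
                            DEMO_CONST_SWAB16(x) : demo_swab16_rt(x))

    /* On a little-endian host this plays the role of cpu_to_be16(). */
    #define demo_cpu_to_be16(x) demo_swab16(x)

    int main(void)
    {
            uint16_t attr_id = demo_cpu_to_be16(0x0010); /* folds to 0x1000 */
            uint16_t n = 0x0010;

            printf("0x%04x 0x%04x\n", attr_id, demo_cpu_to_be16(n));
            return 0;
    }

In the kernel the same selection between a constant-folded swap and a runtime
helper is made inside the generic byteorder/swab machinery, which is why the
__constant_* spellings removed here were redundant.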
-rw-r--r--  drivers/infiniband/core/cm.c                   |  15
-rw-r--r--  drivers/infiniband/core/cm_msgs.h              |  22
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c             |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c          |   4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c          |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c     |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c        |  95
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c         |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c       |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c         |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c         |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c  |   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c      |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h      |  10
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                |  22
-rw-r--r--  include/rdma/ib_cm.h                           |  12
-rw-r--r--  include/rdma/ib_mad.h                          |   2
-rw-r--r--  include/rdma/ib_smi.h                          |  34
18 files changed, 124 insertions(+), 126 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f1e82a92e61e..5130fc55b8e2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	unsigned long flags;
 	int ret = 0;
 
-	service_mask = service_mask ? service_mask :
-				      __constant_cpu_to_be64(~0ULL);
+	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
 		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+		cm_id->service_mask = ~cpu_to_be64(0);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				    param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		goto out;
 
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
 	cm.listen_service_table = RB_ROOT;
-	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index aec9c7af825d..7e63c08f697c 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,17 +44,17 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
-#define CM_REQ_ATTR_ID		__constant_htons(0x0010)
-#define CM_MRA_ATTR_ID		__constant_htons(0x0011)
-#define CM_REJ_ATTR_ID		__constant_htons(0x0012)
-#define CM_REP_ATTR_ID		__constant_htons(0x0013)
-#define CM_RTU_ATTR_ID		__constant_htons(0x0014)
-#define CM_DREQ_ATTR_ID		__constant_htons(0x0015)
-#define CM_DREP_ATTR_ID		__constant_htons(0x0016)
-#define CM_SIDR_REQ_ATTR_ID	__constant_htons(0x0017)
-#define CM_SIDR_REP_ATTR_ID	__constant_htons(0x0018)
-#define CM_LAP_ATTR_ID		__constant_htons(0x0019)
-#define CM_APR_ATTR_ID		__constant_htons(0x001A)
+#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)
 
 enum cm_msg_sequence {
 	CM_MSG_SEQUENCE_REQ,
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 3af2b84cd838..57a3c6f947b2 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
 		goto bad;
 	}
 
-	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
 		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
 			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
 			goto bad;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 19661b2f0406..48e2b0bcabd0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
 		wqe->write.sgl[0].stag = wr->ex.imm_data;
-		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
-		wqe->write.num_sgle = __constant_cpu_to_be32(0);
+		wqe->write.sgl[0].len = cpu_to_be32(0);
+		wqe->write.num_sgle = cpu_to_be32(0);
 		*flit_cnt = 6;
 	} else {
 		plen = 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 44447aaa5501..c568b28f4e20 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -46,11 +46,11 @@
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
-#define IB_MAD_STATUS_REDIRECT		__constant_htons(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION	__constant_htons(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD	__constant_htons(0x0008)
+#define IB_MAD_STATUS_REDIRECT		cpu_to_be16(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION	cpu_to_be16(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD	cpu_to_be16(0x0008)
 
-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
 
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index dc37277f1c80..fc7181985e8e 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
 			      "0x%x, not 0x%x\n", csum, ifp->if_csum);
 		goto done;
 	}
-	if (*(__be64 *) ifp->if_guid == 0ULL ||
-	    *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
+	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
 		ipath_dev_err(dd, "Invalid GUID %llx from flash; "
 			      "ignoring\n",
 			      *(unsigned long long *) ifp->if_guid);
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 17a123197477..16a702d46018 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -37,10 +37,10 @@
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-#define IB_SMP_UNSUP_VERSION	__constant_htons(0x0004)
-#define IB_SMP_UNSUP_METHOD	__constant_htons(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR	__constant_htons(0x000C)
-#define IB_SMP_INVALID_FIELD	__constant_htons(0x001C)
+#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
 
 static int reply(struct ib_smp *smp)
 {
@@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
 	return recv_subn_get_pkeytable(smp, ibdev);
 }
 
-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL	__constant_htons(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT	__constant_htons(0x0011)
-#define IB_PMA_PORT_COUNTERS		__constant_htons(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT	__constant_htons(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT	__constant_htons(0x001E)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)
 
 struct ib_perf {
 	u8 base_version;
@@ -884,19 +884,19 @@ struct ib_pma_portcounters {
 	__be32 port_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SEL_SYMBOL_ERROR			__constant_htons(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY		__constant_htons(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED			__constant_htons(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS		__constant_htons(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	__constant_htons(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS		__constant_htons(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	__constant_htons(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	__constant_htons(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED		__constant_htons(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA		__constant_htons(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA		__constant_htons(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS		__constant_htons(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS		__constant_htons(0x8000)
+#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)
 
 struct ib_pma_portcounters_ext {
 	u8 reserved;
@@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext {
 	__be64 port_multicast_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SELX_PORT_XMIT_DATA		__constant_htons(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA		__constant_htons(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS		__constant_htons(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS		__constant_htons(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	__constant_htons(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	__constant_htons(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	__constant_htons(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	__constant_htons(0x0080)
+#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)
 
 static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 {
@@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = __constant_cpu_to_be16(1 << 8);
+	p->cap_mask = cpu_to_be16(1 << 8);
 	p->base_version = 1;
 	p->class_version = 1;
 	/*
@@ -951,12 +951,11 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
  * We support 5 counters which only count the mandatory quantities.
  */
 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
-	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
-			       COUNTER_MASK(1, 1) | \
-			       COUNTER_MASK(1, 2) | \
-			       COUNTER_MASK(1, 3) | \
-			       COUNTER_MASK(1, 4))
+#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
+				    COUNTER_MASK(1, 1) | \
+				    COUNTER_MASK(1, 2) | \
+				    COUNTER_MASK(1, 3) | \
+				    COUNTER_MASK(1, 4))
 
 static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 					   struct ib_device *ibdev, u8 port)
@@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	status = dev->pma_sample_status;
 	p->sample_status = cpu_to_be16(status);
 	/* 64 bits */
-	p->extended_width = __constant_cpu_to_be32(0x80000000);
+	p->extended_width = cpu_to_be32(0x80000000);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
 		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
 		    cpu_to_be64(
@@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
-		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
 	else
 		p->symbol_error_counter =
 			cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	else
 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
 	if (cntrs.port_rcv_errors > 0xFFFFUL)
-		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_errors =
 			cpu_to_be16((u16) cntrs.port_rcv_errors);
 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_remphys_errors =
 			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
 	if (cntrs.port_xmit_discards > 0xFFFFUL)
-		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
 	else
 		p->port_xmit_discards =
 			cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
-		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
 	else
 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
 	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
 	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_packets =
 			cpu_to_be32((u32)cntrs.port_xmit_packets);
 	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_packets =
 			cpu_to_be32((u32) cntrs.port_rcv_packets);
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 9170710b950d..79b3dbc97179 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 8e255adf5d9b..4b0698590850 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -781,10 +781,10 @@ retry:
 	descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
 	descqp -= 2;
 	/* SDmaLastDesc */
-	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
+	descqp[0] |= cpu_to_le64(1ULL << 11);
 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
 		/* SDmaIntReq */
-		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
+		descqp[0] |= cpu_to_le64(1ULL << 15);
 	}
 
 	/* Commit writes to memory and advance the tail on the chip */
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 82cc588b8bf2..22e60998f1a7 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -419,7 +419,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 91c74cc797ae..6076cb61bf6a 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -370,7 +370,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 	 */
 	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
 		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
+		cpu_to_be32(IPATH_MULTICAST_QPN) :
 		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
 	/*
@@ -573,7 +573,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		       (ohdr->bth[0] &
-			__constant_cpu_to_be32(1 << 23)) != 0);
+			cpu_to_be32(1 << 23)) != 0);
 
 bail:;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 82d9a0b5ca2f..7bff4b9baa0a 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -667,13 +667,13 @@ static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
 
 static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
 {
-	return descq | __constant_cpu_to_le64(1ULL << 12);
+	return descq | cpu_to_le64(1ULL << 12);
 }
 
 static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
 {
 					      /* last */ /* dma head */
-	return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
+	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
 }
 
 static inline __le64 ipath_sdma_make_desc1(u64 addr)
@@ -763,7 +763,7 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
 	if (ofs >= IPATH_SMALLBUF_DWORDS) {
 		for (i = 0; i < pkt->naddr; i++) {
 			dd->ipath_sdma_descq[dtail].qw[0] |=
-				__constant_cpu_to_le64(1ULL << 14);
+				cpu_to_le64(1ULL << 14);
 			if (++dtail == dd->ipath_sdma_descq_cnt)
 				dtail = 0;
 		}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index cdf0e6abd34d..9289ab4b0ae8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1585,7 +1585,7 @@ static int ipath_query_port(struct ib_device *ibdev,
 	u64 ibcstat;
 
 	memset(props, 0, sizeof(*props));
-	props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
 	props->lmc = dd->ipath_lmc;
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 11e3f613df93..ae6cff4abffc 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -86,11 +86,11 @@
 #define IB_PMA_SAMPLE_STATUS_RUNNING	0x02
 
 /* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
-#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
-#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)
+#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)
 
 struct ib_reth {
 	__be64 vaddr;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a91cb4c3fa5c..f385a24d31d2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,17 +71,17 @@ enum {
 };
 
 static const __be32 mlx4_ib_opcode[] = {
-	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
-	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
-	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
-	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
-	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
-	[IB_WR_RDMA_READ]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
-	[IB_WR_ATOMIC_CMP_AND_SWP]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
-	[IB_WR_ATOMIC_FETCH_AND_ADD]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
-	[IB_WR_SEND_WITH_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
-	[IB_WR_LOCAL_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
-	[IB_WR_FAST_REG_MR]		= __constant_cpu_to_be32(MLX4_OPCODE_FMR),
+	[IB_WR_SEND]			= cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= cpu_to_be32(MLX4_OPCODE_LSO),
+	[IB_WR_SEND_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+	[IB_WR_RDMA_WRITE]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+	[IB_WR_RDMA_WRITE_WITH_IMM]	= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+	[IB_WR_RDMA_READ]		= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+	[IB_WR_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+	[IB_WR_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+	[IB_WR_SEND_WITH_INV]		= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+	[IB_WR_LOCAL_INV]		= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+	[IB_WR_FAST_REG_MR]		= cpu_to_be32(MLX4_OPCODE_FMR),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index ec7c6d99ed3f..938858304300 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -314,12 +314,12 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
  */
 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
 
-#define IB_SERVICE_ID_AGN_MASK	__constant_cpu_to_be64(0xFF00000000000000ULL)
-#define IB_CM_ASSIGN_SERVICE_ID	__constant_cpu_to_be64(0x0200000000000000ULL)
-#define IB_CMA_SERVICE_ID	__constant_cpu_to_be64(0x0000000001000000ULL)
-#define IB_CMA_SERVICE_ID_MASK	__constant_cpu_to_be64(0xFFFFFFFFFF000000ULL)
-#define IB_SDP_SERVICE_ID	__constant_cpu_to_be64(0x0000000000010000ULL)
-#define IB_SDP_SERVICE_ID_MASK	__constant_cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
+#define IB_SERVICE_ID_AGN_MASK	cpu_to_be64(0xFF00000000000000ULL)
+#define IB_CM_ASSIGN_SERVICE_ID	cpu_to_be64(0x0200000000000000ULL)
+#define IB_CMA_SERVICE_ID	cpu_to_be64(0x0000000001000000ULL)
+#define IB_CMA_SERVICE_ID_MASK	cpu_to_be64(0xFFFFFFFFFF000000ULL)
+#define IB_SDP_SERVICE_ID	cpu_to_be64(0x0000000000010000ULL)
+#define IB_SDP_SERVICE_ID_MASK	cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
 
 struct ib_cm_compare_data {
 	u8  data[IB_CM_COMPARE_SIZE];
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 5f6c40fffcf4..8cc71a130d1b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -107,7 +107,7 @@
 #define IB_MGMT_RMPP_STATUS_ABORT_MAX		127
 
 #define IB_QP0		0
-#define IB_QP1		__constant_htonl(1)
+#define IB_QP1		cpu_to_be32(1)
 #define IB_QP1_QKEY	0x80010000
 #define IB_QP_SET_QKEY	0x80000000
 
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index aaca0878668f..98b9086d769a 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -63,25 +63,25 @@ struct ib_smp {
 	u8	return_path[IB_SMP_MAX_PATH_HOPS];
 } __attribute__ ((packed));
 
-#define IB_SMP_DIRECTION			__constant_htons(0x8000)
+#define IB_SMP_DIRECTION			cpu_to_be16(0x8000)
 
 /* Subnet management attributes */
-#define IB_SMP_ATTR_NOTICE			__constant_htons(0x0002)
-#define IB_SMP_ATTR_NODE_DESC			__constant_htons(0x0010)
-#define IB_SMP_ATTR_NODE_INFO			__constant_htons(0x0011)
-#define IB_SMP_ATTR_SWITCH_INFO			__constant_htons(0x0012)
-#define IB_SMP_ATTR_GUID_INFO			__constant_htons(0x0014)
-#define IB_SMP_ATTR_PORT_INFO			__constant_htons(0x0015)
-#define IB_SMP_ATTR_PKEY_TABLE			__constant_htons(0x0016)
-#define IB_SMP_ATTR_SL_TO_VL_TABLE		__constant_htons(0x0017)
-#define IB_SMP_ATTR_VL_ARB_TABLE		__constant_htons(0x0018)
-#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE	__constant_htons(0x0019)
-#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE	__constant_htons(0x001A)
-#define IB_SMP_ATTR_MCAST_FORWARD_TABLE		__constant_htons(0x001B)
-#define IB_SMP_ATTR_SM_INFO			__constant_htons(0x0020)
-#define IB_SMP_ATTR_VENDOR_DIAG			__constant_htons(0x0030)
-#define IB_SMP_ATTR_LED_INFO			__constant_htons(0x0031)
-#define IB_SMP_ATTR_VENDOR_MASK			__constant_htons(0xFF00)
+#define IB_SMP_ATTR_NOTICE			cpu_to_be16(0x0002)
+#define IB_SMP_ATTR_NODE_DESC			cpu_to_be16(0x0010)
+#define IB_SMP_ATTR_NODE_INFO			cpu_to_be16(0x0011)
+#define IB_SMP_ATTR_SWITCH_INFO			cpu_to_be16(0x0012)
+#define IB_SMP_ATTR_GUID_INFO			cpu_to_be16(0x0014)
+#define IB_SMP_ATTR_PORT_INFO			cpu_to_be16(0x0015)
+#define IB_SMP_ATTR_PKEY_TABLE			cpu_to_be16(0x0016)
+#define IB_SMP_ATTR_SL_TO_VL_TABLE		cpu_to_be16(0x0017)
+#define IB_SMP_ATTR_VL_ARB_TABLE		cpu_to_be16(0x0018)
+#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE	cpu_to_be16(0x0019)
+#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE	cpu_to_be16(0x001A)
+#define IB_SMP_ATTR_MCAST_FORWARD_TABLE		cpu_to_be16(0x001B)
+#define IB_SMP_ATTR_SM_INFO			cpu_to_be16(0x0020)
+#define IB_SMP_ATTR_VENDOR_DIAG			cpu_to_be16(0x0030)
+#define IB_SMP_ATTR_LED_INFO			cpu_to_be16(0x0031)
+#define IB_SMP_ATTR_VENDOR_MASK			cpu_to_be16(0xFF00)
 
 struct ib_port_info {
 	__be64 mkey;