Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/mthca/mthca_av.c       |  24
 drivers/infiniband/hw/mthca/mthca_cmd.c      |  40
 drivers/infiniband/hw/mthca/mthca_cq.c       |  89
 drivers/infiniband/hw/mthca/mthca_dev.h      |  15
 drivers/infiniband/hw/mthca/mthca_doorbell.h |  12
 drivers/infiniband/hw/mthca/mthca_eq.c       |  62
 drivers/infiniband/hw/mthca/mthca_mad.c      |   2
 drivers/infiniband/hw/mthca/mthca_mcg.c      |  36
 drivers/infiniband/hw/mthca/mthca_memfree.c  |   4
 drivers/infiniband/hw/mthca/mthca_memfree.h  |   4
 drivers/infiniband/hw/mthca/mthca_mr.c       |  32
 drivers/infiniband/hw/mthca/mthca_provider.c |  16
 drivers/infiniband/hw/mthca/mthca_provider.h |   6
 drivers/infiniband/hw/mthca/mthca_qp.c       | 187
 14 files changed, 266 insertions(+), 263 deletions(-)
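Every hunk below follows the same pattern: fields that hold big-endian data on the wire or in hardware-owned memory move from plain u16/u32/u64 to the sparse bitwise types __be16/__be32/__be64, values are converted exactly once with cpu_to_beXX()/beXX_to_cpu(), and the few places that deliberately hand an already-swapped value to a raw accessor gain a (__force ...) cast. A minimal sketch of the annotation idea, using hypothetical names that are not taken from the patch:

/* Sketch only -- example_hw_ctx and example_fill are made-up names. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hw_ctx {
        __be32 flags;           /* big-endian as seen by the HCA */
        __be32 qpn;
        u32    reserved;        /* never interpreted, so left plain */
};

static void example_fill(struct example_hw_ctx *ctx, u32 flags, u32 qpn)
{
        ctx->flags = cpu_to_be32(flags);   /* ok: __be32 = cpu_to_be32(u32) */
        ctx->qpn   = cpu_to_be32(qpn);
        /*
         * ctx->qpn = qpn; would be flagged by sparse when the tree is
         * checked with: make C=2 CF="-D__CHECK_ENDIAN__"
         */
}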
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index d58dcbe6648..e596210f11b 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -41,16 +41,16 @@
 #include "mthca_dev.h"

 struct mthca_av {
-u32 port_pd;
+__be32 port_pd;
 u8 reserved1;
 u8 g_slid;
-u16 dlid;
+__be16 dlid;
 u8 reserved2;
 u8 gid_index;
 u8 msg_sr;
 u8 hop_limit;
-u32 sl_tclass_flowlabel;
-u32 dgid[4];
+__be32 sl_tclass_flowlabel;
+__be32 dgid[4];
 };

 int mthca_create_ah(struct mthca_dev *dev,
@@ -128,7 +128,7 @@ on_hca_fail:
 av, (unsigned long) ah->avdma);
 for (j = 0; j < 8; ++j)
 printk(KERN_DEBUG " [%2x] %08x\n",
-j * 4, be32_to_cpu(((u32 *) av)[j]));
+j * 4, be32_to_cpu(((__be32 *) av)[j]));
 }

 if (ah->type == MTHCA_AH_ON_HCA) {
@@ -169,7 +169,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,

 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
 header->lrh.destination_lid = ah->av->dlid;
-header->lrh.source_lid = ah->av->g_slid & 0x7f;
+header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
 if (ah->av->g_slid & 0x80) {
 header->grh_present = 1;
 header->grh.traffic_class =
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 0ff5900e093..1e60487ecd7 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -220,20 +220,20 @@ static int mthca_cmd_post(struct mthca_dev *dev,
 * (and some architectures such as ia64 implement memcpy_toio
 * in terms of writeb).
 */
-__raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4);
-__raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4);
-__raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4);
-__raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4);
-__raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
-__raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4);
+__raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4);
+__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4);
+__raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4);
+__raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4);
+__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
+__raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4);

 /* __raw_writel may not order writes. */
 wmb();

-__raw_writel(cpu_to_be32((1 << HCR_GO_BIT) |
+__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
 (event ? (1 << HCA_E_BIT) : 0) |
 (op_modifier << HCR_OPMOD_SHIFT) |
 op), dev->hcr + 6 * 4);

 out:
 up(&dev->cmd.hcr_sem);
@@ -274,12 +274,14 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
 goto out;
 }

-if (out_is_imm) {
-memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64));
-be64_to_cpus(out_param);
-}
+if (out_is_imm)
+*out_param =
+(u64) be32_to_cpu((__force __be32)
+__raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
+(u64) be32_to_cpu((__force __be32)
+__raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));

-*status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+*status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;

 out:
 up(&dev->cmd.poll_sem);
@@ -1122,7 +1124,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
 u8 *status)
 {
 struct mthca_mailbox *mailbox;
-u32 *inbox;
+__be32 *inbox;
 int err;

 #define INIT_HCA_IN_SIZE 0x200
@@ -1343,7 +1345,7 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
 {
 struct mthca_mailbox *mailbox;
-u64 *inbox;
+__be64 *inbox;
 int err;

 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -1514,7 +1516,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
 if (i % 8 == 0)
 printk(" [%02x] ", i * 4);
 printk(" %08x",
-be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
+be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
 if ((i + 1) % 8 == 0)
 printk("\n");
 }
@@ -1534,7 +1536,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
 if (i % 8 == 0)
 printk("[%02x] ", i * 4);
 printk(" %08x",
-be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
+be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
 if ((i + 1) % 8 == 0)
 printk("\n");
 }
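The mthca_cmd_poll() change above also replaces a memcpy_fromio() of a 64-bit field with two explicit 32-bit reads. A sketch of that read-back pattern, using a hypothetical helper name that is not part of the driver:

/* Sketch only -- example_read_be64() is not a function in the driver. */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/io.h>

static u64 example_read_be64(void __iomem *addr)
{
        /*
         * Two raw 32-bit reads; each is byte-swapped separately, and the
         * (__force __be32) cast tells sparse that converting the raw
         * register value to a big-endian quantity is intentional.
         */
        return (u64) be32_to_cpu((__force __be32) __raw_readl(addr)) << 32 |
               (u64) be32_to_cpu((__force __be32) __raw_readl(addr + 4));
}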
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index bd7807cec50..907867d1f2e 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -57,21 +57,21 @@ enum {
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
 struct mthca_cq_context {
-u32 flags;
-u64 start;
-u32 logsize_usrpage;
-u32 error_eqn; /* Tavor only */
-u32 comp_eqn;
-u32 pd;
-u32 lkey;
-u32 last_notified_index;
-u32 solicit_producer_index;
-u32 consumer_index;
-u32 producer_index;
-u32 cqn;
-u32 ci_db; /* Arbel only */
-u32 state_db; /* Arbel only */
+__be32 flags;
+__be64 start;
+__be32 logsize_usrpage;
+__be32 error_eqn; /* Tavor only */
+__be32 comp_eqn;
+__be32 pd;
+__be32 lkey;
+__be32 last_notified_index;
+__be32 solicit_producer_index;
+__be32 consumer_index;
+__be32 producer_index;
+__be32 cqn;
+__be32 ci_db; /* Arbel only */
+__be32 state_db; /* Arbel only */
 u32 reserved;
 } __attribute__((packed));

 #define MTHCA_CQ_STATUS_OK ( 0 << 28)
@@ -110,31 +110,31 @@ enum {
 };

 struct mthca_cqe {
-u32 my_qpn;
-u32 my_ee;
-u32 rqpn;
-u16 sl_g_mlpath;
-u16 rlid;
-u32 imm_etype_pkey_eec;
-u32 byte_cnt;
-u32 wqe;
+__be32 my_qpn;
+__be32 my_ee;
+__be32 rqpn;
+__be16 sl_g_mlpath;
+__be16 rlid;
+__be32 imm_etype_pkey_eec;
+__be32 byte_cnt;
+__be32 wqe;
 u8 opcode;
 u8 is_send;
 u8 reserved;
 u8 owner;
 };

 struct mthca_err_cqe {
-u32 my_qpn;
+__be32 my_qpn;
 u32 reserved1[3];
 u8 syndrome;
 u8 reserved2;
-u16 db_cnt;
+__be16 db_cnt;
 u32 reserved3;
-u32 wqe;
+__be32 wqe;
 u8 opcode;
 u8 reserved4[2];
 u8 owner;
 };

 #define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7)
@@ -193,7 +193,7 @@ static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
 int incr)
 {
-u32 doorbell[2];
+__be32 doorbell[2];

 if (mthca_is_memfree(dev)) {
 *cq->set_ci_db = cpu_to_be32(cq->cons_index);
@@ -293,7 +293,7 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
 {
 int err;
 int dbd;
-u32 new_wqe;
+__be32 new_wqe;

 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
 mthca_dbg(dev, "local QP operation err "
@@ -586,13 +586,13 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,

 int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
 {
-u32 doorbell[2];
+__be32 doorbell[2];

 doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
 MTHCA_TAVOR_CQ_DB_REQ_NOT) |
 to_mcq(cq)->cqn);
-doorbell[1] = 0xffffffff;
+doorbell[1] = (__force __be32) 0xffffffff;

 mthca_write64(doorbell,
 to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
@@ -604,9 +604,9 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
 int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 {
 struct mthca_cq *cq = to_mcq(ibcq);
-u32 doorbell[2];
+__be32 doorbell[2];
 u32 sn;
-u32 ci;
+__be32 ci;

 sn = cq->arm_sn & 3;
 ci = cpu_to_be32(cq->cons_index);
@@ -813,7 +813,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK |
 MTHCA_CQ_STATE_DISARMED |
 MTHCA_CQ_FLAG_TR);
-cq_context->start = cpu_to_be64(0);
 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
 if (ctx)
 cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
@@ -906,7 +905,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

 if (0) {
-u32 *ctx = mailbox->buf;
+__be32 *ctx = mailbox->buf;
 int j;

 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 33162a960c7..3519ca4e086 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -333,14 +333,13 @@ extern void __buggy_use_of_MTHCA_PUT(void);

 #define MTHCA_PUT(dest, source, offset) \
 do { \
-__typeof__(source) *__p = \
-(__typeof__(source) *) ((char *) (dest) + (offset)); \
+void *__d = ((char *) (dest) + (offset)); \
 switch (sizeof(source)) { \
-case 1: *__p = (source); break; \
-case 2: *__p = cpu_to_be16(source); break; \
-case 4: *__p = cpu_to_be32(source); break; \
-case 8: *__p = cpu_to_be64(source); break; \
+case 1: *(u8 *) __d = (source); break; \
+case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
+case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
+case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
 default: __buggy_use_of_MTHCA_PUT(); \
 } \
 } while (0)

@@ -435,7 +434,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 struct ib_recv_wr **bad_wr);
 int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
-int index, int *dbd, u32 *new_wqe);
+int index, int *dbd, __be32 *new_wqe);
 int mthca_alloc_qp(struct mthca_dev *dev,
 struct mthca_pd *pd,
 struct mthca_cq *send_cq,
diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h
index 3be4a4a606a..dd9a44d170c 100644
--- a/drivers/infiniband/hw/mthca/mthca_doorbell.h
+++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h
@@ -58,13 +58,13 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
 __raw_writeq((__force u64) val, dest);
 }

-static inline void mthca_write64(u32 val[2], void __iomem *dest,
+static inline void mthca_write64(__be32 val[2], void __iomem *dest,
 spinlock_t *doorbell_lock)
 {
 __raw_writeq(*(u64 *) val, dest);
 }

-static inline void mthca_write_db_rec(u32 val[2], u32 *db)
+static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
 {
 *(u64 *) db = *(u64 *) val;
 }
@@ -87,18 +87,18 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
 __raw_writel(((__force u32 *) &val)[1], dest + 4);
 }

-static inline void mthca_write64(u32 val[2], void __iomem *dest,
+static inline void mthca_write64(__be32 val[2], void __iomem *dest,
 spinlock_t *doorbell_lock)
 {
 unsigned long flags;

 spin_lock_irqsave(doorbell_lock, flags);
-__raw_writel(val[0], dest);
-__raw_writel(val[1], dest + 4);
+__raw_writel((__force u32) val[0], dest);
+__raw_writel((__force u32) val[1], dest + 4);
 spin_unlock_irqrestore(doorbell_lock, flags);
 }

-static inline void mthca_write_db_rec(u32 val[2], u32 *db)
+static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
 {
 db[0] = val[0];
 wmb();
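The doorbell helpers above show why the __force casts are needed: the caller byte-swaps both words with cpu_to_be32() when it builds the doorbell, so the low-level write must not swap again and therefore goes through __raw_writel(). A sketch of the overall flow, with a hypothetical wrapper name not found in the driver:

/* Sketch only -- example_ring_doorbell() is not part of the driver. */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>

static void example_ring_doorbell(void __iomem *dest, spinlock_t *lock,
                                  u32 hi, u32 lo)
{
        __be32 doorbell[2];
        unsigned long flags;

        doorbell[0] = cpu_to_be32(hi);  /* swapped exactly once, here */
        doorbell[1] = cpu_to_be32(lo);

        spin_lock_irqsave(lock, flags);
        /*
         * __raw_writel() does no swapping; the (__force u32) cast silences
         * sparse about passing a __be32 where a plain u32 is expected.
         */
        __raw_writel((__force u32) doorbell[0], dest);
        __raw_writel((__force u32) doorbell[1], dest + 4);
        spin_unlock_irqrestore(lock, flags);
}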
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 54a809adab6..18f0981eb0c 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -52,18 +52,18 @@ enum {
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
 struct mthca_eq_context {
-u32 flags;
-u64 start;
-u32 logsize_usrpage;
-u32 tavor_pd; /* reserved for Arbel */
+__be32 flags;
+__be64 start;
+__be32 logsize_usrpage;
+__be32 tavor_pd; /* reserved for Arbel */
 u8 reserved1[3];
 u8 intr;
-u32 arbel_pd; /* lost_count for Tavor */
-u32 lkey;
+__be32 arbel_pd; /* lost_count for Tavor */
+__be32 lkey;
 u32 reserved2[2];
-u32 consumer_index;
-u32 producer_index;
+__be32 consumer_index;
+__be32 producer_index;
 u32 reserved3[4];
 } __attribute__((packed));

 #define MTHCA_EQ_STATUS_OK ( 0 << 28)
@@ -128,28 +128,28 @@ struct mthca_eqe {
 union {
 u32 raw[6];
 struct {
-u32 cqn;
+__be32 cqn;
 } __attribute__((packed)) comp;
 struct {
 u16 reserved1;
-u16 token;
+__be16 token;
 u32 reserved2;
 u8 reserved3[3];
 u8 status;
-u64 out_param;
+__be64 out_param;
 } __attribute__((packed)) cmd;
 struct {
-u32 qpn;
+__be32 qpn;
 } __attribute__((packed)) qp;
 struct {
-u32 cqn;
+__be32 cqn;
 u32 reserved1;
 u8 reserved2[3];
 u8 syndrome;
 } __attribute__((packed)) cq_err;
 struct {
 u32 reserved1[2];
-u32 port;
+__be32 port;
 } __attribute__((packed)) port_change;
 } event;
 u8 reserved3[3];
@@ -168,7 +168,7 @@ static inline u64 async_mask(struct mthca_dev *dev)

 static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
 {
-u32 doorbell[2];
+__be32 doorbell[2];

 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
 doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));
@@ -191,8 +191,8 @@ static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u
 {
 /* See comment in tavor_set_eq_ci() above. */
 wmb();
-__raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base +
-eq->eqn * 8);
+__raw_writel((__force u32) cpu_to_be32(ci),
+dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
 /* We still want ordering, just not swabbing, so add a barrier */
 mb();
 }
@@ -207,7 +207,7 @@ static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)

 static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
 {
-u32 doorbell[2];
+__be32 doorbell[2];

 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
 doorbell[1] = 0;
@@ -225,7 +225,7 @@ static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
 {
 if (!mthca_is_memfree(dev)) {
-u32 doorbell[2];
+__be32 doorbell[2];

 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
 doorbell[1] = cpu_to_be32(cqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 3c7fae6cb12..64fa78722cf 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -194,7 +194,7 @@ int mthca_process_mad(struct ib_device *ibdev,
 {
 int err;
 u8 status;
-u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE;
+u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

 /* Forward locally generated traps to the SM */
 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 5be7d949dbf..a2707605f4c 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -42,10 +42,10 @@ enum {
 };

 struct mthca_mgm {
-u32 next_gid_index;
+__be32 next_gid_index;
 u32 reserved[3];
 u8 gid[16];
-u32 qp[MTHCA_QP_PER_MGM];
+__be32 qp[MTHCA_QP_PER_MGM];
 };

 static const u8 zero_gid[16]; /* automatically initialized to 0 */
@@ -94,10 +94,14 @@ static int find_mgm(struct mthca_dev *dev,
 if (0)
 mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
 "%04x:%04x:%04x:%04x is %04x\n",
-be16_to_cpu(((u16 *) gid)[0]), be16_to_cpu(((u16 *) gid)[1]),
-be16_to_cpu(((u16 *) gid)[2]), be16_to_cpu(((u16 *) gid)[3]),
-be16_to_cpu(((u16 *) gid)[4]), be16_to_cpu(((u16 *) gid)[5]),
-be16_to_cpu(((u16 *) gid)[6]), be16_to_cpu(((u16 *) gid)[7]),
+be16_to_cpu(((__be16 *) gid)[0]),
+be16_to_cpu(((__be16 *) gid)[1]),
+be16_to_cpu(((__be16 *) gid)[2]),
+be16_to_cpu(((__be16 *) gid)[3]),
+be16_to_cpu(((__be16 *) gid)[4]),
+be16_to_cpu(((__be16 *) gid)[5]),
+be16_to_cpu(((__be16 *) gid)[6]),
+be16_to_cpu(((__be16 *) gid)[7]),
 *hash);

 *index = *hash;
@@ -258,14 +262,14 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 if (index == -1) {
 mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
 "not found\n",
-be16_to_cpu(((u16 *) gid->raw)[0]),
-be16_to_cpu(((u16 *) gid->raw)[1]),
-be16_to_cpu(((u16 *) gid->raw)[2]),
-be16_to_cpu(((u16 *) gid->raw)[3]),
-be16_to_cpu(((u16 *) gid->raw)[4]),
-be16_to_cpu(((u16 *) gid->raw)[5]),
-be16_to_cpu(((u16 *) gid->raw)[6]),
-be16_to_cpu(((u16 *) gid->raw)[7]));
+be16_to_cpu(((__be16 *) gid->raw)[0]),
+be16_to_cpu(((__be16 *) gid->raw)[1]),
+be16_to_cpu(((__be16 *) gid->raw)[2]),
+be16_to_cpu(((__be16 *) gid->raw)[3]),
+be16_to_cpu(((__be16 *) gid->raw)[4]),
+be16_to_cpu(((__be16 *) gid->raw)[5]),
+be16_to_cpu(((__be16 *) gid->raw)[6]),
+be16_to_cpu(((__be16 *) gid->raw)[7]));
 err = -EINVAL;
 goto out;
 }
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 9efb0322c76..fba0a53ba6e 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -482,7 +482,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 }
 }

-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
+int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
 {
 int group;
 int start, end, dir;
@@ -565,7 +565,7 @@ found:

 page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

-*db = (u32 *) &page->db_rec[j];
+*db = (__be32 *) &page->db_rec[j];

 out:
 up(&dev->db_tab->mutex);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 59c2f555b13..bafa51544aa 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -138,7 +138,7 @@ enum {

 struct mthca_db_page {
 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
-u64 *db_rec;
+__be64 *db_rec;
 dma_addr_t mapping;
 };

@@ -173,7 +173,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,

 int mthca_init_db_tab(struct mthca_dev *dev);
 void mthca_cleanup_db_tab(struct mthca_dev *dev);
-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db);
+int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db);
 void mthca_free_db(struct mthca_dev *dev, int type, int db_index);

 #endif /* MTHCA_MEMFREE_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 15d9f8f290a..0965e66061b 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -51,18 +51,18 @@ struct mthca_mtt {
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
 struct mthca_mpt_entry {
-u32 flags;
-u32 page_size;
-u32 key;
-u32 pd;
-u64 start;
-u64 length;
-u32 lkey;
-u32 window_count;
-u32 window_count_limit;
-u64 mtt_seg;
-u32 mtt_sz; /* Arbel only */
+__be32 flags;
+__be32 page_size;
+__be32 key;
+__be32 pd;
+__be64 start;
+__be64 length;
+__be32 lkey;
+__be32 window_count;
+__be32 window_count_limit;
+__be64 mtt_seg;
+__be32 mtt_sz; /* Arbel only */
 u32 reserved[2];
 } __attribute__((packed));

 #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
@@ -248,7 +248,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
 int start_index, u64 *buffer_list, int list_len)
 {
 struct mthca_mailbox *mailbox;
-u64 *mtt_entry;
+__be64 *mtt_entry;
 int err = 0;
 u8 status;
 int i;
@@ -390,7 +390,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
 if (i % 4 == 0)
 printk("[%02x] ", i * 4);
-printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
+printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
 if ((i + 1) % 4 == 0)
 printk("\n");
 }
@@ -563,7 +563,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
 if (i % 4 == 0)
 printk("[%02x] ", i * 4);
-printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
+printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
 if ((i + 1) % 4 == 0)
 printk("\n");
 }
@@ -670,7 +670,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
 mpt_entry.start = cpu_to_be64(iova);

-writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
+__raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
 memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
 offsetof(struct mthca_mpt_entry, window_count) -
 offsetof(struct mthca_mpt_entry, start));
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 34e6b8685ba..e2db5e00186 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -81,10 +81,10 @@ static int mthca_query_device(struct ib_device *ibdev,
 }

 props->device_cap_flags = mdev->device_cap_flags;
-props->vendor_id = be32_to_cpup((u32 *) (out_mad->data + 36)) &
+props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 0xffffff;
-props->vendor_part_id = be16_to_cpup((u16 *) (out_mad->data + 30));
-props->hw_ver = be16_to_cpup((u16 *) (out_mad->data + 32));
+props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
+props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32));
 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
 memcpy(&props->node_guid, out_mad->data + 12, 8);

@@ -138,16 +138,16 @@ static int mthca_query_port(struct ib_device *ibdev,
 goto out;
 }

-props->lid = be16_to_cpup((u16 *) (out_mad->data + 16));
+props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
 props->lmc = out_mad->data[34] & 0x7;
-props->sm_lid = be16_to_cpup((u16 *) (out_mad->data + 18));
+props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
 props->sm_sl = out_mad->data[36] & 0xf;
 props->state = out_mad->data[32] & 0xf;
 props->phys_state = out_mad->data[33] >> 4;
-props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20));
+props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
-props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48));
+props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
 props->active_width = out_mad->data[31] & 0xf;
 props->active_speed = out_mad->data[35] >> 4;

@@ -223,7 +223,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
 goto out;
 }

-*pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]);
+*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
 kfree(in_mad);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 727aad8d4f3..624651edf57 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -182,9 +182,9 @@ struct mthca_cq {

 /* Next fields are Arbel only */
 int set_ci_db_index;
-u32 *set_ci_db;
+__be32 *set_ci_db;
 int arm_db_index;
-u32 *arm_db;
+__be32 *arm_db;
 int arm_sn;

 union {
@@ -207,7 +207,7 @@ struct mthca_wq {
 int wqe_shift;

 int db_index; /* Arbel only */
-u32 *db;
+__be32 *db;
 };

 struct mthca_qp {
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 2f429815d19..8fbb4f1f539 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -97,62 +97,62 @@ enum {
 };

 struct mthca_qp_path {
-u32 port_pkey;
+__be32 port_pkey;
 u8 rnr_retry;
 u8 g_mylmc;
-u16 rlid;
+__be16 rlid;
 u8 ackto;
 u8 mgid_index;
 u8 static_rate;
 u8 hop_limit;
-u32 sl_tclass_flowlabel;
+__be32 sl_tclass_flowlabel;
 u8 rgid[16];
 } __attribute__((packed));

 struct mthca_qp_context {
-u32 flags;
-u32 tavor_sched_queue; /* Reserved on Arbel */
+__be32 flags;
+__be32 tavor_sched_queue; /* Reserved on Arbel */
 u8 mtu_msgmax;
 u8 rq_size_stride; /* Reserved on Tavor */
 u8 sq_size_stride; /* Reserved on Tavor */
 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */
-u32 usr_page;
-u32 local_qpn;
-u32 remote_qpn;
+__be32 usr_page;
+__be32 local_qpn;
+__be32 remote_qpn;
 u32 reserved1[2];
 struct mthca_qp_path pri_path;
 struct mthca_qp_path alt_path;
-u32 rdd;
-u32 pd;
-u32 wqe_base;
-u32 wqe_lkey;
-u32 params1;
-u32 reserved2;
-u32 next_send_psn;
-u32 cqn_snd;
-u32 snd_wqe_base_l; /* Next send WQE on Tavor */
-u32 snd_db_index; /* (debugging only entries) */
-u32 last_acked_psn;
-u32 ssn;
-u32 params2;
-u32 rnr_nextrecvpsn;
-u32 ra_buff_indx;
-u32 cqn_rcv;
-u32 rcv_wqe_base_l; /* Next recv WQE on Tavor */
-u32 rcv_db_index; /* (debugging only entries) */
-u32 qkey;
-u32 srqn;
-u32 rmsn;
-u16 rq_wqe_counter; /* reserved on Tavor */
-u16 sq_wqe_counter; /* reserved on Tavor */
+__be32 rdd;
+__be32 pd;
+__be32 wqe_base;
+__be32 wqe_lkey;
+__be32 params1;
+__be32 reserved2;
+__be32 next_send_psn;
+__be32 cqn_snd;
+__be32 snd_wqe_base_l; /* Next send WQE on Tavor */
+__be32 snd_db_index; /* (debugging only entries) */
+__be32 last_acked_psn;
+__be32 ssn;
+__be32 params2;
+__be32 rnr_nextrecvpsn;
+__be32 ra_buff_indx;
+__be32 cqn_rcv;
+__be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */
+__be32 rcv_db_index; /* (debugging only entries) */
+__be32 qkey;
+__be32 srqn;
+__be32 rmsn;
+__be16 rq_wqe_counter; /* reserved on Tavor */
+__be16 sq_wqe_counter; /* reserved on Tavor */
 u32 reserved3[18];
 } __attribute__((packed));

 struct mthca_qp_param {
-u32 opt_param_mask;
+__be32 opt_param_mask;
 u32 reserved1;
 struct mthca_qp_context context;
 u32 reserved2[62];
 } __attribute__((packed));

 enum {
@@ -191,62 +191,62 @@ enum {
 };

 struct mthca_next_seg {
-u32 nda_op; /* [31:6] next WQE [4:0] next opcode */
-u32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
-u32 flags; /* [3] CQ [2] Event [1] Solicit */
-u32 imm; /* immediate data */
+__be32 nda_op; /* [31:6] next WQE [4:0] next opcode */
+__be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
+__be32 flags; /* [3] CQ [2] Event [1] Solicit */
+__be32 imm; /* immediate data */
 };

 struct mthca_tavor_ud_seg {
 u32 reserved1;
-u32 lkey;
-u64 av_addr;
+__be32 lkey;
+__be64 av_addr;
 u32 reserved2[4];
-u32 dqpn;
-u32 qkey;
+__be32 dqpn;
+__be32 qkey;
 u32 reserved3[2];
 };

 struct mthca_arbel_ud_seg {
-u32 av[8];
-u32 dqpn;
-u32 qkey;
+__be32 av[8];
+__be32 dqpn;
+__be32 qkey;
 u32 reserved[2];
 };

 struct mthca_bind_seg {
-u32 flags; /* [31] Atomic [30] rem write [29] rem read */
+__be32 flags; /* [31] Atomic [30] rem write [29] rem read */
 u32 reserved;
-u32 new_rkey;
-u32 lkey;
-u64 addr;
-u64 length;
+__be32 new_rkey;
+__be32 lkey;
+__be64 addr;
+__be64 length;
 };

 struct mthca_raddr_seg {
-u64 raddr;
-u32 rkey;
+__be64 raddr;
+__be32 rkey;
 u32 reserved;
 };

 struct mthca_atomic_seg {
-u64 swap_add;
-u64 compare;
+__be64 swap_add;
+__be64 compare;
 };

 struct mthca_data_seg {
-u32 byte_count;
-u32 lkey;
-u64 addr;
+__be32 byte_count;
+__be32 lkey;
+__be64 addr;
 };

 struct mthca_mlx_seg {
-u32 nda_op;
-u32 nds;
-u32 flags; /* [17] VL15 [16] SLR [14:12] static rate
+__be32 nda_op;
+__be32 nds;
+__be32 flags; /* [17] VL15 [16] SLR [14:12] static rate
 [11:8] SL [3] C [2] E */
-u16 rlid;
-u16 vcrc;
+__be16 rlid;
+__be16 vcrc;
 };

 static const u8 mthca_opcode[] = {
@@ -1459,6 +1459,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 {
 int header_size;
 int err;
+u16 pkey;

 ib_ud_header_init(256, /* assume a MAD */
 sqp->ud_header.grh_present,
@@ -1469,8 +1470,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 return err;
 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
-(sqp->ud_header.lrh.destination_lid == 0xffff ?
-MTHCA_MLX_SLR : 0) |
+(sqp->ud_header.lrh.destination_lid ==
+IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
 (sqp->ud_header.lrh.service_level << 8));
 mlx->rlid = sqp->ud_header.lrh.destination_lid;
 mlx->vcrc = 0;
@@ -1490,18 +1491,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 }

 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
-if (sqp->ud_header.lrh.destination_lid == 0xffff)
-sqp->ud_header.lrh.source_lid = 0xffff;
+if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
 if (!sqp->qp.ibqp.qp_num)
 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
-sqp->pkey_index,
-&sqp->ud_header.bth.pkey);
+sqp->pkey_index, &pkey);
 else
 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
-wr->wr.ud.pkey_index,
-&sqp->ud_header.bth.pkey);
-cpu_to_be16s(&sqp->ud_header.bth.pkey);
+wr->wr.ud.pkey_index, &pkey);
+sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
@@ -1744,7 +1743,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,

 out:
 if (likely(nreq)) {
-u32 doorbell[2];
+__be32 doorbell[2];

 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
 qp->send_wqe_offset) | f0 | op0);
@@ -1845,7 +1844,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,

 out:
 if (likely(nreq)) {
-u32 doorbell[2];
+__be32 doorbell[2];

 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
@@ -2066,7 +2065,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,

 out:
 if (likely(nreq)) {
-u32 doorbell[2];
+__be32 doorbell[2];

 doorbell[0] = cpu_to_be32((nreq << 24) |
 ((qp->sq.head & 0xffff) << 8) |
@@ -2176,7 +2175,7 @@ out:
 }

 int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
-int index, int *dbd, u32 *new_wqe)
+int index, int *dbd, __be32 *new_wqe)
 {
 struct mthca_next_seg *next;
