author     Roland Dreier <rolandd@cisco.com>   2008-04-17 00:01:08 -0400
committer  Roland Dreier <rolandd@cisco.com>   2008-04-17 00:01:08 -0400
commit     dc544bc9cb8aa91c5d7fc9116a302f88a8a97250 (patch)
tree       e8ae7a400584c94252371068e08f63373dd449fd /drivers/infiniband/hw/amso1100
parent     d23b9d8ff2fcadc6f2fba83f654a122b9e16f02c (diff)
RDMA/amso1100: Start of endianness annotation
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
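Editor's note: the point of this patch is to let sparse (run with `make C=2 CF=-D__CHECK_ENDIAN__`) track byte order in the type system. Fields that are big-endian on the wire become __be16/__be32/__be64, and any place a raw bit pattern deliberately crosses that boundary, such as handing a big-endian value to __raw_writew(), which takes a plain u16, gets an explicit __force cast. A minimal sketch of the write-side pattern, with made-up register offsets rather than anything from this driver:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical device registers; offsets are invented for illustration. */
#define DEMO_ADDR_OFF   0x00
#define DEMO_FLAGS_OFF  0x08

static void demo_post_desc(void __iomem *mmio, u32 addr, u16 flags)
{
        /*
         * __raw_writel()/__raw_writew() take plain u32/u16, but
         * cpu_to_be32()/cpu_to_be16() return __be32/__be16.  The __force
         * cast tells sparse the reinterpretation is intentional: we really
         * do want exactly these big-endian bits written to the device.
         */
        __raw_writel((__force u32) cpu_to_be32(addr),  mmio + DEMO_ADDR_OFF);
        __raw_writew((__force u16) cpu_to_be16(flags), mmio + DEMO_FLAGS_OFF);
}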
Diffstat (limited to 'drivers/infiniband/hw/amso1100')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c        70
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h        10
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_ae.c      2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mm.c      2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.c      4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.h      2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c   24
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_wr.h    212
8 files changed, 166 insertions, 160 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index c50533b5999f..113f3c03c5b5 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -130,10 +130,10 @@ static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
                 tx_desc->status = 0;
 
                 /* Set TXP_HTXD_UNINIT */
-                __raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                              (void __iomem *) txp_desc + C2_TXP_ADDR);
                 __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
-                __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                              (void __iomem *) txp_desc + C2_TXP_FLAGS);
 
                 elem->skb = NULL;
@@ -179,13 +179,13 @@ static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
                 rx_desc->status = 0;
 
                 /* Set RXP_HRXD_UNINIT */
-                __raw_writew(cpu_to_be16(RXP_HRXD_OK),
+                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
                              (void __iomem *) rxp_desc + C2_RXP_STATUS);
                 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
                 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
-                __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                              (void __iomem *) rxp_desc + C2_RXP_ADDR);
-                __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                              (void __iomem *) rxp_desc + C2_RXP_FLAGS);
 
                 elem->skb = NULL;
@@ -239,10 +239,11 @@ static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
         rxp_hdr->flags = RXP_HRXD_READY;
 
         __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-        __raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
+        __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
                      elem->hw_desc + C2_RXP_LEN);
-        __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
-        __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+        __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
+        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
+                     elem->hw_desc + C2_RXP_FLAGS);
 
         elem->skb = skb;
         elem->mapaddr = mapaddr;
@@ -290,9 +291,9 @@ static void c2_rx_clean(struct c2_port *c2_port)
                 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
                 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
                 __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
-                __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                              elem->hw_desc + C2_RXP_ADDR);
-                __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                              elem->hw_desc + C2_RXP_FLAGS);
 
                 if (elem->skb) {
@@ -346,16 +347,16 @@ static void c2_tx_clean(struct c2_port *c2_port)
                                      elem->hw_desc + C2_TXP_LEN);
                         __raw_writeq(0,
                                      elem->hw_desc + C2_TXP_ADDR);
-                        __raw_writew(cpu_to_be16(TXP_HTXD_DONE),
+                        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
                                      elem->hw_desc + C2_TXP_FLAGS);
                         c2_port->netstats.tx_dropped++;
                         break;
                 } else {
                         __raw_writew(0,
                                      elem->hw_desc + C2_TXP_LEN);
-                        __raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+                        __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                                      elem->hw_desc + C2_TXP_ADDR);
-                        __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+                        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                                      elem->hw_desc + C2_TXP_FLAGS);
                 }
 
@@ -390,7 +391,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
         for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
              elem = elem->next) {
                 txp_htxd.flags =
-                    be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
+                    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
 
                 if (txp_htxd.flags != TXP_HTXD_DONE)
                         break;
@@ -398,7 +399,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
                 if (netif_msg_tx_done(c2_port)) {
                         /* PCI reads are expensive in fast path */
                         txp_htxd.len =
-                            be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
+                            be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
                         pr_debug("%s: tx done slot %3Zu status 0x%x len "
                                  "%5u bytes\n",
                                  netdev->name, elem - tx_ring->start,
@@ -448,10 +449,12 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
         /* Write the descriptor to the adapter's rx ring */
         __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
         __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-        __raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
+        __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
                      elem->hw_desc + C2_RXP_LEN);
-        __raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
-        __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+        __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
+                     elem->hw_desc + C2_RXP_ADDR);
+        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
+                     elem->hw_desc + C2_RXP_FLAGS);
 
         pr_debug("packet dropped\n");
         c2_port->netstats.rx_dropped++;
@@ -653,7 +656,7 @@ static int c2_up(struct net_device *netdev)
              i++, elem++) {
                 rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
                 rxp_hdr->flags = 0;
-                __raw_writew(cpu_to_be16(RXP_HRXD_READY),
+                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                              elem->hw_desc + C2_RXP_FLAGS);
         }
 
@@ -787,9 +790,12 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         elem->maplen = maplen;
 
         /* Tell HW to xmit */
-        __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
-        __raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
-        __raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
+        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
+                     elem->hw_desc + C2_TXP_ADDR);
+        __raw_writew((__force u16) cpu_to_be16(maplen),
+                     elem->hw_desc + C2_TXP_LEN);
+        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
+                     elem->hw_desc + C2_TXP_FLAGS);
 
         c2_port->netstats.tx_packets++;
         c2_port->netstats.tx_bytes += maplen;
@@ -810,11 +816,11 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                 elem->maplen = maplen;
 
                 /* Tell HW to xmit */
-                __raw_writeq(cpu_to_be64(mapaddr),
+                __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                              elem->hw_desc + C2_TXP_ADDR);
-                __raw_writew(cpu_to_be16(maplen),
+                __raw_writew((__force u16) cpu_to_be16(maplen),
                              elem->hw_desc + C2_TXP_LEN);
-                __raw_writew(cpu_to_be16(TXP_HTXD_READY),
+                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                              elem->hw_desc + C2_TXP_FLAGS);
 
                 c2_port->netstats.tx_packets++;
@@ -1029,10 +1035,10 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
         }
 
         /* Validate the adapter version */
-        if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
+        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
                 printk(KERN_ERR PFX "Version mismatch "
                        "[fw=%u, c2=%u], Adapter not claimed\n",
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
                        C2_VERSION);
                 ret = -EINVAL;
                 iounmap(mmio_regs);
@@ -1040,12 +1046,12 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
         }
 
         /* Validate the adapter IVN */
-        if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
+        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
                 printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
                        "the OpenIB device support kit. "
                        "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
                        C2_IVN);
                 ret = -EINVAL;
                 iounmap(mmio_regs);
                 goto bail2;
@@ -1068,7 +1074,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 
         /* Get the last RX index */
         c2dev->cur_rx =
-            (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
+            (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
              0xffffc000) / sizeof(struct c2_rxp_desc);
 
         /* Request an interrupt line for the driver */
@@ -1090,7 +1096,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
         }
 
         /* Save off the actual size prior to unmapping mmio_regs */
-        kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
+        kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
 
         /* Unmap the adapter PCI registers in BAR4 */
         iounmap(mmio_regs);
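The read direction in c2.c mirrors the writes: readw()/readl() return host-typed u16/u32, so the result has to be re-labelled as __be16/__be32 before be16_to_cpu()/be32_to_cpu() will accept it without a sparse warning. A hedged sketch of that pattern, again with a made-up register offset:

#include <linux/io.h>
#include <asm/byteorder.h>

#define DEMO_VERS_OFF   0x10    /* hypothetical version register */

static u32 demo_read_version(void __iomem *mmio)
{
        /*
         * readl() hands back a plain u32 that happens to hold big-endian
         * register contents; (__force __be32) re-labels those bits so
         * be32_to_cpu() type-checks and swaps them where needed.
         */
        return be32_to_cpu((__force __be32) readl(mmio + DEMO_VERS_OFF));
}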
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index fa58200217a1..21bcb3ada7fe 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -346,7 +346,7 @@ struct c2_dev {
         // spinlock_t aeq_lock;
         // spinlock_t rnic_lock;
 
-        u16 *hint_count;
+        __be16 *hint_count;
         dma_addr_t hint_count_dma;
         u16 hints_read;
 
@@ -425,10 +425,10 @@ static inline void __raw_writeq(u64 val, void __iomem * addr)
 #endif
 
 #define C2_SET_CUR_RX(c2dev, cur_rx) \
-        __raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
+        __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
 
 #define C2_GET_CUR_RX(c2dev) \
-        be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
+        be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
 
 static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
 {
@@ -485,8 +485,8 @@ extern void c2_unregister_device(struct c2_dev *c2dev);
 extern int c2_rnic_init(struct c2_dev *c2dev);
 extern void c2_rnic_term(struct c2_dev *c2dev);
 extern void c2_rnic_interrupt(struct c2_dev *c2dev);
-extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
-extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
+extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
+extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
 
 /* QPs */
 extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
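Annotating the declarations in c2.h (hint_count as __be16 *, the c2_add_addr()/c2_del_addr() prototypes as __be32) is what gives the checker leverage: once a declaration carries the byte order, a plain assignment of a host-endian value is flagged at every user. A small hypothetical example of the class of bug this catches:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_dev {
        __be16 *hint_count;     /* shared with the adapter, big-endian */
};

static void demo_update_hints(struct demo_dev *dev, u16 hints)
{
        /* *dev->hint_count = hints;  -- sparse: incorrect type in assignment */
        *dev->hint_count = cpu_to_be16(hints);  /* convert explicitly instead */
}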
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index a31439bd3b67..07ce952699ba 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -195,7 +195,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
                          "resource=%x, qp_state=%s\n",
                          __FUNCTION__,
                          to_event_str(event_id),
-                         (unsigned long long) be64_to_cpu(wr->ae.ae_generic.user_context),
+                         (unsigned long long) wr->ae.ae_generic.user_context,
                          be32_to_cpu(wr->ae.ae_generic.resource_type),
                          be32_to_cpu(wr->ae.ae_generic.resource),
                          to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
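The c2_ae.c hunk runs in the opposite direction: user_context is treated as a host-owned cookie rather than a wire-order field, so it stays u64 and the old be64_to_cpu() on it is dropped rather than force-cast. With annotations in place, applying a byte-swap helper to an unannotated field is itself a sparse warning. A hypothetical illustration:

#include <linux/printk.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_event {
        u64    user_context;    /* host cookie, echoed back untouched */
        __be32 resource;        /* wire-order field, must be converted */
};

static void demo_log_event(const struct demo_event *ev)
{
        /* be64_to_cpu(ev->user_context) would warn: expected __be64, got u64 */
        pr_debug("ctx=%llu resource=%u\n",
                 (unsigned long long) ev->user_context,
                 be32_to_cpu(ev->resource));
}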
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
index 1e4f46493fcb..b506fe22b4d4 100644
--- a/drivers/infiniband/hw/amso1100/c2_mm.c
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -45,7 +45,7 @@
  * Reply buffer _is_ freed by this function.
  */
 static int
-send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
+send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
                   unsigned long va, u32 pbl_depth,
                   struct c2_vq_req *vq_req, int pbl_type)
 {
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c
index b88a75592102..0cddc49beae1 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.c
+++ b/drivers/infiniband/hw/amso1100/c2_mq.c
@@ -64,7 +64,7 @@ void c2_mq_produce(struct c2_mq *q)
                 q->priv = (q->priv + 1) % q->q_size;
                 q->hint_count++;
                 /* Update peer's offset. */
-                __raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+                __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
         }
 }
 
@@ -105,7 +105,7 @@ void c2_mq_free(struct c2_mq *q)
 #endif
                 q->priv = (q->priv + 1) % q->q_size;
                 /* Update peer's offset. */
-                __raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+                __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
         }
 }
 
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index 9185bbb21658..acede007b94a 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -75,7 +75,7 @@ struct c2_mq {
         u16 hint_count;
         u16 priv;
         struct c2_mq_shared __iomem *peer;
-        u16 *shared;
+        __be16 *shared;
         dma_addr_t shared_dma;
         u32 q_size;
         u32 msg_size;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 1687c511cb2f..7be1f87dcfa7 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -208,7 +208,7 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
 /*
  * Add an IP address to the RNIC interface
  */
-int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
         struct c2_vq_req *vq_req;
         struct c2wr_rnic_setconfig_req *wr;
@@ -270,7 +270,7 @@ int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
 /*
  * Delete an IP address from the RNIC interface
  */
-int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
         struct c2_vq_req *vq_req;
         struct c2wr_rnic_setconfig_req *wr;
@@ -506,17 +506,17 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
         mmio_regs = c2dev->kva;
         /* Initialize the Verbs Request Queue */
         c2_mq_req_init(&c2dev->req_vq, 0,
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
                        mmio_regs +
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
                        mmio_regs +
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
                        C2_MQ_ADAPTER_TARGET);
 
         /* Initialize the Verbs Reply Queue */
-        qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
-        msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
+        qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
+        msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
         q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
                                       &c2dev->rep_vq.host_dma, GFP_KERNEL);
         if (!q1_pages) {
@@ -532,12 +532,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
                        msgsize,
                        q1_pages,
                        mmio_regs +
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
                        C2_MQ_HOST_TARGET);
 
         /* Initialize the Asynchronus Event Queue */
-        qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
-        msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
+        qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
+        msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
         q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
                                       &c2dev->aeq.host_dma, GFP_KERNEL);
         if (!q2_pages) {
@@ -553,7 +553,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
                        msgsize,
                        q2_pages,
                        mmio_regs +
-                       be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
+                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
                        C2_MQ_HOST_TARGET);
 
         /* Initialize the verbs request allocator */
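Typing c2_add_addr()/c2_del_addr() with __be32 matches how IPv4 addresses are carried elsewhere in the kernel (ifa_address in struct in_ifaddr is a network-order __be32), so the address can flow from the inet notifier into the setconfig request without conversion. A short hypothetical sketch of the convention:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical container mirroring the annotated c2_netaddr layout. */
struct demo_netaddr {
        __be32 ip_addr;
        __be32 netmask;
};

static void demo_set_addr(struct demo_netaddr *na, __be32 inaddr, __be32 inmask)
{
        /* Network order in, network order stored: no byte swap anywhere. */
        na->ip_addr = inaddr;
        na->netmask = inmask;
}

static void demo_example(struct demo_netaddr *na)
{
        /* htonl() yields a __be32, so constants type-check too: 192.168.0.1/24 */
        demo_set_addr(na, htonl(0xc0a80001), htonl(0xffffff00));
}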
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h
index 3ec6c43bb0ef..c65fbdd6e469 100644
--- a/drivers/infiniband/hw/amso1100/c2_wr.h
+++ b/drivers/infiniband/hw/amso1100/c2_wr.h
@@ -180,8 +180,8 @@ enum c2_wr_type {
 };
 
 struct c2_netaddr {
-        u32 ip_addr;
-        u32 netmask;
+        __be32 ip_addr;
+        __be32 netmask;
         u32 mtu;
 };
 
@@ -199,9 +199,9 @@ struct c2_route {
  * A Scatter Gather Entry.
  */
 struct c2_data_addr {
-        u32 stag;
-        u32 length;
-        u64 to;
+        __be32 stag;
+        __be32 length;
+        __be64 to;
 };
 
 /*
@@ -274,7 +274,7 @@ struct c2wr_hdr {
          * from the host to adapter by libccil, but we copy it anyway
          * to make the memcpy to the adapter better aligned.
          */
-        u32 wqe_count;
+        __be32 wqe_count;
 
         /* Put these fields next so that later 32- and 64-bit
          * quantities are naturally aligned.
@@ -316,8 +316,8 @@ enum c2_rnic_flags {
 struct c2wr_rnic_open_req {
         struct c2wr_hdr hdr;
         u64 user_context;
-        u16 flags;              /* See enum c2_rnic_flags */
-        u16 port_num;
+        __be16 flags;           /* See enum c2_rnic_flags */
+        __be16 port_num;
 } __attribute__((packed));
 
 struct c2wr_rnic_open_rep {
@@ -341,30 +341,30 @@ struct c2wr_rnic_query_req {
 struct c2wr_rnic_query_rep {
         struct c2wr_hdr hdr;
         u64 user_context;
-        u32 vendor_id;
-        u32 part_number;
-        u32 hw_version;
-        u32 fw_ver_major;
-        u32 fw_ver_minor;
-        u32 fw_ver_patch;
+        __be32 vendor_id;
+        __be32 part_number;
+        __be32 hw_version;
+        __be32 fw_ver_major;
+        __be32 fw_ver_minor;
+        __be32 fw_ver_patch;
         char fw_ver_build_str[WR_BUILD_STR_LEN];
-        u32 max_qps;
-        u32 max_qp_depth;
+        __be32 max_qps;
+        __be32 max_qp_depth;
         u32 max_srq_depth;
         u32 max_send_sgl_depth;
         u32 max_rdma_sgl_depth;
-        u32 max_cqs;
-        u32 max_cq_depth;
+        __be32 max_cqs;
+        __be32 max_cq_depth;
         u32 max_cq_event_handlers;
-        u32 max_mrs;
+        __be32 max_mrs;
         u32 max_pbl_depth;
-        u32 max_pds;
-        u32 max_global_ird;
+        __be32 max_pds;
+        __be32 max_global_ird;
         u32 max_global_ord;
-        u32 max_qp_ird;
-        u32 max_qp_ord;
+        __be32 max_qp_ird;
+        __be32 max_qp_ord;
         u32 flags;
-        u32 max_mws;
+        __be32 max_mws;
         u32 pbe_range_low;
         u32 pbe_range_high;
         u32 max_srqs;
@@ -405,7 +405,7 @@ union c2wr_rnic_getconfig {
 struct c2wr_rnic_setconfig_req {
         struct c2wr_hdr hdr;
         u32 rnic_handle;
-        u32 option;             /* See c2_setconfig_cmd_t */
+        __be32 option;          /* See c2_setconfig_cmd_t */
         /* variable data and pad. See c2_netaddr and c2_route */
         u8 data[0];
 } __attribute__((packed)) ;
@@ -441,18 +441,18 @@ union c2wr_rnic_close {
  */
 struct c2wr_cq_create_req {
         struct c2wr_hdr hdr;
-        u64 shared_ht;
+        __be64 shared_ht;
         u64 user_context;
-        u64 msg_pool;
+        __be64 msg_pool;
         u32 rnic_handle;
-        u32 msg_size;
-        u32 depth;
+        __be32 msg_size;
+        __be32 depth;
 } __attribute__((packed)) ;
 
 struct c2wr_cq_create_rep {
         struct c2wr_hdr hdr;
-        u32 mq_index;
-        u32 adapter_shared;
+        __be32 mq_index;
+        __be32 adapter_shared;
         u32 cq_handle;
 } __attribute__((packed)) ;
 
@@ -585,40 +585,40 @@ enum c2wr_qp_flags {
 
 struct c2wr_qp_create_req {
         struct c2wr_hdr hdr;
-        u64 shared_sq_ht;
-        u64 shared_rq_ht;
+        __be64 shared_sq_ht;
+        __be64 shared_rq_ht;
         u64 user_context;
         u32 rnic_handle;
         u32 sq_cq_handle;
         u32 rq_cq_handle;
-        u32 sq_depth;
-        u32 rq_depth;
+        __be32 sq_depth;
+        __be32 rq_depth;
         u32 srq_handle;
         u32 srq_limit;
-        u32 flags;              /* see enum c2wr_qp_flags */
-        u32 send_sgl_depth;
-        u32 recv_sgl_depth;
-        u32 rdma_write_sgl_depth;
-        u32 ord;
-        u32 ird;
+        __be32 flags;           /* see enum c2wr_qp_flags */
+        __be32 send_sgl_depth;
+        __be32 recv_sgl_depth;
+        __be32 rdma_write_sgl_depth;
+        __be32 ord;
+        __be32 ird;
         u32 pd_id;
 } __attribute__((packed)) ;
 
 struct c2wr_qp_create_rep {
         struct c2wr_hdr hdr;
-        u32 sq_depth;
-        u32 rq_depth;
+        __be32 sq_depth;
+        __be32 rq_depth;
         u32 send_sgl_depth;
         u32 recv_sgl_depth;
         u32 rdma_write_sgl_depth;
         u32 ord;
         u32 ird;
-        u32 sq_msg_size;
-        u32 sq_mq_index;
-        u32 sq_mq_start;
-        u32 rq_msg_size;
-        u32 rq_mq_index;
-        u32 rq_mq_start;
+        __be32 sq_msg_size;
+        __be32 sq_mq_index;
+        __be32 sq_mq_start;
+        __be32 rq_msg_size;
+        __be32 rq_mq_index;
+        __be32 rq_mq_start;
         u32 qp_handle;
 } __attribute__((packed)) ;
 
@@ -667,11 +667,11 @@ struct c2wr_qp_modify_req {
         u32 stream_msg_length;
         u32 rnic_handle;
         u32 qp_handle;
-        u32 next_qp_state;
-        u32 ord;
-        u32 ird;
-        u32 sq_depth;
-        u32 rq_depth;
+        __be32 next_qp_state;
+        __be32 ord;
+        __be32 ird;
+        __be32 sq_depth;
+        __be32 rq_depth;
         u32 llp_ep_handle;
 } __attribute__((packed)) ;
 
@@ -721,10 +721,10 @@ struct c2wr_qp_connect_req {
         struct c2wr_hdr hdr;
         u32 rnic_handle;
         u32 qp_handle;
-        u32 remote_addr;
-        u16 remote_port;
+        __be32 remote_addr;
+        __be16 remote_port;
         u16 pad;
-        u32 private_data_length;
+        __be32 private_data_length;
         u8 private_data[0];     /* Private data in-line. */
 } __attribute__((packed)) ;
 
@@ -759,25 +759,25 @@ union c2wr_nsmr_stag_alloc {
 
 struct c2wr_nsmr_register_req {
         struct c2wr_hdr hdr;
-        u64 va;
+        __be64 va;
         u32 rnic_handle;
-        u16 flags;
+        __be16 flags;
         u8 stag_key;
         u8 pad;
         u32 pd_id;
-        u32 pbl_depth;
-        u32 pbe_size;
-        u32 fbo;
-        u32 length;
-        u32 addrs_length;
+        __be32 pbl_depth;
+        __be32 pbe_size;
+        __be32 fbo;
+        __be32 length;
+        __be32 addrs_length;
         /* array of paddrs (must be aligned on a 64bit boundary) */
-        u64 paddrs[0];
+        __be64 paddrs[0];
 } __attribute__((packed)) ;
 
 struct c2wr_nsmr_register_rep {
         struct c2wr_hdr hdr;
         u32 pbl_depth;
-        u32 stag_index;
+        __be32 stag_index;
 } __attribute__((packed)) ;
 
 union c2wr_nsmr_register {
@@ -788,11 +788,11 @@ union c2wr_nsmr_register {
 struct c2wr_nsmr_pbl_req {
         struct c2wr_hdr hdr;
         u32 rnic_handle;
-        u32 flags;
-        u32 stag_index;
-        u32 addrs_length;
+        __be32 flags;
+        __be32 stag_index;
+        __be32 addrs_length;
         /* array of paddrs (must be aligned on a 64bit boundary) */
-        u64 paddrs[0];
+        __be64 paddrs[0];
 } __attribute__((packed)) ;
 
 struct c2wr_nsmr_pbl_rep {
@@ -847,7 +847,7 @@ union c2wr_mw_query {
 struct c2wr_stag_dealloc_req {
         struct c2wr_hdr hdr;
         u32 rnic_handle;
-        u32 stag_index;
+        __be32 stag_index;
 } __attribute__((packed)) ;
 
 struct c2wr_stag_dealloc_rep {
@@ -949,7 +949,7 @@ struct c2wr_ce {
         u64 qp_user_context;    /* c2_user_qp_t * */
         u32 qp_state;           /* Current QP State */
         u32 handle;             /* QPID or EP Handle */
-        u32 bytes_rcvd;         /* valid for RECV WCs */
+        __be32 bytes_rcvd;      /* valid for RECV WCs */
         u32 stag;
 } __attribute__((packed)) ;
 
@@ -984,8 +984,8 @@ struct c2_rq_hdr {
  */
 struct c2wr_send_req {
         struct c2_sq_hdr sq_hdr;
-        u32 sge_len;
-        u32 remote_stag;
+        __be32 sge_len;
+        __be32 remote_stag;
         u8 data[0];             /* SGE array */
 } __attribute__((packed));
 
@@ -996,9 +996,9 @@ union c2wr_send {
 
 struct c2wr_rdma_write_req {
         struct c2_sq_hdr sq_hdr;
-        u64 remote_to;
-        u32 remote_stag;
-        u32 sge_len;
+        __be64 remote_to;
+        __be32 remote_stag;
+        __be32 sge_len;
         u8 data[0];             /* SGE array */
 } __attribute__((packed));
 
@@ -1009,11 +1009,11 @@ union c2wr_rdma_write {
 
 struct c2wr_rdma_read_req {
         struct c2_sq_hdr sq_hdr;
-        u64 local_to;
-        u64 remote_to;
-        u32 local_stag;
-        u32 remote_stag;
-        u32 length;
+        __be64 local_to;
+        __be64 remote_to;
+        __be32 local_stag;
+        __be32 remote_stag;
+        __be32 length;
 } __attribute__((packed));
 
 union c2wr_rdma_read {
@@ -1113,9 +1113,9 @@ union c2wr_recv {
 struct c2wr_ae_hdr {
         struct c2wr_hdr hdr;
         u64 user_context;       /* user context for this res. */
-        u32 resource_type;      /* see enum c2_resource_indicator */
-        u32 resource;           /* handle for resource */
-        u32 qp_state;           /* current QP State */
+        __be32 resource_type;   /* see enum c2_resource_indicator */
+        __be32 resource;        /* handle for resource */
+        __be32 qp_state;        /* current QP State */
 } __attribute__((packed));
 
 /*
@@ -1124,11 +1124,11 @@ struct c2wr_ae_hdr {
  */
 struct c2wr_ae_active_connect_results {
         struct c2wr_ae_hdr ae_hdr;
-        u32 laddr;
-        u32 raddr;
-        u16 lport;
-        u16 rport;
-        u32 private_data_length;
+        __be32 laddr;
+        __be32 raddr;
+        __be16 lport;
+        __be16 rport;
+        __be32 private_data_length;
         u8 private_data[0];     /* data is in-line in the msg. */
 } __attribute__((packed));
 
@@ -1142,11 +1142,11 @@ struct c2wr_ae_active_connect_results {
 struct c2wr_ae_connection_request {
         struct c2wr_ae_hdr ae_hdr;
         u32 cr_handle;          /* connreq handle (sock ptr) */
-        u32 laddr;
-        u32 raddr;
-        u16 lport;
-        u16 rport;
-        u32 private_data_length;
+        __be32 laddr;
+        __be32 raddr;
+        __be16 lport;
+        __be16 rport;
+        __be32 private_data_length;
         u8 private_data[0];     /* data is in-line in the msg. */
 } __attribute__((packed));
 
@@ -1158,12 +1158,12 @@ union c2wr_ae {
 
 struct c2wr_init_req {
         struct c2wr_hdr hdr;
-        u64 hint_count;
-        u64 q0_host_shared;
-        u64 q1_host_shared;
-        u64 q1_host_msg_pool;
-        u64 q2_host_shared;
-        u64 q2_host_msg_pool;
+        __be64 hint_count;
+        __be64 q0_host_shared;
+        __be64 q1_host_shared;
+        __be64 q1_host_msg_pool;
+        __be64 q2_host_shared;
+        __be64 q2_host_msg_pool;
 } __attribute__((packed));
 
 struct c2wr_init_rep {
@@ -1276,10 +1276,10 @@ struct c2wr_ep_listen_create_req {
         struct c2wr_hdr hdr;
         u64 user_context;       /* returned in AEs. */
         u32 rnic_handle;
-        u32 local_addr;         /* local addr, or 0 */
-        u16 local_port;         /* 0 means "pick one" */
+        __be32 local_addr;      /* local addr, or 0 */
+        __be16 local_port;      /* 0 means "pick one" */
         u16 pad;
-        u32 backlog;            /* tradional tcp listen bl */
+        __be32 backlog;         /* tradional tcp listen bl */
 } __attribute__((packed));
 
 struct c2wr_ep_listen_create_rep {
@@ -1340,7 +1340,7 @@ struct c2wr_cr_accept_req {
         u32 rnic_handle;
         u32 qp_handle;          /* QP to bind to this LLP conn */
         u32 ep_handle;          /* LLP handle to accept */
-        u32 private_data_length;
+        __be32 private_data_length;
         u8 private_data[0];     /* data in-line in msg. */
 } __attribute__((packed));
 
@@ -1508,7 +1508,7 @@ static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
 {
         ((struct c2wr_hdr *) wr)->sge_count = sge_count;
 }
-static __inline__ u32 c2_wr_get_wqe_count(void *wr)
+static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
 {
         return ((struct c2wr_hdr *) wr)->wqe_count;
 }
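Most of the patch is the c2_wr.h annotations: once the message structures exchanged with the adapter carry __be* member types, every producer and consumer is checked where the field is touched, and a forgotten conversion shows up as a sparse warning instead of a silent byte-order bug on big-endian hosts. A hedged sketch of the round trip, with hypothetical message types in the style of these definitions (not actual amso1100 messages):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_cq_create_req {
        __be64 shared_ht;       /* DMA address, big-endian on the wire */
        __be32 msg_size;
        __be32 depth;
} __attribute__((packed));

struct demo_cq_create_rep {
        __be32 mq_index;
        __be32 adapter_shared;
} __attribute__((packed));

static void demo_build_req(struct demo_cq_create_req *wr,
                           u64 ht, u32 msg_size, u32 depth)
{
        wr->shared_ht = cpu_to_be64(ht);
        wr->msg_size  = cpu_to_be32(msg_size);
        wr->depth     = cpu_to_be32(depth);
}

static u32 demo_parse_rep(const struct demo_cq_create_rep *reply)
{
        /* Returning reply->mq_index directly would now be a sparse warning. */
        return be32_to_cpu(reply->mq_index);
}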