path: root/drivers/infiniband/hw/amso1100/c2_rnic.c
author    Roland Dreier <rolandd@cisco.com>    2008-04-17 00:01:08 -0400
committer Roland Dreier <rolandd@cisco.com>    2008-04-17 00:01:08 -0400
commit    dc544bc9cb8aa91c5d7fc9116a302f88a8a97250 (patch)
tree      e8ae7a400584c94252371068e08f63373dd449fd /drivers/infiniband/hw/amso1100/c2_rnic.c
parent    d23b9d8ff2fcadc6f2fba83f654a122b9e16f02c (diff)
RDMA/amso1100: Start of endianness annotation
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Diffstat (limited to 'drivers/infiniband/hw/amso1100/c2_rnic.c')
-rw-r--r--    drivers/infiniband/hw/amso1100/c2_rnic.c    24
1 file changed, 12 insertions(+), 12 deletions(-)
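
For readers less familiar with the kernel's sparse-based endianness checking, the pattern this patch starts applying is: 32-bit fields that are big-endian on the wire are typed __be32, conversions to host order go through be32_to_cpu(), and values returned by readl() (which is declared as a CPU-order u32) get a (__force __be32) cast when the register really holds big-endian data, so sparse accepts the conversion. Below is a minimal sketch of that pattern; the struct, helper name, and offset parameter are hypothetical and not part of the amso1100 driver:

#include <linux/types.h>
#include <linux/io.h>
#include <asm/byteorder.h>

/*
 * Hypothetical wire-format header: fields that travel in network
 * (big-endian) byte order are declared __be32, so sparse flags any
 * place they are mixed with CPU-order integers without a conversion.
 */
struct example_wire_hdr {
	__be32 addr;	/* IPv4 address, network byte order */
	__be32 mask;	/* netmask, network byte order */
};

/*
 * Hypothetical MMIO helper: readl() is typed as returning a CPU-order
 * u32, but this particular register holds a big-endian value.  The
 * (__force __be32) cast tells sparse that the raw bits really are
 * big-endian before be32_to_cpu() converts them to host order, which
 * is the same idiom used in c2_rnic_init() below.
 */
static u32 example_read_be_reg(void __iomem *mmio, unsigned long offset)
{
	return be32_to_cpu((__force __be32) readl(mmio + offset));
}

These annotations only pay off when the tree is checked with sparse's endian checking enabled (at the time, make C=2 CF=-D__CHECK_ENDIAN__), which reports any place a __be32 value is used as a plain integer, or vice versa, without an explicit conversion.
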
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 1687c511cb2f..7be1f87dcfa7 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -208,7 +208,7 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
 /*
  * Add an IP address to the RNIC interface
  */
-int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
 	struct c2_vq_req *vq_req;
 	struct c2wr_rnic_setconfig_req *wr;
@@ -270,7 +270,7 @@ int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
 /*
  * Delete an IP address from the RNIC interface
  */
-int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
 {
 	struct c2_vq_req *vq_req;
 	struct c2wr_rnic_setconfig_req *wr;
@@ -506,17 +506,17 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 	mmio_regs = c2dev->kva;
 	/* Initialize the Verbs Request Queue */
 	c2_mq_req_init(&c2dev->req_vq, 0,
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
 		       mmio_regs +
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
 		       C2_MQ_ADAPTER_TARGET);
 
 	/* Initialize the Verbs Reply Queue */
-	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
-	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
+	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
+	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
 	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
 				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
@@ -532,12 +532,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		     msgsize,
 		     q1_pages,
 		     mmio_regs +
-		     be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
+		     be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
 		     C2_MQ_HOST_TARGET);
 
 	/* Initialize the Asynchronus Event Queue */
-	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
-	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
+	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
+	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
 	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
 				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
@@ -553,7 +553,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		     msgsize,
 		     q2_pages,
 		     mmio_regs +
-		     be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
+		     be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
 		     C2_MQ_HOST_TARGET);
 
 	/* Initialize the verbs request allocator */