author		Andy Grover <andy.grover@oracle.com>	2010-03-01 17:11:53 -0500
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:11:58 -0400
commit		f8b3aaf2ba8ca9e27b47f8bfdff07c8b968f2c05 (patch)
tree		42de574167e590ca2e413314b6486df916cd1f31 /net/rds/rdma.c
parent		d0ab25a83c4a08cd98b73a37d3f4c069f7b4f50b (diff)
RDS: Remove struct rds_rdma_op
A big changeset, but it's all pretty dumb.
struct rds_rdma_op was already embedded in struct rm_rdma_op.
Remove rds_rdma_op and put its members in rm_rdma_op. Rename
members with "op_" prefix instead of "r_", for consistency.
Of course this breaks a lot, so fixup the code accordingly.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
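
For orientation, a rough before/after sketch of what the rename means for the fields touched in this file. The member list and types below are inferred from how the fields are used in net/rds/rdma.c and are assumptions on my part; the authoritative definitions live in net/rds/rds.h, which is not part of this diff.

/* Before (assumed sketch): rm->rdma was a struct rm_rdma_op that wrapped an
 * embedded struct rds_rdma_op (rm->rdma.m_rdma_op) plus rm->rdma.m_rdma_mr. */
struct rds_rdma_op {
	u32			r_key;		/* remote R_Key taken from the cookie */
	u64			r_remote_addr;	/* remote address plus cookie offset */
	unsigned int		r_write:1, r_fence:1, r_notify:1, r_recverr:1, r_active:1;
	unsigned int		r_bytes;	/* total length of the local vectors */
	unsigned int		r_nents;	/* scatterlist entries used in r_sg */
	struct rds_notifier	*r_notifier;	/* completion notification, if requested */
	struct scatterlist	*r_sg;		/* pinned local pages */
};

/* After (assumed sketch): the same members sit directly in struct rm_rdma_op
 * (rm->rdma), renamed with the op_ prefix, alongside the MR pointer. */
struct rm_rdma_op {
	u32			op_rkey;
	u64			op_remote_addr;
	unsigned int		op_write:1, op_fence:1, op_notify:1, op_recverr:1, op_active:1;
	unsigned int		op_bytes;
	unsigned int		op_nents;
	struct rds_notifier	*op_notifier;
	struct scatterlist	*op_sg;
	struct rds_mr		*op_rdma_mr;	/* set by rds_cmsg_rdma_dest()/rds_cmsg_rdma_map() */
};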
Diffstat (limited to 'net/rds/rdma.c')
-rw-r--r--	net/rds/rdma.c	58
1 file changed, 29 insertions, 29 deletions
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 0df86a382e2e..8d22999b0471 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -440,26 +440,26 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
 	rds_mr_put(mr);
 }
 
-void rds_rdma_free_op(struct rds_rdma_op *ro)
+void rds_rdma_free_op(struct rm_rdma_op *ro)
 {
 	unsigned int i;
 
-	for (i = 0; i < ro->r_nents; i++) {
-		struct page *page = sg_page(&ro->r_sg[i]);
+	for (i = 0; i < ro->op_nents; i++) {
+		struct page *page = sg_page(&ro->op_sg[i]);
 
 		/* Mark page dirty if it was possibly modified, which
 		 * is the case for a RDMA_READ which copies from remote
 		 * to local memory */
-		if (!ro->r_write) {
+		if (!ro->op_write) {
 			BUG_ON(irqs_disabled());
 			set_page_dirty(page);
 		}
 		put_page(page);
 	}
 
-	kfree(ro->r_notifier);
-	ro->r_notifier = NULL;
-	ro->r_active = 0;
+	kfree(ro->op_notifier);
+	ro->op_notifier = NULL;
+	ro->op_active = 0;
 }
 
 void rds_atomic_free_op(struct rm_atomic_op *ao)
@@ -521,7 +521,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 {
 	struct rds_rdma_args *args;
 	struct rds_iovec vec;
-	struct rds_rdma_op *op = &rm->rdma.m_rdma_op;
+	struct rm_rdma_op *op = &rm->rdma;
 	unsigned int nr_pages;
 	unsigned int nr_bytes;
 	struct page **pages = NULL;
@@ -531,7 +531,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	int ret = 0;
 
 	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
-	    || rm->rdma.m_rdma_op.r_active)
+	    || rm->rdma.op_active)
 		return -EINVAL;
 
 	args = CMSG_DATA(cmsg);
@@ -556,27 +556,27 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		goto out;
 	}
 
-	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
-	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
-	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
-	op->r_active = 1;
-	op->r_recverr = rs->rs_recverr;
+	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
+	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
+	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+	op->op_active = 1;
+	op->op_recverr = rs->rs_recverr;
 	WARN_ON(!nr_pages);
-	op->r_sg = rds_message_alloc_sgs(rm, nr_pages);
+	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
 
-	if (op->r_notify || op->r_recverr) {
+	if (op->op_notify || op->op_recverr) {
 		/* We allocate an uninitialized notifier here, because
 		 * we don't want to do that in the completion handler. We
 		 * would have to use GFP_ATOMIC there, and don't want to deal
 		 * with failed allocations.
 		 */
-		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
-		if (!op->r_notifier) {
+		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
+		if (!op->op_notifier) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		op->r_notifier->n_user_token = args->user_token;
-		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
+		op->op_notifier->n_user_token = args->user_token;
+		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
 	}
 
 	/* The cookie contains the R_Key of the remote memory region, and
@@ -586,15 +586,15 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	 * destination address (which is really an offset into the MR)
 	 * FIXME: We may want to move this into ib_rdma.c
 	 */
-	op->r_key = rds_rdma_cookie_key(args->cookie);
-	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
+	op->op_rkey = rds_rdma_cookie_key(args->cookie);
+	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
 
 	nr_bytes = 0;
 
 	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
 		 (unsigned long long)args->nr_local,
 		 (unsigned long long)args->remote_vec.addr,
-		 op->r_key);
+		 op->op_rkey);
 
 	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
 
@@ -617,7 +617,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		/* If it's a WRITE operation, we want to pin the pages for reading.
 		 * If it's a READ operation, we need to pin the pages for writing.
 		 */
-		ret = rds_pin_pages(vec.addr, nr, pages, !op->r_write);
+		ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
 		if (ret < 0)
 			goto out;
 
@@ -630,7 +630,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			unsigned int offset = vec.addr & ~PAGE_MASK;
 			struct scatterlist *sg;
 
-			sg = &op->r_sg[op->r_nents + j];
+			sg = &op->op_sg[op->op_nents + j];
 			sg_set_page(sg, pages[j],
 					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
 					offset);
@@ -642,7 +642,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			vec.bytes -= sg->length;
 		}
 
-		op->r_nents += nr;
+		op->op_nents += nr;
 	}
 
 	if (nr_bytes > args->remote_vec.bytes) {
@@ -652,7 +652,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		ret = -EINVAL;
 		goto out;
 	}
-	op->r_bytes = nr_bytes;
+	op->op_bytes = nr_bytes;
 
 	ret = 0;
 out:
@@ -700,7 +700,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 
 	if (mr) {
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
-		rm->rdma.m_rdma_mr = mr;
+		rm->rdma.op_rdma_mr = mr;
 	}
 	return err;
 }
@@ -718,7 +718,7 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
 
-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.m_rdma_mr);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
 }
 
 /*