path: root/drivers/block/drbd/drbd_req.c
author	Philipp Reisner <philipp.reisner@linbit.com>	2011-01-19 07:12:45 -0500
committer	Philipp Reisner <philipp.reisner@linbit.com>	2011-08-29 05:27:03 -0400
commit	89e58e755e37137135c28a90c93be1b28faff485 (patch)
tree	d451a7184a8d48b4f8096417375fb618968e505f	/drivers/block/drbd/drbd_req.c
parent	2111438b30a509cfe8a1595d7fad304308ff2466 (diff)
drbd: moved net_conf from mdev to tconn
Besides moving the struct member, everything else is generated by:

  sed -i -e 's/mdev->net_conf/mdev->tconn->net_conf/g' \
         -e 's/odev->net_conf/odev->tconn->net_conf/g' \
         *.[ch]

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
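For readers unfamiliar with the drbd object model, a rough sketch of what the move amounts to (simplified, not the actual drbd declarations):

/* Simplified sketch of the layout change: net_conf now hangs off the
 * per-connection object instead of the per-device object, and devices
 * reach it through their tconn pointer. */
struct net_conf;

struct drbd_tconn {			/* one per connection/peer */
	struct net_conf *net_conf;	/* moved here from struct drbd_conf */
	/* ... */
};

struct drbd_conf {			/* one per device ("mdev") */
	struct drbd_tconn *tconn;	/* the connection this device uses */
	/* ... */
};

/* hence the mechanical rewrite below:
 *	mdev->net_conf->max_epoch_size
 * becomes
 *	mdev->tconn->net_conf->max_epoch_size
 */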
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r--	drivers/block/drbd/drbd_req.c	| 22
1 files changed, 11 insertions, 11 deletions
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index b3b1d4edbb03..2b2662d4ab3c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -528,7 +528,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		drbd_queue_work(&mdev->data.work, &req->w);
 
 		/* close the epoch, in case it outgrew the limit */
-		if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
+		if (mdev->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
 			queue_barrier(mdev);
 
 		break;
@@ -558,7 +558,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
 
 		if (bio_data_dir(req->master_bio) == WRITE &&
-		    mdev->net_conf->wire_protocol == DRBD_PROT_A) {
+		    mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A) {
 			/* this is what is dangerous about protocol A:
 			 * pretend it was successfully written on the peer. */
 			if (req->rq_state & RQ_NET_PENDING) {
@@ -697,8 +697,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		}
 		if ((req->rq_state & RQ_NET_MASK) != 0) {
 			req->rq_state |= RQ_NET_DONE;
-			if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
-				atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
+			if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A)
+				atomic_sub(req->i.size>>9, &mdev->ap_in_flight);
 		}
 		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
@@ -951,16 +951,16 @@ allocate_barrier:
 		_req_mod(req, QUEUE_FOR_SEND_OOS);
 
 	if (remote &&
-	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
+	    mdev->tconn->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
 		int congested = 0;
 
-		if (mdev->net_conf->cong_fill &&
-		    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+		if (mdev->tconn->net_conf->cong_fill &&
+		    atomic_read(&mdev->ap_in_flight) >= mdev->tconn->net_conf->cong_fill) {
 			dev_info(DEV, "Congestion-fill threshold reached\n");
 			congested = 1;
 		}
 
-		if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+		if (mdev->act_log->used >= mdev->tconn->net_conf->cong_extents) {
 			dev_info(DEV, "Congestion-extents threshold reached\n");
 			congested = 1;
 		}
@@ -968,9 +968,9 @@ allocate_barrier:
 		if (congested) {
 			queue_barrier(mdev); /* last barrier, after mirrored writes */
 
-			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+			if (mdev->tconn->net_conf->on_congestion == OC_PULL_AHEAD)
 				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-			else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+			else /*mdev->tconn->net_conf->on_congestion == OC_DISCONNECT */
 				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
 		}
 	}
@@ -1182,7 +1182,7 @@ void request_timer_fn(unsigned long data)
 	unsigned long et = 0; /* effective timeout = ko_count * timeout */
 
 	if (get_net_conf(mdev)) {
-		et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+		et = mdev->tconn->net_conf->timeout*HZ/10 * mdev->tconn->net_conf->ko_count;
 		put_net_conf(mdev);
 	}
 	if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
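As an aside on the last hunk: the configured net timeout is in tenths of a second, so timeout*HZ/10 converts it to jiffies before ko_count scales it into the effective timeout. A minimal user-space illustration of the same arithmetic, with made-up values (HZ and the settings below are assumptions, not taken from this page):

#include <stdio.h>

#define HZ 250	/* assumed tick rate, purely for illustration */

int main(void)
{
	/* hypothetical settings: timeout is in units of 0.1 s */
	unsigned long timeout  = 60;	/* 6.0 seconds per attempt */
	unsigned long ko_count = 7;	/* give up after 7 missed attempts */

	/* same arithmetic as the hunk above: deciseconds -> jiffies, times ko_count */
	unsigned long et = timeout * HZ / 10 * ko_count;

	printf("effective timeout: %lu jiffies (~%lu s)\n", et, et / HZ);
	return 0;
}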