| author | Andreas Gruenbacher <agruen@linbit.com> | 2011-01-25 09:37:43 -0500 |
|---|---|---|
| committer | Philipp Reisner <philipp.reisner@linbit.com> | 2011-08-29 05:26:55 -0400 |
| commit | 8554df1c6d3bb7686b39ed775772f507fa857c19 (patch) | |
| tree | 50a85c4313859c6104718c7c680ac33a2584a835 /drivers/block/drbd/drbd_req.c | |
| parent | bb3bfe96144a4535d47ccfea444bc1ef8e02f4e3 (diff) | |
drbd: Convert all constants in enum drbd_req_event to upper case
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r-- | drivers/block/drbd/drbd_req.c | 84 |
1 file changed, 42 insertions, 42 deletions
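For quick reference, this commit is a pure rename of the `enum drbd_req_event` constants from lower case to upper case; the callers in drbd_req.c shown below are updated to match. The following is only an illustrative sketch of the rename, limited to constants visible in this diff — the real enum is defined in drbd_req.h, which is not part of this hunk, and contains more members in a possibly different order.

```c
/* Sketch only: the actual enum drbd_req_event lives in drbd_req.h and has
 * additional members.  The old lower-case spellings are noted in comments. */
enum drbd_req_event {
	CREATED,			/* was: created */
	TO_BE_SENT,			/* was: to_be_send */
	TO_BE_SUBMITTED,		/* was: to_be_submitted */
	COMPLETED_OK,			/* was: completed_ok */
	QUEUE_FOR_NET_READ,		/* was: queue_for_net_read */
	QUEUE_FOR_NET_WRITE,		/* was: queue_for_net_write */
	HANDED_OVER_TO_NETWORK,		/* was: handed_over_to_network */
	BARRIER_ACKED,			/* was: barrier_acked */
	DATA_RECEIVED,			/* was: data_received */
	/* ... the remaining events follow the same pattern ... */
};
```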
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 8541b16de08b..b3b1d4edbb03 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -225,10 +225,10 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 		return;
 
 	if (req->master_bio) {
-		/* this is data_received (remote read)
+		/* this is DATA_RECEIVED (remote read)
 		 * or protocol C P_WRITE_ACK
 		 * or protocol B P_RECV_ACK
-		 * or protocol A "handed_over_to_network" (SendAck)
+		 * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
 		 * or canceled or failed,
 		 * or killed from the transfer log due to connection loss.
 		 */
@@ -393,11 +393,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 	/* does not happen...
 	 * initialization done in drbd_req_new
-	case created:
+	case CREATED:
 		break;
 	*/
 
-	case to_be_send: /* via network */
+	case TO_BE_SENT: /* via network */
 		/* reached via drbd_make_request_common
 		 * and from w_read_retry_remote */
 		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
@@ -405,13 +405,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		inc_ap_pending(mdev);
 		break;
 
-	case to_be_submitted: /* locally */
+	case TO_BE_SUBMITTED: /* locally */
 		/* reached via drbd_make_request_common */
 		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
 		req->rq_state |= RQ_LOCAL_PENDING;
 		break;
 
-	case completed_ok:
+	case COMPLETED_OK:
 		if (bio_data_dir(req->master_bio) == WRITE)
 			mdev->writ_cnt += req->i.size >> 9;
 		else
@@ -424,7 +424,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		put_ldev(mdev);
 		break;
 
-	case write_completed_with_error:
+	case WRITE_COMPLETED_WITH_ERROR:
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 
@@ -433,7 +433,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		put_ldev(mdev);
 		break;
 
-	case read_ahead_completed_with_error:
+	case READ_AHEAD_COMPLETED_WITH_ERROR:
 		/* it is legal to fail READA */
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
@@ -441,7 +441,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		put_ldev(mdev);
 		break;
 
-	case read_completed_with_error:
+	case READ_COMPLETED_WITH_ERROR:
 		drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
 
 		req->rq_state |= RQ_LOCAL_COMPLETED;
@@ -459,12 +459,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			break;
 		}
 
-		/* _req_mod(req,to_be_send); oops, recursion... */
+		/* _req_mod(req,TO_BE_SENT); oops, recursion... */
 		req->rq_state |= RQ_NET_PENDING;
 		inc_ap_pending(mdev);
-		/* fall through: _req_mod(req,queue_for_net_read); */
+		/* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */
 
-	case queue_for_net_read:
+	case QUEUE_FOR_NET_READ:
 		/* READ or READA, and
 		 * no local disk,
 		 * or target area marked as invalid,
@@ -486,7 +486,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		drbd_queue_work(&mdev->data.work, &req->w);
 		break;
 
-	case queue_for_net_write:
+	case QUEUE_FOR_NET_WRITE:
 		/* assert something? */
 		/* from drbd_make_request_common only */
 
@@ -533,17 +533,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		break;
 
-	case queue_for_send_oos:
+	case QUEUE_FOR_SEND_OOS:
 		req->rq_state |= RQ_NET_QUEUED;
 		req->w.cb = w_send_oos;
 		drbd_queue_work(&mdev->data.work, &req->w);
 		break;
 
-	case oos_handed_to_network:
+	case OOS_HANDED_TO_NETWORK:
 		/* actually the same */
-	case send_canceled:
+	case SEND_CANCELED:
 		/* treat it the same */
-	case send_failed:
+	case SEND_FAILED:
 		/* real cleanup will be done from tl_clear. just update flags
 		 * so it is no longer marked as on the worker queue */
 		req->rq_state &= ~RQ_NET_QUEUED;
@@ -552,7 +552,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		_req_may_be_done_not_susp(req, m);
 		break;
 
-	case handed_over_to_network:
+	case HANDED_OVER_TO_NETWORK:
 		/* assert something? */
 		if (bio_data_dir(req->master_bio) == WRITE)
 			atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
@@ -573,17 +573,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state &= ~RQ_NET_QUEUED;
 		req->rq_state |= RQ_NET_SENT;
 		/* because _drbd_send_zc_bio could sleep, and may want to
-		 * dereference the bio even after the "write_acked_by_peer" and
-		 * "completed_ok" events came in, once we return from
+		 * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
+		 * "COMPLETED_OK" events came in, once we return from
 		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
 		 * whether it is done already, and end it. */
 		_req_may_be_done_not_susp(req, m);
 		break;
 
-	case read_retry_remote_canceled:
+	case READ_RETRY_REMOTE_CANCELED:
 		req->rq_state &= ~RQ_NET_QUEUED;
 		/* fall through, in case we raced with drbd_disconnect */
-	case connection_lost_while_pending:
+	case CONNECTION_LOST_WHILE_PENDING:
 		/* transfer log cleanup after connection loss */
 		/* assert something? */
 		if (req->rq_state & RQ_NET_PENDING)
@@ -599,19 +599,19 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
-	case write_acked_by_peer_and_sis:
+	case WRITE_ACKED_BY_PEER_AND_SIS:
 		req->rq_state |= RQ_NET_SIS;
-	case conflict_discarded_by_peer:
+	case CONFLICT_DISCARDED_BY_PEER:
 		/* for discarded conflicting writes of multiple primaries,
 		 * there is no need to keep anything in the tl, potential
 		 * node crashes are covered by the activity log. */
-		if (what == conflict_discarded_by_peer)
+		if (what == CONFLICT_DISCARDED_BY_PEER)
 			dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
 				" DRBD is not a random data generator!\n",
 				(unsigned long long)req->i.sector, req->i.size);
 		req->rq_state |= RQ_NET_DONE;
 		/* fall through */
-	case write_acked_by_peer:
+	case WRITE_ACKED_BY_PEER:
 		/* protocol C; successfully written on peer.
 		 * Nothing to do here.
 		 * We want to keep the tl in place for all protocols, to cater
@@ -623,9 +623,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * P_BARRIER_ACK, but that is an unnecessary optimization. */
 
 		/* this makes it effectively the same as for: */
-	case recv_acked_by_peer:
+	case RECV_ACKED_BY_PEER:
 		/* protocol B; pretends to be successfully written on peer.
-		 * see also notes above in handed_over_to_network about
+		 * see also notes above in HANDED_OVER_TO_NETWORK about
 		 * protocol != C */
 		req->rq_state |= RQ_NET_OK;
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -635,7 +635,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		_req_may_be_done_not_susp(req, m);
 		break;
 
-	case neg_acked:
+	case NEG_ACKED:
 		/* assert something? */
 		if (req->rq_state & RQ_NET_PENDING) {
 			dec_ap_pending(mdev);
@@ -645,17 +645,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		req->rq_state |= RQ_NET_DONE;
 		_req_may_be_done_not_susp(req, m);
-		/* else: done by handed_over_to_network */
+		/* else: done by HANDED_OVER_TO_NETWORK */
 		break;
 
-	case fail_frozen_disk_io:
+	case FAIL_FROZEN_DISK_IO:
 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
 			break;
 
 		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
-	case restart_frozen_disk_io:
+	case RESTART_FROZEN_DISK_IO:
 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
 			break;
 
@@ -670,7 +670,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		drbd_queue_work(&mdev->data.work, &req->w);
 		break;
 
-	case resend:
+	case RESEND:
 		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
 		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
 		   Trowing them out of the TL here by pretending we got a BARRIER_ACK
@@ -682,9 +682,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			}
 			break;
 		}
-		/* else, fall through to barrier_acked */
+		/* else, fall through to BARRIER_ACKED */
 
-	case barrier_acked:
+	case BARRIER_ACKED:
 		if (!(req->rq_state & RQ_WRITE))
 			break;
 
@@ -692,7 +692,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			/* barrier came in before all requests have been acked.
 			 * this is bad, because if the connection is lost now,
 			 * we won't be able to clean them up... */
-			dev_err(DEV, "FIXME (barrier_acked but pending)\n");
+			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
 			list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
 		}
 		if ((req->rq_state & RQ_NET_MASK) != 0) {
@@ -703,7 +703,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
-	case data_received:
+	case DATA_RECEIVED:
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
@@ -924,9 +924,9 @@ allocate_barrier:
 	/* mark them early for readability.
 	 * this just sets some state flags. */
 	if (remote)
-		_req_mod(req, to_be_send);
+		_req_mod(req, TO_BE_SENT);
 	if (local)
-		_req_mod(req, to_be_submitted);
+		_req_mod(req, TO_BE_SUBMITTED);
 
 	/* check this request on the collision detection hash tables.
 	 * if we have a conflict, just complete it here.
@@ -944,11 +944,11 @@ allocate_barrier:
 		 * or READ, but not in sync.
 		 */
 		_req_mod(req, (rw == WRITE)
				? queue_for_net_write
				? QUEUE_FOR_NET_WRITE
-				: queue_for_net_read);
+				: QUEUE_FOR_NET_READ);
 	}
 	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
-		_req_mod(req, queue_for_send_oos);
+		_req_mod(req, QUEUE_FOR_SEND_OOS);
 
 	if (remote &&
 	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {