Diffstat (limited to 'drivers/block/rsxx/dma.c')
-rw-r--r--  drivers/block/rsxx/dma.c | 185
1 file changed, 96 insertions(+), 89 deletions(-)
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 0607513cfb41..bed32f16b084 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -245,6 +245,22 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
         kmem_cache_free(rsxx_dma_pool, dma);
 }
 
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+                           struct list_head *q)
+{
+        struct rsxx_dma *dma;
+        struct rsxx_dma *tmp;
+        int cnt = 0;
+
+        list_for_each_entry_safe(dma, tmp, q, list) {
+                list_del(&dma->list);
+                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+                cnt++;
+        }
+
+        return cnt;
+}
+
 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
                              struct rsxx_dma *dma)
 {
@@ -252,9 +268,10 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
          * Requeued DMAs go to the front of the queue so they are issued
          * first.
          */
-        spin_lock(&ctrl->queue_lock);
+        spin_lock_bh(&ctrl->queue_lock);
+        ctrl->stats.sw_q_depth++;
         list_add(&dma->list, &ctrl->queue);
-        spin_unlock(&ctrl->queue_lock);
+        spin_unlock_bh(&ctrl->queue_lock);
 }
 
 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
@@ -329,6 +346,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 static void dma_engine_stalled(unsigned long data)
 {
         struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+        int cnt;
 
         if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
             unlikely(ctrl->card->eeh_state))
@@ -349,18 +367,28 @@ static void dma_engine_stalled(unsigned long data)
                         "DMA channel %d has stalled, faulting interface.\n",
                         ctrl->id);
                 ctrl->card->dma_fault = 1;
+
+                /* Clean up the DMA queue */
+                spin_lock(&ctrl->queue_lock);
+                cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+                spin_unlock(&ctrl->queue_lock);
+
+                cnt += rsxx_dma_cancel(ctrl);
+
+                if (cnt)
+                        dev_info(CARD_TO_DEV(ctrl->card),
+                                "Freed %d queued DMAs on channel %d\n",
+                                cnt, ctrl->id);
         }
 }
 
-static void rsxx_issue_dmas(struct work_struct *work)
+static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 {
-        struct rsxx_dma_ctrl *ctrl;
         struct rsxx_dma *dma;
         int tag;
         int cmds_pending = 0;
         struct hw_cmd *hw_cmd_buf;
 
-        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
         hw_cmd_buf = ctrl->cmd.buf;
 
         if (unlikely(ctrl->card->halt) ||
@@ -368,22 +396,22 @@ static void rsxx_issue_dmas(struct work_struct *work)
                 return;
 
         while (1) {
-                spin_lock(&ctrl->queue_lock);
+                spin_lock_bh(&ctrl->queue_lock);
                 if (list_empty(&ctrl->queue)) {
-                        spin_unlock(&ctrl->queue_lock);
+                        spin_unlock_bh(&ctrl->queue_lock);
                         break;
                 }
-                spin_unlock(&ctrl->queue_lock);
+                spin_unlock_bh(&ctrl->queue_lock);
 
                 tag = pop_tracker(ctrl->trackers);
                 if (tag == -1)
                         break;
 
-                spin_lock(&ctrl->queue_lock);
+                spin_lock_bh(&ctrl->queue_lock);
                 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
                 list_del(&dma->list);
                 ctrl->stats.sw_q_depth--;
-                spin_unlock(&ctrl->queue_lock);
+                spin_unlock_bh(&ctrl->queue_lock);
 
                 /*
                  * This will catch any DMAs that slipped in right before the
@@ -440,9 +468,8 @@
         }
 }
 
-static void rsxx_dma_done(struct work_struct *work)
+static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
 {
-        struct rsxx_dma_ctrl *ctrl;
         struct rsxx_dma *dma;
         unsigned long flags;
         u16 count;
@@ -450,7 +477,6 @@
         u8 tag;
         struct hw_status *hw_st_buf;
 
-        ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
         hw_st_buf = ctrl->status.buf;
 
         if (unlikely(ctrl->card->halt) ||
@@ -520,33 +546,32 @@
         rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
         spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
 
-        spin_lock(&ctrl->queue_lock);
+        spin_lock_bh(&ctrl->queue_lock);
         if (ctrl->stats.sw_q_depth)
                 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
-        spin_unlock(&ctrl->queue_lock);
+        spin_unlock_bh(&ctrl->queue_lock);
 }
 
-static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
-                                  struct list_head *q)
+static void rsxx_schedule_issue(struct work_struct *work)
 {
-        struct rsxx_dma *dma;
-        struct rsxx_dma *tmp;
-        int cnt = 0;
+        struct rsxx_dma_ctrl *ctrl;
 
-        list_for_each_entry_safe(dma, tmp, q, list) {
-                list_del(&dma->list);
+        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 
-                if (dma->dma_addr)
-                        pci_unmap_page(card->dev, dma->dma_addr,
-                                       get_dma_size(dma),
-                                       (dma->cmd == HW_CMD_BLK_WRITE) ?
-                                       PCI_DMA_TODEVICE :
-                                       PCI_DMA_FROMDEVICE);
-                kmem_cache_free(rsxx_dma_pool, dma);
-                cnt++;
-        }
+        mutex_lock(&ctrl->work_lock);
+        rsxx_issue_dmas(ctrl);
+        mutex_unlock(&ctrl->work_lock);
+}
 
-        return cnt;
+static void rsxx_schedule_done(struct work_struct *work)
+{
+        struct rsxx_dma_ctrl *ctrl;
+
+        ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+
+        mutex_lock(&ctrl->work_lock);
+        rsxx_dma_done(ctrl);
+        mutex_unlock(&ctrl->work_lock);
 }
 
 static int rsxx_queue_discard(struct rsxx_cardinfo *card,
@@ -698,10 +723,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 
         for (i = 0; i < card->n_targets; i++) {
                 if (!list_empty(&dma_list[i])) {
-                        spin_lock(&card->ctrl[i].queue_lock);
+                        spin_lock_bh(&card->ctrl[i].queue_lock);
                         card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                         list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
-                        spin_unlock(&card->ctrl[i].queue_lock);
+                        spin_unlock_bh(&card->ctrl[i].queue_lock);
 
                         queue_work(card->ctrl[i].issue_wq,
                                    &card->ctrl[i].issue_dma_work);
@@ -711,8 +736,11 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
         return 0;
 
 bvec_err:
-        for (i = 0; i < card->n_targets; i++)
-                rsxx_cleanup_dma_queue(card, &dma_list[i]);
+        for (i = 0; i < card->n_targets; i++) {
+                spin_lock_bh(&card->ctrl[i].queue_lock);
+                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
+                spin_unlock_bh(&card->ctrl[i].queue_lock);
+        }
 
         return st;
 }
@@ -780,6 +808,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
         spin_lock_init(&ctrl->trackers->lock);
 
         spin_lock_init(&ctrl->queue_lock);
+        mutex_init(&ctrl->work_lock);
         INIT_LIST_HEAD(&ctrl->queue);
 
         setup_timer(&ctrl->activity_timer, dma_engine_stalled,
@@ -793,8 +822,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
         if (!ctrl->done_wq)
                 return -ENOMEM;
 
-        INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
-        INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+        INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
+        INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
 
         st = rsxx_hw_buffers_init(dev, ctrl);
         if (st)
@@ -918,13 +947,30 @@ failed_dma_setup:
         return st;
 }
 
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
+{
+        struct rsxx_dma *dma;
+        int i;
+        int cnt = 0;
+
+        /* Clean up issued DMAs */
+        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+                dma = get_tracker_dma(ctrl->trackers, i);
+                if (dma) {
+                        atomic_dec(&ctrl->stats.hw_q_depth);
+                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+                        push_tracker(ctrl->trackers, i);
+                        cnt++;
+                }
+        }
+
+        return cnt;
+}
 
 void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 {
         struct rsxx_dma_ctrl *ctrl;
-        struct rsxx_dma *dma;
-        int i, j;
-        int cnt = 0;
+        int i;
 
         for (i = 0; i < card->n_targets; i++) {
                 ctrl = &card->ctrl[i];
@@ -943,33 +989,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
                 del_timer_sync(&ctrl->activity_timer);
 
                 /* Clean up the DMA queue */
-                spin_lock(&ctrl->queue_lock);
-                cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
-                spin_unlock(&ctrl->queue_lock);
-
-                if (cnt)
-                        dev_info(CARD_TO_DEV(card),
-                                "Freed %d queued DMAs on channel %d\n",
-                                cnt, i);
-
-                /* Clean up issued DMAs */
-                for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
-                        dma = get_tracker_dma(ctrl->trackers, j);
-                        if (dma) {
-                                pci_unmap_page(card->dev, dma->dma_addr,
-                                        get_dma_size(dma),
-                                        (dma->cmd == HW_CMD_BLK_WRITE) ?
-                                        PCI_DMA_TODEVICE :
-                                        PCI_DMA_FROMDEVICE);
-                                kmem_cache_free(rsxx_dma_pool, dma);
-                                cnt++;
-                        }
-                }
+                spin_lock_bh(&ctrl->queue_lock);
+                rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+                spin_unlock_bh(&ctrl->queue_lock);
 
-                if (cnt)
-                        dev_info(CARD_TO_DEV(card),
-                                "Freed %d pending DMAs on channel %d\n",
-                                cnt, i);
+                rsxx_dma_cancel(ctrl);
 
                 vfree(ctrl->trackers);
 
@@ -1013,7 +1037,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
                         cnt++;
                 }
 
-                spin_lock(&card->ctrl[i].queue_lock);
+                spin_lock_bh(&card->ctrl[i].queue_lock);
                 list_splice(&issued_dmas[i], &card->ctrl[i].queue);
 
                 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
@@ -1028,7 +1052,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
                                        PCI_DMA_TODEVICE :
                                        PCI_DMA_FROMDEVICE);
                 }
-                spin_unlock(&card->ctrl[i].queue_lock);
+                spin_unlock_bh(&card->ctrl[i].queue_lock);
         }
 
         kfree(issued_dmas);
@@ -1036,30 +1060,13 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
         return 0;
 }
 
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
-{
-        struct rsxx_dma *dma;
-        struct rsxx_dma *tmp;
-        int i;
-
-        for (i = 0; i < card->n_targets; i++) {
-                spin_lock(&card->ctrl[i].queue_lock);
-                list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
-                        list_del(&dma->list);
-
-                        rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
-                }
-                spin_unlock(&card->ctrl[i].queue_lock);
-        }
-}
-
 int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
 {
         struct rsxx_dma *dma;
         int i;
 
         for (i = 0; i < card->n_targets; i++) {
-                spin_lock(&card->ctrl[i].queue_lock);
+                spin_lock_bh(&card->ctrl[i].queue_lock);
                 list_for_each_entry(dma, &card->ctrl[i].queue, list) {
                         dma->dma_addr = pci_map_page(card->dev, dma->page,
                                         dma->pg_off, get_dma_size(dma),
@@ -1067,12 +1074,12 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
                                         PCI_DMA_TODEVICE :
                                         PCI_DMA_FROMDEVICE);
                         if (!dma->dma_addr) {
-                                spin_unlock(&card->ctrl[i].queue_lock);
+                                spin_unlock_bh(&card->ctrl[i].queue_lock);
                                 kmem_cache_free(rsxx_dma_pool, dma);
                                 return -ENOMEM;
                         }
                 }
-                spin_unlock(&card->ctrl[i].queue_lock);
+                spin_unlock_bh(&card->ctrl[i].queue_lock);
         }
 
         return 0;