author     Philip J Kelleher <pjk1939@linux.vnet.ibm.com>  2013-06-18 15:36:26 -0400
committer  Jens Axboe <axboe@kernel.dk>                    2013-06-19 07:52:09 -0400
commit     0ab4743ebc18c23bddf3e288cfc6221ec71533ac
tree       0977d5fe4a55f914e3bc9e2f4a7847ff555d5417 /drivers/block/rsxx
parent     a3299ab18591d36ad5622f5064619123c439b779
rsxx: Restructured DMA cancel scheme.
Previously, DMAs were never cancelled after a data stall or an EEH
permanent failure, which caused an unrecoverable I/O hang.

The DMA cancellation mechanism has been restructured to fix these
issues and to allow DMAs to be cancelled during the events mentioned
above.
Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
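
The patch repeats one two-step teardown at each failure site
(rsxx_eeh_failure(), dma_engine_stalled(), rsxx_dma_destroy()). As a
reading aid, that flow condenses to roughly the sketch below;
cancel_channel_dmas() is a hypothetical wrapper, not something the
patch adds — the driver open-codes these steps at each call site:

    /*
     * Sketch of the new cancel scheme (hypothetical wrapper; the
     * driver open-codes this at each failure site).
     */
    static void cancel_channel_dmas(struct rsxx_dma_ctrl *ctrl)
    {
            int cnt;

            /* Step 1: complete every queued-but-unissued DMA as
             * cancelled.  queue_lock is now taken with bottom halves
             * disabled because the stall timer (softirq context)
             * also takes it. */
            spin_lock_bh(&ctrl->queue_lock);
            cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
            spin_unlock_bh(&ctrl->queue_lock);

            /* Step 2: reclaim every tracker slot that still holds a
             * DMA already issued to the hardware and cancel it too. */
            cnt += rsxx_dma_cancel(ctrl);

            if (cnt)
                    dev_info(CARD_TO_DEV(ctrl->card),
                             "Freed %d queued DMAs on channel %d\n",
                             cnt, ctrl->id);
    }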
Diffstat (limited to 'drivers/block/rsxx')
-rw-r--r--  drivers/block/rsxx/core.c       |  17
-rw-r--r--  drivers/block/rsxx/dev.c        |   6
-rw-r--r--  drivers/block/rsxx/dma.c        | 161
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h  |   4
4 files changed, 95 insertions(+), 93 deletions(-)
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 774f810c6a9c..aca3f198e5cd 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -368,15 +368,26 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
 {
         struct rsxx_cardinfo *card = pci_get_drvdata(dev);
         int i;
+        int cnt = 0;
 
         dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
 
         card->eeh_state = 1;
+        card->halt = 1;
+
+        for (i = 0; i < card->n_targets; i++) {
+                spin_lock_bh(&card->ctrl[i].queue_lock);
+                cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
+                                             &card->ctrl[i].queue);
+                spin_unlock_bh(&card->ctrl[i].queue_lock);
 
-        for (i = 0; i < card->n_targets; i++)
-                del_timer_sync(&card->ctrl[i].activity_timer);
+                cnt += rsxx_dma_cancel(&card->ctrl[i]);
 
-        rsxx_eeh_cancel_dmas(card);
+                if (cnt)
+                        dev_info(CARD_TO_DEV(card),
+                                "Freed %d queued DMAs on channel %d\n",
+                                cnt, card->ctrl[i].id);
+        }
 }
 
 static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 4346d17d2949..604ad2dafa43 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -155,7 +155,8 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
                 atomic_set(&meta->error, 1);
 
         if (atomic_dec_and_test(&meta->pending_dmas)) {
-                disk_stats_complete(card, meta->bio, meta->start_time);
+                if (!card->eeh_state && card->gendisk)
+                        disk_stats_complete(card, meta->bio, meta->start_time);
 
                 bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
                 kmem_cache_free(bio_meta_pool, meta);
@@ -196,7 +197,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
         atomic_set(&bio_meta->pending_dmas, 0);
         bio_meta->start_time = jiffies;
 
-        disk_stats_start(card, bio);
+        if (!unlikely(card->halt))
+                disk_stats_start(card, bio);
 
         dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
                 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 0607513cfb41..213e40e4bd92 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -245,6 +245,22 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
         kmem_cache_free(rsxx_dma_pool, dma);
 }
 
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+                           struct list_head *q)
+{
+        struct rsxx_dma *dma;
+        struct rsxx_dma *tmp;
+        int cnt = 0;
+
+        list_for_each_entry_safe(dma, tmp, q, list) {
+                list_del(&dma->list);
+                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+                cnt++;
+        }
+
+        return cnt;
+}
+
 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
                              struct rsxx_dma *dma)
 {
@@ -252,9 +268,9 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
          * Requeued DMAs go to the front of the queue so they are issued
          * first.
          */
-        spin_lock(&ctrl->queue_lock);
+        spin_lock_bh(&ctrl->queue_lock);
         list_add(&dma->list, &ctrl->queue);
-        spin_unlock(&ctrl->queue_lock);
+        spin_unlock_bh(&ctrl->queue_lock);
 }
 
 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
@@ -329,6 +345,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 static void dma_engine_stalled(unsigned long data)
 {
         struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+        int cnt;
 
         if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
             unlikely(ctrl->card->eeh_state))
@@ -349,6 +366,18 @@ static void dma_engine_stalled(unsigned long data)
                         "DMA channel %d has stalled, faulting interface.\n",
                         ctrl->id);
                 ctrl->card->dma_fault = 1;
+
+                /* Clean up the DMA queue */
+                spin_lock(&ctrl->queue_lock);
+                cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+                spin_unlock(&ctrl->queue_lock);
+
+                cnt += rsxx_dma_cancel(ctrl);
+
+                if (cnt)
+                        dev_info(CARD_TO_DEV(ctrl->card),
+                                "Freed %d queued DMAs on channel %d\n",
+                                cnt, ctrl->id);
         }
 }
 
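This hunk also motivates the blanket spin_lock() -> spin_lock_bh()
conversion throughout the patch: dma_engine_stalled() is the channel's
activity-timer callback, so it runs in softirq context and now takes
queue_lock (with a plain spin_lock(), since bottom halves are already
disabled there). Every process-context user of the same lock must
therefore disable bottom halves, or the timer could fire on the same
CPU while the lock is held and deadlock. A minimal illustration of the
rule, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/timer.h>

    static DEFINE_SPINLOCK(example_lock);

    /* Timer callbacks run in softirq context; a plain spin_lock()
     * suffices here because bottom halves are already disabled. */
    static void example_timer_fn(unsigned long data)
    {
            spin_lock(&example_lock);
            /* ... cancel outstanding work ... */
            spin_unlock(&example_lock);
    }

    /* Process context must block softirqs before taking the lock;
     * otherwise example_timer_fn() could preempt this path on the
     * same CPU and spin forever waiting for example_lock. */
    static void example_process_path(void)
    {
            spin_lock_bh(&example_lock);
            /* ... queue new work ... */
            spin_unlock_bh(&example_lock);
    }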
@@ -368,22 +397,22 @@ static void rsxx_issue_dmas(struct work_struct *work)
                 return;
 
         while (1) {
-                spin_lock(&ctrl->queue_lock);
+                spin_lock_bh(&ctrl->queue_lock);
                 if (list_empty(&ctrl->queue)) {
-                        spin_unlock(&ctrl->queue_lock);
+                        spin_unlock_bh(&ctrl->queue_lock);
                         break;
                 }
-                spin_unlock(&ctrl->queue_lock);
+                spin_unlock_bh(&ctrl->queue_lock);
 
                 tag = pop_tracker(ctrl->trackers);
                 if (tag == -1)
                         break;
 
-                spin_lock(&ctrl->queue_lock);
+                spin_lock_bh(&ctrl->queue_lock);
                 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
                 list_del(&dma->list);
                 ctrl->stats.sw_q_depth--;
-                spin_unlock(&ctrl->queue_lock);
+                spin_unlock_bh(&ctrl->queue_lock);
 
                 /*
                  * This will catch any DMAs that slipped in right before the
@@ -520,33 +549,10 @@ static void rsxx_dma_done(struct work_struct *work)
         rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
         spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
 
-        spin_lock(&ctrl->queue_lock);
+        spin_lock_bh(&ctrl->queue_lock);
         if (ctrl->stats.sw_q_depth)
                 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
-        spin_unlock(&ctrl->queue_lock);
-}
-
-static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
-                                  struct list_head *q)
-{
-        struct rsxx_dma *dma;
-        struct rsxx_dma *tmp;
-        int cnt = 0;
-
-        list_for_each_entry_safe(dma, tmp, q, list) {
-                list_del(&dma->list);
-
-                if (dma->dma_addr)
-                        pci_unmap_page(card->dev, dma->dma_addr,
-                                       get_dma_size(dma),
-                                       (dma->cmd == HW_CMD_BLK_WRITE) ?
-                                       PCI_DMA_TODEVICE :
-                                       PCI_DMA_FROMDEVICE);
-                kmem_cache_free(rsxx_dma_pool, dma);
-                cnt++;
-        }
-
-        return cnt;
+        spin_unlock_bh(&ctrl->queue_lock);
 }
 
 static int rsxx_queue_discard(struct rsxx_cardinfo *card,
@@ -698,10 +704,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 
         for (i = 0; i < card->n_targets; i++) {
                 if (!list_empty(&dma_list[i])) {
-                        spin_lock(&card->ctrl[i].queue_lock);
+                        spin_lock_bh(&card->ctrl[i].queue_lock);
                         card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                         list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
-                        spin_unlock(&card->ctrl[i].queue_lock);
+                        spin_unlock_bh(&card->ctrl[i].queue_lock);
 
                         queue_work(card->ctrl[i].issue_wq,
                                    &card->ctrl[i].issue_dma_work);
@@ -711,8 +717,11 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
         return 0;
 
 bvec_err:
-        for (i = 0; i < card->n_targets; i++)
-                rsxx_cleanup_dma_queue(card, &dma_list[i]);
+        for (i = 0; i < card->n_targets; i++) {
+                spin_lock_bh(&card->ctrl[i].queue_lock);
+                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
+                spin_unlock_bh(&card->ctrl[i].queue_lock);
+        }
 
         return st;
 }
@@ -918,13 +927,30 @@ failed_dma_setup:
         return st;
 }
 
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
+{
+        struct rsxx_dma *dma;
+        int i;
+        int cnt = 0;
+
+        /* Clean up issued DMAs */
+        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+                dma = get_tracker_dma(ctrl->trackers, i);
+                if (dma) {
+                        atomic_dec(&ctrl->stats.hw_q_depth);
+                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+                        push_tracker(ctrl->trackers, i);
+                        cnt++;
+                }
+        }
+
+        return cnt;
+}
 
 void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 {
         struct rsxx_dma_ctrl *ctrl;
-        struct rsxx_dma *dma;
-        int i, j;
-        int cnt = 0;
+        int i;
 
         for (i = 0; i < card->n_targets; i++) {
                 ctrl = &card->ctrl[i];
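Note why routing cancellations through rsxx_complete_dma() fixes the
hang described in the commit message: the old cleanup paths (removed
above and below) unmapped and freed each rsxx_dma directly, so its
completion callback never ran and the bio attached to it was never
ended. Completing with DMA_CANCELLED instead lets bio_dma_done_cb()
finish the bio with -EIO. Roughly, assuming the helper keeps its
existing unmap/notify/free behavior (its body is not part of this
diff):

    /* Assumed shape of the existing completion helper (sketch only,
     * not part of this patch). */
    static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  unsigned int status)
    {
            /* Release the DMA mapping, if one was set up. */
            if (dma->dma_addr)
                    pci_unmap_page(ctrl->card->dev, dma->dma_addr,
                                   get_dma_size(dma),
                                   (dma->cmd == HW_CMD_BLK_WRITE) ?
                                   PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);

            /* Notify the submitter (e.g. bio_dma_done_cb()) so the
             * bio can be ended instead of hanging forever. */
            if (dma->cb)
                    dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

            kmem_cache_free(rsxx_dma_pool, dma);
    }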
@@ -943,33 +969,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
                 del_timer_sync(&ctrl->activity_timer);
 
                 /* Clean up the DMA queue */
-                spin_lock(&ctrl->queue_lock);
-                cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
-                spin_unlock(&ctrl->queue_lock);
+                spin_lock_bh(&ctrl->queue_lock);
+                rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+                spin_unlock_bh(&ctrl->queue_lock);
 
-                if (cnt)
-                        dev_info(CARD_TO_DEV(card),
-                                "Freed %d queued DMAs on channel %d\n",
-                                cnt, i);
-
-                /* Clean up issued DMAs */
-                for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
-                        dma = get_tracker_dma(ctrl->trackers, j);
-                        if (dma) {
-                                pci_unmap_page(card->dev, dma->dma_addr,
-                                               get_dma_size(dma),
-                                               (dma->cmd == HW_CMD_BLK_WRITE) ?
-                                               PCI_DMA_TODEVICE :
-                                               PCI_DMA_FROMDEVICE);
-                                kmem_cache_free(rsxx_dma_pool, dma);
-                                cnt++;
-                        }
-                }
-
-                if (cnt)
-                        dev_info(CARD_TO_DEV(card),
-                                "Freed %d pending DMAs on channel %d\n",
-                                cnt, i);
+                rsxx_dma_cancel(ctrl);
 
                 vfree(ctrl->trackers);
 
@@ -1013,7 +1017,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
                         cnt++;
                 }
 
-                spin_lock(&card->ctrl[i].queue_lock);
+                spin_lock_bh(&card->ctrl[i].queue_lock);
                 list_splice(&issued_dmas[i], &card->ctrl[i].queue);
 
                 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
@@ -1028,7 +1032,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
                                                PCI_DMA_TODEVICE :
                                                PCI_DMA_FROMDEVICE);
                 }
-                spin_unlock(&card->ctrl[i].queue_lock);
+                spin_unlock_bh(&card->ctrl[i].queue_lock);
         }
 
         kfree(issued_dmas);
@@ -1036,30 +1040,13 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
         return 0;
 }
 
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
-{
-        struct rsxx_dma *dma;
-        struct rsxx_dma *tmp;
-        int i;
-
-        for (i = 0; i < card->n_targets; i++) {
-                spin_lock(&card->ctrl[i].queue_lock);
-                list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
-                        list_del(&dma->list);
-
-                        rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
-                }
-                spin_unlock(&card->ctrl[i].queue_lock);
-        }
-}
-
 int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
 {
         struct rsxx_dma *dma;
         int i;
 
         for (i = 0; i < card->n_targets; i++) {
-                spin_lock(&card->ctrl[i].queue_lock);
+                spin_lock_bh(&card->ctrl[i].queue_lock);
                 list_for_each_entry(dma, &card->ctrl[i].queue, list) {
                         dma->dma_addr = pci_map_page(card->dev, dma->page,
                                                      dma->pg_off, get_dma_size(dma),
@@ -1067,12 +1054,12 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
                                                      PCI_DMA_TODEVICE :
                                                      PCI_DMA_FROMDEVICE);
                         if (!dma->dma_addr) {
-                                spin_unlock(&card->ctrl[i].queue_lock);
+                                spin_unlock_bh(&card->ctrl[i].queue_lock);
                                 kmem_cache_free(rsxx_dma_pool, dma);
                                 return -ENOMEM;
                         }
                 }
-                spin_unlock(&card->ctrl[i].queue_lock);
+                spin_unlock_bh(&card->ctrl[i].queue_lock);
         }
 
         return 0;
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 0dd62d966772..60b6ed6779ac 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -39,6 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/timer.h>
 #include <linux/ioctl.h>
+#include <linux/delay.h>
 
 #include "rsxx.h"
 #include "rsxx_cfg.h"
@@ -374,6 +375,8 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
 int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
@@ -384,7 +387,6 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                         void *cb_data);
 int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
 int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
 int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
 
 /***** cregs.c *****/