diff options
author | Philip J Kelleher <pjk1939@linux.vnet.ibm.com> | 2013-09-04 14:59:02 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2013-11-08 11:10:28 -0500 |
commit | e5feab229f199dadee91073fbef5b507046086fd (patch) | |
tree | a5a5f75cd2ad53ecda8a9e2b3e95046e119be148 | |
parent | ef7e7c82e02b602f29c2b87f42dcd6143a6777da (diff) |
rsxx: Handling failed pci_map_page on PowerPC and double free.
The rsxx driver was not checking the correct value during a
pci_map_page failure. Fixing this also uncovered a
double free if the bio was returned before it was
broken up into individual 4k DMAs; that is also
fixed here.
Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | drivers/block/rsxx/core.c | 3 | ||||
-rw-r--r-- | drivers/block/rsxx/dma.c | 47 | ||||
-rw-r--r-- | drivers/block/rsxx/rsxx_priv.h | 9 |
3 files changed, 37 insertions, 22 deletions
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 6e85e21445eb..e740a650d546 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c | |||
@@ -654,7 +654,8 @@ static void rsxx_eeh_failure(struct pci_dev *dev) | |||
654 | for (i = 0; i < card->n_targets; i++) { | 654 | for (i = 0; i < card->n_targets; i++) { |
655 | spin_lock_bh(&card->ctrl[i].queue_lock); | 655 | spin_lock_bh(&card->ctrl[i].queue_lock); |
656 | cnt = rsxx_cleanup_dma_queue(&card->ctrl[i], | 656 | cnt = rsxx_cleanup_dma_queue(&card->ctrl[i], |
657 | &card->ctrl[i].queue); | 657 | &card->ctrl[i].queue, |
658 | COMPLETE_DMA); | ||
658 | spin_unlock_bh(&card->ctrl[i].queue_lock); | 659 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
659 | 660 | ||
660 | cnt += rsxx_dma_cancel(&card->ctrl[i]); | 661 | cnt += rsxx_dma_cancel(&card->ctrl[i]); |
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index bed32f16b084..71d1ca2a1444 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c | |||
@@ -221,6 +221,19 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | /*----------------- RSXX DMA Handling -------------------*/ | 223 | /*----------------- RSXX DMA Handling -------------------*/ |
224 | static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma) | ||
225 | { | ||
226 | if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) { | ||
227 | pci_unmap_page(ctrl->card->dev, dma->dma_addr, | ||
228 | get_dma_size(dma), | ||
229 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
230 | PCI_DMA_TODEVICE : | ||
231 | PCI_DMA_FROMDEVICE); | ||
232 | } | ||
233 | |||
234 | kmem_cache_free(rsxx_dma_pool, dma); | ||
235 | } | ||
236 | |||
224 | static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, | 237 | static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, |
225 | struct rsxx_dma *dma, | 238 | struct rsxx_dma *dma, |
226 | unsigned int status) | 239 | unsigned int status) |
@@ -232,21 +245,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, | |||
232 | if (status & DMA_CANCELLED) | 245 | if (status & DMA_CANCELLED) |
233 | ctrl->stats.dma_cancelled++; | 246 | ctrl->stats.dma_cancelled++; |
234 | 247 | ||
235 | if (dma->dma_addr) | ||
236 | pci_unmap_page(ctrl->card->dev, dma->dma_addr, | ||
237 | get_dma_size(dma), | ||
238 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
239 | PCI_DMA_TODEVICE : | ||
240 | PCI_DMA_FROMDEVICE); | ||
241 | |||
242 | if (dma->cb) | 248 | if (dma->cb) |
243 | dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); | 249 | dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); |
244 | 250 | ||
245 | kmem_cache_free(rsxx_dma_pool, dma); | 251 | rsxx_free_dma(ctrl, dma); |
246 | } | 252 | } |
247 | 253 | ||
248 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, | 254 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, |
249 | struct list_head *q) | 255 | struct list_head *q, unsigned int done) |
250 | { | 256 | { |
251 | struct rsxx_dma *dma; | 257 | struct rsxx_dma *dma; |
252 | struct rsxx_dma *tmp; | 258 | struct rsxx_dma *tmp; |
@@ -254,7 +260,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, | |||
254 | 260 | ||
255 | list_for_each_entry_safe(dma, tmp, q, list) { | 261 | list_for_each_entry_safe(dma, tmp, q, list) { |
256 | list_del(&dma->list); | 262 | list_del(&dma->list); |
257 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); | 263 | if (done & COMPLETE_DMA) |
264 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); | ||
265 | else | ||
266 | rsxx_free_dma(ctrl, dma); | ||
258 | cnt++; | 267 | cnt++; |
259 | } | 268 | } |
260 | 269 | ||
@@ -370,7 +379,7 @@ static void dma_engine_stalled(unsigned long data) | |||
370 | 379 | ||
371 | /* Clean up the DMA queue */ | 380 | /* Clean up the DMA queue */ |
372 | spin_lock(&ctrl->queue_lock); | 381 | spin_lock(&ctrl->queue_lock); |
373 | cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); | 382 | cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); |
374 | spin_unlock(&ctrl->queue_lock); | 383 | spin_unlock(&ctrl->queue_lock); |
375 | 384 | ||
376 | cnt += rsxx_dma_cancel(ctrl); | 385 | cnt += rsxx_dma_cancel(ctrl); |
@@ -623,7 +632,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card, | |||
623 | dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len, | 632 | dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len, |
624 | dir ? PCI_DMA_TODEVICE : | 633 | dir ? PCI_DMA_TODEVICE : |
625 | PCI_DMA_FROMDEVICE); | 634 | PCI_DMA_FROMDEVICE); |
626 | if (!dma->dma_addr) { | 635 | if (pci_dma_mapping_error(card->dev, dma->dma_addr)) { |
627 | kmem_cache_free(rsxx_dma_pool, dma); | 636 | kmem_cache_free(rsxx_dma_pool, dma); |
628 | return -ENOMEM; | 637 | return -ENOMEM; |
629 | } | 638 | } |
@@ -736,11 +745,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | |||
736 | return 0; | 745 | return 0; |
737 | 746 | ||
738 | bvec_err: | 747 | bvec_err: |
739 | for (i = 0; i < card->n_targets; i++) { | 748 | for (i = 0; i < card->n_targets; i++) |
740 | spin_lock_bh(&card->ctrl[i].queue_lock); | 749 | rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], |
741 | rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]); | 750 | FREE_DMA); |
742 | spin_unlock_bh(&card->ctrl[i].queue_lock); | ||
743 | } | ||
744 | 751 | ||
745 | return st; | 752 | return st; |
746 | } | 753 | } |
@@ -990,7 +997,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card) | |||
990 | 997 | ||
991 | /* Clean up the DMA queue */ | 998 | /* Clean up the DMA queue */ |
992 | spin_lock_bh(&ctrl->queue_lock); | 999 | spin_lock_bh(&ctrl->queue_lock); |
993 | rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); | 1000 | rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); |
994 | spin_unlock_bh(&ctrl->queue_lock); | 1001 | spin_unlock_bh(&ctrl->queue_lock); |
995 | 1002 | ||
996 | rsxx_dma_cancel(ctrl); | 1003 | rsxx_dma_cancel(ctrl); |
@@ -1045,7 +1052,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | |||
1045 | card->ctrl[i].e_cnt = 0; | 1052 | card->ctrl[i].e_cnt = 0; |
1046 | 1053 | ||
1047 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | 1054 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { |
1048 | if (dma->dma_addr) | 1055 | if (!pci_dma_mapping_error(card->dev, dma->dma_addr)) |
1049 | pci_unmap_page(card->dev, dma->dma_addr, | 1056 | pci_unmap_page(card->dev, dma->dma_addr, |
1050 | get_dma_size(dma), | 1057 | get_dma_size(dma), |
1051 | dma->cmd == HW_CMD_BLK_WRITE ? | 1058 | dma->cmd == HW_CMD_BLK_WRITE ? |
@@ -1073,7 +1080,7 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) | |||
1073 | dma->cmd == HW_CMD_BLK_WRITE ? | 1080 | dma->cmd == HW_CMD_BLK_WRITE ? |
1074 | PCI_DMA_TODEVICE : | 1081 | PCI_DMA_TODEVICE : |
1075 | PCI_DMA_FROMDEVICE); | 1082 | PCI_DMA_FROMDEVICE); |
1076 | if (!dma->dma_addr) { | 1083 | if (pci_dma_mapping_error(card->dev, dma->dma_addr)) { |
1077 | spin_unlock_bh(&card->ctrl[i].queue_lock); | 1084 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
1078 | kmem_cache_free(rsxx_dma_pool, dma); | 1085 | kmem_cache_free(rsxx_dma_pool, dma); |
1079 | return -ENOMEM; | 1086 | return -ENOMEM; |
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index 5ad5055a4104..82779058e8ec 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h | |||
@@ -345,6 +345,11 @@ enum rsxx_creg_stat { | |||
345 | CREG_STAT_TAG_MASK = 0x0000ff00, | 345 | CREG_STAT_TAG_MASK = 0x0000ff00, |
346 | }; | 346 | }; |
347 | 347 | ||
348 | enum rsxx_dma_finish { | ||
349 | FREE_DMA = 0x0, | ||
350 | COMPLETE_DMA = 0x1, | ||
351 | }; | ||
352 | |||
348 | static inline unsigned int CREG_DATA(int N) | 353 | static inline unsigned int CREG_DATA(int N) |
349 | { | 354 | { |
350 | return CREG_DATA0 + (N << 2); | 355 | return CREG_DATA0 + (N << 2); |
@@ -379,7 +384,9 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card, | |||
379 | int rsxx_dma_setup(struct rsxx_cardinfo *card); | 384 | int rsxx_dma_setup(struct rsxx_cardinfo *card); |
380 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); | 385 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); |
381 | int rsxx_dma_init(void); | 386 | int rsxx_dma_init(void); |
382 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q); | 387 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, |
388 | struct list_head *q, | ||
389 | unsigned int done); | ||
383 | int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl); | 390 | int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl); |
384 | void rsxx_dma_cleanup(void); | 391 | void rsxx_dma_cleanup(void); |
385 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); | 392 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); |