Diffstat (limited to 'drivers/block')

 -rw-r--r--  drivers/block/Kconfig                |   4
 -rw-r--r--  drivers/block/cciss.c                |   2
 -rw-r--r--  drivers/block/loop.c                 |  22
 -rw-r--r--  drivers/block/mg_disk.c              |   4
 -rw-r--r--  drivers/block/mtip32xx/mtip32xx.c    |   4
 -rw-r--r--  drivers/block/nvme.c                 |  33
 -rw-r--r--  drivers/block/rbd.c                  |  47
 -rw-r--r--  drivers/block/rsxx/Makefile          |   2
 -rw-r--r--  drivers/block/rsxx/config.c          |   8
 -rw-r--r--  drivers/block/rsxx/core.c            | 237
 -rw-r--r--  drivers/block/rsxx/cregs.c           | 112
 -rw-r--r--  drivers/block/rsxx/dma.c             | 239
 -rw-r--r--  drivers/block/rsxx/rsxx.h            |   6
 -rw-r--r--  drivers/block/rsxx/rsxx_cfg.h        |   2
 -rw-r--r--  drivers/block/rsxx/rsxx_priv.h       |  34
 -rw-r--r--  drivers/block/xen-blkback/blkback.c  |  68
 -rw-r--r--  drivers/block/xen-blkback/common.h   |  40
 -rw-r--r--  drivers/block/xen-blkback/xenbus.c   |  14
 -rw-r--r--  drivers/block/xen-blkfront.c         | 154

 19 files changed, 752 insertions, 280 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 5dc0daed8fac..b81ddfea1da0 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
 	  If unsure, say N.
 
 config BLK_DEV_RSXX
-	tristate "RamSan PCIe Flash SSD Device Driver"
+	tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
 	depends on PCI
 	help
 	  Device driver for IBM's high speed PCIe SSD
-	  storage devices: RamSan-70 and RamSan-80.
+	  storage devices: FlashSystem-70 and FlashSystem-80.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called rsxx.
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ade58bc8f3c4..1c1b8e544aa2 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h)
 	if (rc)
 		return rc;
 	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
-		cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
+		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
 	if (!h->cfgtable)
 		return -ENOMEM;
 	rc = write_driver_ver_to_cfgtable(h->cfgtable);
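[Note] The cciss fix above is the classic sizeof-on-a-pointer bug: sizeof(h->cfgtable) is the size of the pointer itself, so only 4 or 8 bytes of the config table were being remapped; sizeof(*h->cfgtable) is the size of the structure. A minimal userspace illustration (struct name hypothetical):

	#include <stdio.h>

	struct cfgtable { char data[1024]; };

	int main(void)
	{
		struct cfgtable *p = 0;

		/* sizeof(p) is the pointer size (8 on 64-bit): the old
		 * code mapped only that many bytes of the table. */
		printf("sizeof(p)  = %zu\n", sizeof(p));

		/* sizeof(*p) is the structure itself, which is what the
		 * remap actually needs. */
		printf("sizeof(*p) = %zu\n", sizeof(*p));
		return 0;
	}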
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 747bb2af69dc..fe5f6403417f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1044,12 +1044,29 @@ static int loop_clr_fd(struct loop_device *lo)
 	lo->lo_state = Lo_unbound;
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
-		ioctl_by_bdev(bdev, BLKRRPART, 0);
 	lo->lo_flags = 0;
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 	mutex_unlock(&lo->lo_ctl_mutex);
+
+	/*
+	 * Remove all partitions, since BLKRRPART won't remove user
+	 * added partitions when max_part=0
+	 */
+	if (bdev) {
+		struct disk_part_iter piter;
+		struct hd_struct *part;
+
+		mutex_lock_nested(&bdev->bd_mutex, 1);
+		invalidate_partition(bdev->bd_disk, 0);
+		disk_part_iter_init(&piter, bdev->bd_disk,
+				DISK_PITER_INCL_EMPTY);
+		while ((part = disk_part_iter_next(&piter)))
+			delete_partition(bdev->bd_disk, part->partno);
+		disk_part_iter_exit(&piter);
+		mutex_unlock(&bdev->bd_mutex);
+	}
+
 	/*
 	 * Need not hold lo_ctl_mutex to fput backing file.
 	 * Calling fput holding lo_ctl_mutex triggers a circular
@@ -1623,6 +1640,7 @@ static int loop_add(struct loop_device **l, int i)
 		goto out_free_dev;
 	i = err;
 
+	err = -ENOMEM;
 	lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
 	if (!lo->lo_queue)
 		goto out_free_dev;
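[Note] The first loop.c hunk replaces the BLKRRPART ioctl with an explicit walk of the partition table: a rescan cannot remove user-added partitions when max_part=0, because the loop device then presents no partition table to rescan. A condensed sketch of the iterator pattern, assuming a kernel of this vintage (struct hd_struct, delete_partition()); the helper name is hypothetical:

	#include <linux/genhd.h>
	#include <linux/fs.h>

	static void drop_all_partitions(struct block_device *bdev)
	{
		struct disk_part_iter piter;
		struct hd_struct *part;

		/* Nesting level 1: a caller may already hold another
		 * bd_mutex, as loop_clr_fd() can. */
		mutex_lock_nested(&bdev->bd_mutex, 1);
		invalidate_partition(bdev->bd_disk, 0);

		/* DISK_PITER_INCL_EMPTY also visits zero-length entries. */
		disk_part_iter_init(&piter, bdev->bd_disk,
				    DISK_PITER_INCL_EMPTY);
		while ((part = disk_part_iter_next(&piter)))
			delete_partition(bdev->bd_disk, part->partno);
		disk_part_iter_exit(&piter);

		mutex_unlock(&bdev->bd_mutex);
	}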
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 1788f491e0fb..076ae7f1b781 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev)
 	gpio_direction_output(host->rst, 1);
 
 	/* reset out pin */
-	if (!(prv_data->dev_attr & MG_DEV_MASK))
+	if (!(prv_data->dev_attr & MG_DEV_MASK)) {
+		err = -EINVAL;
 		goto probe_err_3a;
+	}
 
 	if (prv_data->dev_attr != MG_BOOT_DEV) {
 		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 11cc9522cdd4..92250af84e7d 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4224,6 +4224,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 	dd->isr_workq = create_workqueue(dd->workq_name);
 	if (!dd->isr_workq) {
 		dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
+		rv = -ENOMEM;
 		goto block_initialize_err;
 	}
 
@@ -4282,7 +4283,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 	INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
 
 	pci_set_master(pdev);
-	if (pci_enable_msi(pdev)) {
+	rv = pci_enable_msi(pdev);
+	if (rv) {
 		dev_warn(&pdev->dev,
 			"Unable to enable MSI interrupt.\n");
 		goto block_initialize_err;
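[Note] The mg_disk and mtip32xx hunks fix the same bug class: jumping to the unwind path without setting the return code first, so probe() could return 0 for a device that never finished initializing. A self-contained sketch of the convention (all names hypothetical):

	#include <errno.h>
	#include <stdlib.h>

	static void *create_resource(void) { return malloc(16); }
	static void destroy_resource(void *r) { free(r); }

	static int example_probe(void)
	{
		void *res;
		int rv;

		rv = -ENOMEM;	/* set *before* the conditional goto */
		res = create_resource();
		if (!res)
			goto err_unwind;

		return 0;

	err_unwind:
		/* Without the assignment above, rv could still hold 0
		 * here and the caller would treat the failure as success. */
		destroy_resource(res);
		return rv;
	}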
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 07fb2dfaae13..9dcefe40380b 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
 typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
@@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
 		*fn = special_completion;
 		return CMD_CTX_INVALID;
 	}
-	*fn = info[cmdid].fn;
+	if (fn)
+		*fn = info[cmdid].fn;
 	ctx = info[cmdid].ctx;
 	info[cmdid].fn = special_completion;
 	info[cmdid].ctx = CMD_CTX_COMPLETED;
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
 		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
 		iod->npages = -1;
 		iod->length = nbytes;
+		iod->nents = 0;
 	}
 
 	return iod;
@@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
 	struct bio *bio = iod->private;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
-	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+	if (iod->nents)
+		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	nvme_free_iod(dev, iod);
 	if (status) {
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 
 	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
 	if (result < 0)
-		goto free_iod;
+		goto free_cmdid;
 	length = result;
 
 	cmnd->rw.command_id = cmdid;
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 
 	return 0;
 
+ free_cmdid:
+	free_cmdid(nvmeq, cmdid, NULL);
  free_iod:
 	nvme_free_iod(nvmeq->dev, iod);
  nomem:
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 	return nvme_submit_admin_cmd(dev, &c, NULL);
 }
 
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-				unsigned nsid, dma_addr_t dma_addr)
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
 
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
 static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
 
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_cancel_ios(nvmeq, false);
+	while (bio_list_peek(&nvmeq->sq_cong)) {
+		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+		bio_endio(bio, -EIO);
+	}
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 	if (length != cmd.data_len)
 		status = -ENOMEM;
 	else
-		status = nvme_submit_admin_cmd(dev, &c, NULL);
+		status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
 		nvme_free_iod(dev, iod);
 	}
+
+	if (!status && copy_to_user(&ucmd->result, &cmd.result,
+			sizeof(cmd.result)))
+		status = -EFAULT;
+
 	return status;
 }
 
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
 			continue;
 
 		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-					dma_addr + 4096);
+					dma_addr + 4096, NULL);
 		if (res)
-			continue;
+			memset(mem + 4096, 0, 4096);
 
 		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
 		if (ns)
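[Note] Two of the nvme changes work together: nvme_get_features() gained a u32 *result out-parameter for the Get Features completion dword, and, like the fn argument free_cmdid() now accepts, it tolerates NULL. A sketch of a caller under the new signature; everything here except nvme_get_features() itself is illustrative:

	/* Kernel-context fragment; dev, nsid and dma_addr come from the
	 * surrounding driver code. */
	static int example_query_feature(struct nvme_dev *dev, unsigned nsid,
					 dma_addr_t dma_addr)
	{
		u32 result;
		int res;

		/* Completion dword returned via the new out-parameter. */
		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, nsid,
					dma_addr, &result);
		if (res)
			return res;

		/* Callers that only care about success may pass NULL,
		 * exactly as nvme_dev_add() does above. */
		return nvme_get_features(dev, NVME_FEAT_LBA_RANGE, nsid,
					 dma_addr, NULL);
	}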
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 11e179826b60..6b2b039c191f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request)
 	return atomic_read(&obj_request->done) != 0;
 }
 
+static void
+rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
+{
+	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
+		obj_request, obj_request->img_request, obj_request->result,
+		obj_request->xferred, obj_request->length);
+	/*
+	 * ENOENT means a hole in the image.  We zero-fill the
+	 * entire length of the request.  A short read also implies
+	 * zero-fill to the end of the request.  Either way we
+	 * update the xferred count to indicate the whole request
+	 * was satisfied.
+	 */
+	BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
+	if (obj_request->result == -ENOENT) {
+		zero_bio_chain(obj_request->bio_list, 0);
+		obj_request->result = 0;
+		obj_request->xferred = obj_request->length;
+	} else if (obj_request->xferred < obj_request->length &&
+			!obj_request->result) {
+		zero_bio_chain(obj_request->bio_list, obj_request->xferred);
+		obj_request->xferred = obj_request->length;
+	}
+	obj_request_done_set(obj_request);
+}
+
 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
 {
 	dout("%s: obj %p cb %p\n", __func__, obj_request,
@@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
 {
 	dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
 		obj_request->result, obj_request->xferred, obj_request->length);
-	/*
-	 * ENOENT means a hole in the object.  We zero-fill the
-	 * entire length of the request.  A short read also implies
-	 * zero-fill to the end of the request.  Either way we
-	 * update the xferred count to indicate the whole request
-	 * was satisfied.
-	 */
-	if (obj_request->result == -ENOENT) {
-		zero_bio_chain(obj_request->bio_list, 0);
-		obj_request->result = 0;
-		obj_request->xferred = obj_request->length;
-	} else if (obj_request->xferred < obj_request->length &&
-			!obj_request->result) {
-		zero_bio_chain(obj_request->bio_list, obj_request->xferred);
-		obj_request->xferred = obj_request->length;
-	}
-	obj_request_done_set(obj_request);
+	if (obj_request->img_request)
+		rbd_img_obj_request_read_callback(obj_request);
+	else
+		obj_request_done_set(obj_request);
 }
 
 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
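[Note] The rbd refactor only moves the zero-fill policy behind an img_request check; the policy itself is easy to model in isolation. A userspace model, with memset() standing in for zero_bio_chain() and plain variables for the obj_request fields:

	#include <errno.h>
	#include <string.h>

	/* buf/len stand in for the bio chain; the parameters mirror
	 * obj_request->{result,xferred,length}. */
	static void read_callback_model(int *result, size_t *xferred,
					size_t length, unsigned char *buf)
	{
		if (*result == -ENOENT) {
			/* Hole in the image: the request reads as zeros. */
			memset(buf, 0, length);
			*result = 0;
			*xferred = length;
		} else if (*xferred < length && !*result) {
			/* Short read: zero-fill the unread tail. */
			memset(buf + *xferred, 0, length - *xferred);
			*xferred = length;
		}
		/* Either way the caller sees a fully satisfied request. */
	}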
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
index f35cd0b71f7b..b1c53c0aa450 100644
--- a/drivers/block/rsxx/Makefile
+++ b/drivers/block/rsxx/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
-rsxx-y := config.o core.o cregs.o dev.o dma.o
+rsxx-objs := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
index a295e7e9ee41..10cd530d3e10 100644
--- a/drivers/block/rsxx/config.c
+++ b/drivers/block/rsxx/config.c
@@ -29,15 +29,13 @@
 #include "rsxx_priv.h"
 #include "rsxx_cfg.h"
 
-static void initialize_config(void *config)
+static void initialize_config(struct rsxx_card_cfg *cfg)
 {
-	struct rsxx_card_cfg *cfg = config;
-
 	cfg->hdr.version = RSXX_CFG_VERSION;
 
 	cfg->data.block_size = RSXX_HW_BLK_SIZE;
 	cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
-	cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM;
+	cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
 	cfg->data.cache_order = (-1);
 	cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
 	cfg->data.intr_coal.count = 0;
@@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
 	} else {
 		dev_info(CARD_TO_DEV(card),
 			"Initializing card configuration.\n");
-		initialize_config(card);
+		initialize_config(&card->config);
 		st = rsxx_save_config(card);
 		if (st)
 			return st;
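[Note] Worth noting what the void * parameter was hiding: initialize_config(card) passed the whole card where only its embedded config was meant, and compiled silently. With a typed parameter the compiler forces the correct &card->config. A condensed illustration with stand-in types (the real ones live in rsxx_priv.h / rsxx_cfg.h):

	struct rsxx_card_cfg { int version; };
	struct rsxx_cardinfo {
		int other_state;
		struct rsxx_card_cfg config;
	};

	static void initialize_config(struct rsxx_card_cfg *cfg)
	{
		cfg->version = 1;
	}

	void example(struct rsxx_cardinfo *card)
	{
		initialize_config(&card->config);	/* correct */
		/* initialize_config(card); -- with void * this compiled
		 * and scribbled on other_state; now it is a type error. */
	}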
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index e5162487686a..5af21f2db29c 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -30,6 +30,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include <linux/genhd.h>
 #include <linux/idr.h>
@@ -39,8 +40,8 @@
 
 #define NO_LEGACY 0
 
-MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
-MODULE_AUTHOR("IBM <support@ramsan.com>");
+MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
+MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
@@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
 static DEFINE_SPINLOCK(rsxx_ida_lock);
 
 /*----------------- Interrupt Control & Handling -------------------*/
+
+static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
+{
+	card->isr_mask = 0;
+	card->ier_mask = 0;
+}
+
 static void __enable_intr(unsigned int *mask, unsigned int intr)
 {
 	*mask |= intr;
@@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
  */
 void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
-	if (unlikely(card->halt))
+	if (unlikely(card->halt) ||
+	    unlikely(card->eeh_state))
 		return;
 
 	__enable_intr(&card->ier_mask, intr);
@@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 
 void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
+	if (unlikely(card->eeh_state))
+		return;
+
 	__disable_intr(&card->ier_mask, intr);
 	iowrite32(card->ier_mask, card->regmap + IER);
 }
@@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
 			     unsigned int intr)
 {
-	if (unlikely(card->halt))
+	if (unlikely(card->halt) ||
+	    unlikely(card->eeh_state))
 		return;
 
 	__enable_intr(&card->isr_mask, intr);
@@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
 			      unsigned int intr)
 {
+	if (unlikely(card->eeh_state))
+		return;
+
 	__disable_intr(&card->isr_mask, intr);
 	__disable_intr(&card->ier_mask, intr);
 	iowrite32(card->ier_mask, card->regmap + IER);
@@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
 	do {
 		reread_isr = 0;
 
+		if (unlikely(card->eeh_state))
+			break;
+
 		isr = ioread32(card->regmap + ISR);
 		if (isr == 0xffffffff) {
 			/*
@@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
 }
 
 /*----------------- Card Event Handler -------------------*/
-static char *rsxx_card_state_to_str(unsigned int state)
+static const char * const rsxx_card_state_to_str(unsigned int state)
 {
-	static char *state_strings[] = {
+	static const char * const state_strings[] = {
 		"Unknown", "Shutdown", "Starting", "Formatting",
 		"Uninitialized", "Good", "Shutting Down",
 		"Fault", "Read Only Fault", "dStroying"
@@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
 	return 0;
 }
 
+static int rsxx_eeh_frozen(struct pci_dev *dev)
+{
+	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+	int i;
+	int st;
+
+	dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
+
+	card->eeh_state = 1;
+	rsxx_mask_interrupts(card);
+
+	/*
+	 * We need to guarantee that the write for eeh_state and masking
+	 * interrupts does not become reordered. This will prevent a possible
+	 * race condition with the EEH code.
+	 */
+	wmb();
+
+	pci_disable_device(dev);
+
+	st = rsxx_eeh_save_issued_dmas(card);
+	if (st)
+		return st;
+
+	rsxx_eeh_save_issued_creg(card);
+
+	for (i = 0; i < card->n_targets; i++) {
+		if (card->ctrl[i].status.buf)
+			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+					    card->ctrl[i].status.buf,
+					    card->ctrl[i].status.dma_addr);
+		if (card->ctrl[i].cmd.buf)
+			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+					    card->ctrl[i].cmd.buf,
+					    card->ctrl[i].cmd.dma_addr);
+	}
+
+	return 0;
+}
+
+static void rsxx_eeh_failure(struct pci_dev *dev)
+{
+	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+	int i;
+
+	dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
+
+	card->eeh_state = 1;
+
+	for (i = 0; i < card->n_targets; i++)
+		del_timer_sync(&card->ctrl[i].activity_timer);
+
+	rsxx_eeh_cancel_dmas(card);
+}
+
+static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
+{
+	unsigned int status;
+	int iter = 0;
+
+	/* We need to wait for the hardware to reset */
+	while (iter++ < 10) {
+		status = ioread32(card->regmap + PCI_RECONFIG);
+
+		if (status & RSXX_FLUSH_BUSY) {
+			ssleep(1);
+			continue;
+		}
+
+		if (status & RSXX_FLUSH_TIMEOUT)
+			dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
+		return 0;
+	}
+
+	/* Hardware failed resetting itself. */
+	return -1;
+}
+
+static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
+					    enum pci_channel_state error)
+{
+	int st;
+
+	if (dev->revision < RSXX_EEH_SUPPORT)
+		return PCI_ERS_RESULT_NONE;
+
+	if (error == pci_channel_io_perm_failure) {
+		rsxx_eeh_failure(dev);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	st = rsxx_eeh_frozen(dev);
+	if (st) {
+		dev_err(&dev->dev, "Slot reset setup failed\n");
+		rsxx_eeh_failure(dev);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
+{
+	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+	unsigned long flags;
+	int i;
+	int st;
+
+	dev_warn(&dev->dev,
+		"IBM FlashSystem PCI: recovering from slot reset.\n");
+
+	st = pci_enable_device(dev);
+	if (st)
+		goto failed_hw_setup;
+
+	pci_set_master(dev);
+
+	st = rsxx_eeh_fifo_flush_poll(card);
+	if (st)
+		goto failed_hw_setup;
+
+	rsxx_dma_queue_reset(card);
+
+	for (i = 0; i < card->n_targets; i++) {
+		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
+		if (st)
+			goto failed_hw_buffers_init;
+	}
+
+	if (card->config_valid)
+		rsxx_dma_configure(card);
+
+	/* Clears the ISR register from spurious interrupts */
+	st = ioread32(card->regmap + ISR);
+
+	card->eeh_state = 0;
+
+	st = rsxx_eeh_remap_dmas(card);
+	if (st)
+		goto failed_remap_dmas;
+
+	spin_lock_irqsave(&card->irq_lock, flags);
+	if (card->n_targets & RSXX_MAX_TARGETS)
+		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
+	else
+		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
+	spin_unlock_irqrestore(&card->irq_lock, flags);
+
+	rsxx_kick_creg_queue(card);
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		if (list_empty(&card->ctrl[i].queue)) {
+			spin_unlock(&card->ctrl[i].queue_lock);
+			continue;
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+
+		queue_work(card->ctrl[i].issue_wq,
+			   &card->ctrl[i].issue_dma_work);
+	}
+
+	dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+failed_hw_buffers_init:
+failed_remap_dmas:
+	for (i = 0; i < card->n_targets; i++) {
+		if (card->ctrl[i].status.buf)
+			pci_free_consistent(card->dev,
+					STATUS_BUFFER_SIZE8,
+					card->ctrl[i].status.buf,
+					card->ctrl[i].status.dma_addr);
+		if (card->ctrl[i].cmd.buf)
+			pci_free_consistent(card->dev,
+					COMMAND_BUFFER_SIZE8,
+					card->ctrl[i].cmd.buf,
+					card->ctrl[i].cmd.dma_addr);
+	}
+failed_hw_setup:
+	rsxx_eeh_failure(dev);
+	return PCI_ERS_RESULT_DISCONNECT;
+
+}
+
 /*----------------- Driver Initialization & Setup -------------------*/
 /* Returns: 0 if the driver is compatible with the device
 	     -1 if the driver is NOT compatible with the device */
@@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 
 	spin_lock_init(&card->irq_lock);
 	card->halt = 0;
+	card->eeh_state = 0;
 
 	spin_lock_irq(&card->irq_lock);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
 	spin_unlock_irqrestore(&card->irq_lock, flags);
 
-	/* Prevent work_structs from re-queuing themselves. */
-	card->halt = 1;
-
 	cancel_work_sync(&card->event_work);
 
 	rsxx_destroy_dev(card);
@@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 	spin_lock_irqsave(&card->irq_lock, flags);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
 	spin_unlock_irqrestore(&card->irq_lock, flags);
+
+	/* Prevent work_structs from re-queuing themselves. */
+	card->halt = 1;
+
 	free_irq(dev->irq, card);
 
 	if (!force_legacy)
@@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
 	card_shutdown(card);
 }
 
+static const struct pci_error_handlers rsxx_err_handler = {
+	.error_detected = rsxx_error_detected,
+	.slot_reset	= rsxx_slot_reset,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
+	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
+	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
 	{0,},
 };
 
@@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
 	.remove = rsxx_pci_remove,
 	.suspend = rsxx_pci_suspend,
 	.shutdown = rsxx_pci_shutdown,
+	.err_handler = &rsxx_err_handler,
 };
 
 static int __init rsxx_core_init(void)
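[Note] The new handlers follow the standard PCI error-recovery contract: error_detected() freezes the driver and asks for a slot reset (PCI_ERS_RESULT_NEED_RESET), then slot_reset() rebuilds hardware state and reports PCI_ERS_RESULT_RECOVERED. While frozen, every register access is gated on card->eeh_state. A sketch of that gate; the helper is hypothetical (the patch open-codes the check at each call site) and assumes the rsxx_priv.h definitions:

	#include <linux/io.h>
	#include "rsxx_priv.h"	/* struct rsxx_cardinfo: regmap, eeh_state */

	/* Hypothetical helper. Once rsxx_eeh_frozen() sets eeh_state
	 * (made visible with wmb()), no further register access
	 * reaches the frozen slot. */
	static void rsxx_gated_iowrite32(struct rsxx_cardinfo *card,
					 u32 val, unsigned int reg)
	{
		if (unlikely(card->eeh_state))
			return;	/* slot is frozen; drop the access */

		iowrite32(val, card->regmap + reg);
	}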
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 80bbe639fccd..4b5c020a0a65 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
 #error Unknown endianess!!! Aborting...
 #endif
 
-static void copy_to_creg_data(struct rsxx_cardinfo *card,
+static int copy_to_creg_data(struct rsxx_cardinfo *card,
 			      int cnt8,
 			      void *buf,
 			      unsigned int stream)
@@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
 	int i = 0;
 	u32 *data = buf;
 
+	if (unlikely(card->eeh_state))
+		return -EIO;
+
 	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
 		/*
 		 * Firmware implementation makes it necessary to byte swap on
@@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
 		else
 			iowrite32(data[i], card->regmap + CREG_DATA(i));
 	}
+
+	return 0;
 }
 
 
-static void copy_from_creg_data(struct rsxx_cardinfo *card,
+static int copy_from_creg_data(struct rsxx_cardinfo *card,
 				int cnt8,
 				void *buf,
 				unsigned int stream)
@@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
 	int i = 0;
 	u32 *data = buf;
 
+	if (unlikely(card->eeh_state))
+		return -EIO;
+
 	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
 		/*
 		 * Firmware implementation makes it necessary to byte swap on
@@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
 		else
 			data[i] = ioread32(card->regmap + CREG_DATA(i));
 	}
-}
-
-static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
-{
-	struct creg_cmd *cmd;
 
-	/*
-	 * Spin lock is needed because this can be called in atomic/interrupt
-	 * context.
-	 */
-	spin_lock_bh(&card->creg_ctrl.lock);
-	cmd = card->creg_ctrl.active_cmd;
-	card->creg_ctrl.active_cmd = NULL;
-	spin_unlock_bh(&card->creg_ctrl.lock);
-
-	return cmd;
+	return 0;
 }
 
 static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
 {
+	int st;
+
+	if (unlikely(card->eeh_state))
+		return;
+
 	iowrite32(cmd->addr, card->regmap + CREG_ADD);
 	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
 
 	if (cmd->op == CREG_OP_WRITE) {
-		if (cmd->buf)
-			copy_to_creg_data(card, cmd->cnt8,
-					  cmd->buf, cmd->stream);
+		if (cmd->buf) {
+			st = copy_to_creg_data(card, cmd->cnt8,
+					       cmd->buf, cmd->stream);
+			if (st)
+				return;
+		}
 	}
 
-	/*
-	 * Data copy must complete before initiating the command. This is
-	 * needed for weakly ordered processors (i.e. PowerPC), so that all
-	 * neccessary registers are written before we kick the hardware.
-	 */
-	wmb();
+	if (unlikely(card->eeh_state))
+		return;
 
 	/* Setting the valid bit will kick off the command. */
 	iowrite32(cmd->op, card->regmap + CREG_CMD);
@@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
 	cmd->cb_private = cb_private;
 	cmd->status = 0;
 
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
 	card->creg_ctrl.q_depth++;
 	creg_kick_queue(card);
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 
 	return 0;
 }
@@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
 	struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
 	struct creg_cmd *cmd;
 
-	cmd = pop_active_cmd(card);
+	spin_lock(&card->creg_ctrl.lock);
+	cmd = card->creg_ctrl.active_cmd;
+	card->creg_ctrl.active_cmd = NULL;
+	spin_unlock(&card->creg_ctrl.lock);
+
 	if (cmd == NULL) {
 		card->creg_ctrl.creg_stats.creg_timeout++;
 		dev_warn(CARD_TO_DEV(card),
@@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
 	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
 		card->creg_ctrl.creg_stats.failed_cancel_timer++;
 
-	cmd = pop_active_cmd(card);
+	spin_lock_bh(&card->creg_ctrl.lock);
+	cmd = card->creg_ctrl.active_cmd;
+	card->creg_ctrl.active_cmd = NULL;
+	spin_unlock_bh(&card->creg_ctrl.lock);
+
 	if (cmd == NULL) {
 		dev_err(CARD_TO_DEV(card),
 			"Spurious creg interrupt!\n");
@@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
 			goto creg_done;
 		}
 
-		copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
+		st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
 	}
 
 creg_done:
@@ -296,10 +302,10 @@ creg_done:
 
 	kmem_cache_free(creg_cmd_pool, cmd);
 
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	card->creg_ctrl.active = 0;
 	creg_kick_queue(card);
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 }
 
 static void creg_reset(struct rsxx_cardinfo *card)
@@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
 		"Resetting creg interface for recovery\n");
 
 	/* Cancel outstanding commands */
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
 		list_del(&cmd->list);
 		card->creg_ctrl.q_depth--;
@@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
 
 		card->creg_ctrl.active = 0;
 	}
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 
 	card->creg_ctrl.reset = 0;
 	spin_lock_irqsave(&card->irq_lock, flags);
@@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
 		return st;
 
 	/*
-	 * This timeout is neccessary for unresponsive hardware. The additional
+	 * This timeout is necessary for unresponsive hardware. The additional
 	 * 20 seconds to used to guarantee that each cregs requests has time to
 	 * complete.
 	 */
-	timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
-				card->creg_ctrl.q_depth) + 20000);
+	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
+				card->creg_ctrl.q_depth + 20000);
 
 	/*
 	 * The creg interface is guaranteed to complete. It has a timeout
@@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
 	return 0;
 }
 
+void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
+{
+	struct creg_cmd *cmd = NULL;
+
+	cmd = card->creg_ctrl.active_cmd;
+	card->creg_ctrl.active_cmd = NULL;
+
+	if (cmd) {
+		del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+		spin_lock_bh(&card->creg_ctrl.lock);
+		list_add(&cmd->list, &card->creg_ctrl.queue);
+		card->creg_ctrl.q_depth++;
+		card->creg_ctrl.active = 0;
+		spin_unlock_bh(&card->creg_ctrl.lock);
+	}
+}
+
+void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
+{
+	spin_lock_bh(&card->creg_ctrl.lock);
+	if (!list_empty(&card->creg_ctrl.queue))
+		creg_kick_queue(card);
+	spin_unlock_bh(&card->creg_ctrl.lock);
+}
+
 /*------------ Initialization & Setup --------------*/
 int rsxx_creg_setup(struct rsxx_cardinfo *card)
 {
@@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
 	int cnt = 0;
 
 	/* Cancel outstanding commands */
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
 		list_del(&cmd->list);
 		if (cmd->cb)
@@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
 			"Canceled active creg command\n");
 		kmem_cache_free(creg_cmd_pool, cmd);
 	}
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 
 	cancel_work_sync(&card->creg_ctrl.done_work);
 }
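[Note] The spin_lock() to spin_lock_bh() conversion is motivated by creg_cmd_timed_out(): it runs from a timer, i.e. softirq context, and takes the same lock, so process-context holders must block bottom halves or the timer could fire on the same CPU and deadlock against them. The timer path itself keeps the plain variant, since bottom halves are already disabled there. A minimal model under that reading (names illustrative):

	#include <linux/spinlock.h>
	#include <linux/timer.h>

	static DEFINE_SPINLOCK(shared_lock);

	/* Registered elsewhere with setup_timer(); softirq context,
	 * bottom halves already disabled, so the plain lock suffices. */
	static void timer_fn(unsigned long data)
	{
		spin_lock(&shared_lock);
		/* ... steal the active command ... */
		spin_unlock(&shared_lock);
	}

	static void process_context_user(void)
	{
		/* Must block bottom halves, or timer_fn() could
		 * interrupt us while we hold the lock. */
		spin_lock_bh(&shared_lock);
		/* ... queue or reap commands ... */
		spin_unlock_bh(&shared_lock);
	}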
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 63176e67662f..0607513cfb41 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -28,7 +28,7 @@
 struct rsxx_dma {
 	struct list_head list;
 	u8 cmd;
-	unsigned int laddr;	/* Logical address on the ramsan */
+	unsigned int laddr;	/* Logical address */
 	struct {
 		u32 off;
 		u32 cnt;
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
 	HW_STATUS_FAULT		= 0x08,
 };
 
-#define STATUS_BUFFER_SIZE8 4096
-#define COMMAND_BUFFER_SIZE8 4096
-
 static struct kmem_cache *rsxx_dma_pool;
 
 struct dma_tracker {
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
 	return tgt;
 }
 
-static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
 {
 	/* Reset all DMA Command/Status Queues */
 	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 	u32 q_depth = 0;
 	u32 intr_coal;
 
-	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
+	    unlikely(card->eeh_state))
 		return;
 
 	for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 			      struct rsxx_dma *dma,
 			      unsigned int status)
 {
 	if (status & DMA_SW_ERR)
-		printk_ratelimited(KERN_ERR
-				   "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_sw_err++;
 	if (status & DMA_HW_FAULT)
-		printk_ratelimited(KERN_ERR
-				   "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_hw_fault++;
 	if (status & DMA_CANCELLED)
-		printk_ratelimited(KERN_ERR
-				   "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_cancelled++;
 
 	if (dma->dma_addr)
-		pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+			       get_dma_size(dma),
 			       dma->cmd == HW_CMD_BLK_WRITE ?
 					   PCI_DMA_TODEVICE :
 					   PCI_DMA_FROMDEVICE);
 
 	if (dma->cb)
-		dma->cb(card, dma->cb_data, status ? 1 : 0);
+		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
 	kmem_cache_free(rsxx_dma_pool, dma);
 }
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 	if (requeue_cmd)
 		rsxx_requeue_dma(ctrl, dma);
 	else
-		rsxx_complete_dma(ctrl->card, dma, status);
+		rsxx_complete_dma(ctrl, dma, status);
 }
 
 static void dma_engine_stalled(unsigned long data)
 {
 	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
 
-	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
 	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 	hw_cmd_buf = ctrl->cmd.buf;
 
-	if (unlikely(ctrl->card->halt))
+	if (unlikely(ctrl->card->halt) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
 		 */
 		if (unlikely(ctrl->card->dma_fault)) {
 			push_tracker(ctrl->trackers, tag);
-			rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
 			continue;
 		}
 
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
 
 	/* Let HW know we've queued commands. */
 	if (cmds_pending) {
-		/*
-		 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
-		 * (which is in PCI-consistent system-memory) from the loop
-		 * above make it into the coherency domain before the
-		 * following PIO "trigger" updating the cmd.idx.  A WMB is
-		 * sufficient. We need not explicitly CPU cache-flush since
-		 * the memory is a PCI-consistent (ie; coherent) mapping.
-		 */
-		wmb();
-
 		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
 		mod_timer(&ctrl->activity_timer,
 			  jiffies + DMA_ACTIVITY_TIMEOUT);
+
+		if (unlikely(ctrl->card->eeh_state)) {
+			del_timer_sync(&ctrl->activity_timer);
+			return;
+		}
+
 		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
 	}
 }
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
 	hw_st_buf = ctrl->status.buf;
 
 	if (unlikely(ctrl->card->halt) ||
-	    unlikely(ctrl->card->dma_fault))
+	    unlikely(ctrl->card->dma_fault) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
 		if (status)
 			rsxx_handle_dma_error(ctrl, dma, status);
 		else
-			rsxx_complete_dma(ctrl->card, dma, 0);
+			rsxx_complete_dma(ctrl, dma, 0);
 
 		push_tracker(ctrl->trackers, tag);
 
@@ -727,20 +719,54 @@ bvec_err:
 
 
 /*----------------- DMA Engine Initialization & Setup -------------------*/
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
+{
+	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+						&ctrl->status.dma_addr);
+	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+					     &ctrl->cmd.dma_addr);
+	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+		return -ENOMEM;
+
+	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_HI);
+
+	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+	return 0;
+}
+
 static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 			      struct rsxx_dma_ctrl *ctrl)
 {
 	int i;
+	int st;
 
 	memset(&ctrl->stats, 0, sizeof(ctrl->stats));
 
-	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
-						&ctrl->status.dma_addr);
-	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
-					     &ctrl->cmd.dma_addr);
-	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
-		return -ENOMEM;
-
 	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
 	if (!ctrl->trackers)
 		return -ENOMEM;
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
 	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
 
-	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->status.dma_addr),
-		  ctrl->regmap + SB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->status.dma_addr),
-		  ctrl->regmap + SB_ADD_HI);
-
-	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
-	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
-	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
-			 ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
-	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
-	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
-	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
-			 ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
-
-	wmb();
+	st = rsxx_hw_buffers_init(dev, ctrl);
+	if (st)
+		return st;
 
 	return 0;
 }
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
 	return 0;
 }
 
-static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+int rsxx_dma_configure(struct rsxx_cardinfo *card)
 {
 	u32 intr_coal;
 
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 	}
 }
 
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
+{
+	int i;
+	int j;
+	int cnt;
+	struct rsxx_dma *dma;
+	struct list_head *issued_dmas;
+
+	issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
+			      GFP_KERNEL);
+	if (!issued_dmas)
+		return -ENOMEM;
+
+	for (i = 0; i < card->n_targets; i++) {
+		INIT_LIST_HEAD(&issued_dmas[i]);
+		cnt = 0;
+		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
1000 | dma = get_tracker_dma(card->ctrl[i].trackers, j); | ||
1001 | if (dma == NULL) | ||
1002 | continue; | ||
1003 | |||
1004 | if (dma->cmd == HW_CMD_BLK_WRITE) | ||
1005 | card->ctrl[i].stats.writes_issued--; | ||
1006 | else if (dma->cmd == HW_CMD_BLK_DISCARD) | ||
1007 | card->ctrl[i].stats.discards_issued--; | ||
1008 | else | ||
1009 | card->ctrl[i].stats.reads_issued--; | ||
1010 | |||
1011 | list_add_tail(&dma->list, &issued_dmas[i]); | ||
1012 | push_tracker(card->ctrl[i].trackers, j); | ||
1013 | cnt++; | ||
1014 | } | ||
1015 | |||
1016 | spin_lock(&card->ctrl[i].queue_lock); | ||
1017 | list_splice(&issued_dmas[i], &card->ctrl[i].queue); | ||
1018 | |||
1019 | atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); | ||
1020 | card->ctrl[i].stats.sw_q_depth += cnt; | ||
1021 | card->ctrl[i].e_cnt = 0; | ||
1022 | |||
1023 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | ||
1024 | if (dma->dma_addr) | ||
1025 | pci_unmap_page(card->dev, dma->dma_addr, | ||
1026 | get_dma_size(dma), | ||
1027 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
1028 | PCI_DMA_TODEVICE : | ||
1029 | PCI_DMA_FROMDEVICE); | ||
1030 | } | ||
1031 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1032 | } | ||
1033 | |||
1034 | kfree(issued_dmas); | ||
1035 | |||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card) | ||
1040 | { | ||
1041 | struct rsxx_dma *dma; | ||
1042 | struct rsxx_dma *tmp; | ||
1043 | int i; | ||
1044 | |||
1045 | for (i = 0; i < card->n_targets; i++) { | ||
1046 | spin_lock(&card->ctrl[i].queue_lock); | ||
1047 | list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) { | ||
1048 | list_del(&dma->list); | ||
1049 | |||
1050 | rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED); | ||
1051 | } | ||
1052 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1056 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) | ||
1057 | { | ||
1058 | struct rsxx_dma *dma; | ||
1059 | int i; | ||
1060 | |||
1061 | for (i = 0; i < card->n_targets; i++) { | ||
1062 | spin_lock(&card->ctrl[i].queue_lock); | ||
1063 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | ||
1064 | dma->dma_addr = pci_map_page(card->dev, dma->page, | ||
1065 | dma->pg_off, get_dma_size(dma), | ||
1066 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
1067 | PCI_DMA_TODEVICE : | ||
1068 | PCI_DMA_FROMDEVICE); | ||
1069 | if (!dma->dma_addr) { | ||
1070 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1071 | kmem_cache_free(rsxx_dma_pool, dma); | ||
1072 | return -ENOMEM; | ||
1073 | } | ||
1074 | } | ||
1075 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1076 | } | ||
1077 | |||
1078 | return 0; | ||
1079 | } | ||
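Together these helpers form the DMA half of EEH recovery: rsxx_eeh_save_issued_dmas() pulls in-flight commands off the hardware trackers back onto the software queues and unmaps their pages (the mappings die with the slot), after which recovery either remaps and reissues them or cancels them with DMA_CANCELLED. A hedged sketch of that flow; the wrapper name and the recovery decision are assumptions for illustration:

/* Sketch of the DMA side of EEH recovery (control flow assumed). */
static int rsxx_eeh_dma_recover_sketch(struct rsxx_cardinfo *card,
				       bool slot_recovered)
{
	int st;

	st = rsxx_eeh_save_issued_dmas(card);	/* HW queue -> SW queue */
	if (st)
		return st;

	if (!slot_recovered) {
		rsxx_eeh_cancel_dmas(card);	/* complete w/ DMA_CANCELLED */
		return -EIO;
	}

	return rsxx_eeh_remap_dmas(card);	/* fresh pci_map_page()s */
}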
983 | 1080 | ||
984 | int rsxx_dma_init(void) | 1081 | int rsxx_dma_init(void) |
985 | { | 1082 | { |
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h index 2e50b65902b7..24ba3642bd89 100644 --- a/drivers/block/rsxx/rsxx.h +++ b/drivers/block/rsxx/rsxx.h | |||
@@ -27,15 +27,17 @@ | |||
27 | 27 | ||
28 | /*----------------- IOCTL Definitions -------------------*/ | 28 | /*----------------- IOCTL Definitions -------------------*/ |
29 | 29 | ||
30 | #define RSXX_MAX_DATA 8 | ||
31 | |||
30 | struct rsxx_reg_access { | 32 | struct rsxx_reg_access { |
31 | __u32 addr; | 33 | __u32 addr; |
32 | __u32 cnt; | 34 | __u32 cnt; |
33 | __u32 stat; | 35 | __u32 stat; |
34 | __u32 stream; | 36 | __u32 stream; |
35 | __u32 data[8]; | 37 | __u32 data[RSXX_MAX_DATA]; |
36 | }; | 38 | }; |
37 | 39 | ||
38 | #define RSXX_MAX_REG_CNT (8 * (sizeof(__u32))) | 40 | #define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32))) |
39 | 41 | ||
40 | #define RSXX_IOC_MAGIC 'r' | 42 | #define RSXX_IOC_MAGIC 'r' |
41 | 43 | ||
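Naming the array bound RSXX_MAX_DATA keeps data[] and RSXX_MAX_REG_CNT in lockstep. A short illustrative helper (not part of the ioctl ABI) showing how a caller would bound a register access against it:

#include <string.h>

/* Illustrative: reject requests larger than the data[] window. */
static int rsxx_fill_reg_access(struct rsxx_reg_access *cmd,
				__u32 addr, __u32 cnt)
{
	if (cnt > RSXX_MAX_REG_CNT)	/* RSXX_MAX_DATA * sizeof(__u32) */
		return -1;

	memset(cmd, 0, sizeof(*cmd));
	cmd->addr = addr;
	cmd->cnt  = cnt;
	return 0;
}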
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h index c025fe5fdb70..f384c943846d 100644 --- a/drivers/block/rsxx/rsxx_cfg.h +++ b/drivers/block/rsxx/rsxx_cfg.h | |||
@@ -58,7 +58,7 @@ struct rsxx_card_cfg { | |||
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* Vendor ID Values */ | 60 | /* Vendor ID Values */ |
61 | #define RSXX_VENDOR_ID_TMS_IBM 0 | 61 | #define RSXX_VENDOR_ID_IBM 0 |
62 | #define RSXX_VENDOR_ID_DSI 1 | 62 | #define RSXX_VENDOR_ID_DSI 1 |
63 | #define RSXX_VENDOR_COUNT 2 | 63 | #define RSXX_VENDOR_COUNT 2 |
64 | 64 | ||
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index a1ac907d8f4c..382e8bf5c03b 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h | |||
@@ -45,16 +45,13 @@ | |||
45 | 45 | ||
46 | struct proc_cmd; | 46 | struct proc_cmd; |
47 | 47 | ||
48 | #define PCI_VENDOR_ID_TMS_IBM 0x15B6 | 48 | #define PCI_DEVICE_ID_FS70_FLASH 0x04A9 |
49 | #define PCI_DEVICE_ID_RS70_FLASH 0x0019 | 49 | #define PCI_DEVICE_ID_FS80_FLASH 0x04AA |
50 | #define PCI_DEVICE_ID_RS70D_FLASH 0x001A | ||
51 | #define PCI_DEVICE_ID_RS80_FLASH 0x001C | ||
52 | #define PCI_DEVICE_ID_RS81_FLASH 0x001E | ||
53 | 50 | ||
54 | #define RS70_PCI_REV_SUPPORTED 4 | 51 | #define RS70_PCI_REV_SUPPORTED 4 |
55 | 52 | ||
56 | #define DRIVER_NAME "rsxx" | 53 | #define DRIVER_NAME "rsxx" |
57 | #define DRIVER_VERSION "3.7" | 54 | #define DRIVER_VERSION "4.0" |
58 | 55 | ||
59 | /* Block size is 4096 */ | 56 | /* Block size is 4096 */ |
60 | #define RSXX_HW_BLK_SHIFT 12 | 57 | #define RSXX_HW_BLK_SHIFT 12 |
@@ -67,6 +64,9 @@ struct proc_cmd; | |||
67 | #define RSXX_MAX_OUTSTANDING_CMDS 255 | 64 | #define RSXX_MAX_OUTSTANDING_CMDS 255 |
68 | #define RSXX_CS_IDX_MASK 0xff | 65 | #define RSXX_CS_IDX_MASK 0xff |
69 | 66 | ||
67 | #define STATUS_BUFFER_SIZE8 4096 | ||
68 | #define COMMAND_BUFFER_SIZE8 4096 | ||
69 | |||
70 | #define RSXX_MAX_TARGETS 8 | 70 | #define RSXX_MAX_TARGETS 8 |
71 | 71 | ||
72 | struct dma_tracker_list; | 72 | struct dma_tracker_list; |
@@ -91,6 +91,9 @@ struct rsxx_dma_stats { | |||
91 | u32 discards_failed; | 91 | u32 discards_failed; |
92 | u32 done_rescheduled; | 92 | u32 done_rescheduled; |
93 | u32 issue_rescheduled; | 93 | u32 issue_rescheduled; |
94 | u32 dma_sw_err; | ||
95 | u32 dma_hw_fault; | ||
96 | u32 dma_cancelled; | ||
94 | u32 sw_q_depth; /* Number of DMAs on the SW queue. */ | 97 | u32 sw_q_depth; /* Number of DMAs on the SW queue. */ |
95 | atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ | 98 | atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ |
96 | }; | 99 | }; |
@@ -116,6 +119,7 @@ struct rsxx_dma_ctrl { | |||
116 | struct rsxx_cardinfo { | 119 | struct rsxx_cardinfo { |
117 | struct pci_dev *dev; | 120 | struct pci_dev *dev; |
118 | unsigned int halt; | 121 | unsigned int halt; |
122 | unsigned int eeh_state; | ||
119 | 123 | ||
120 | void __iomem *regmap; | 124 | void __iomem *regmap; |
121 | spinlock_t irq_lock; | 125 | spinlock_t irq_lock; |
@@ -224,6 +228,7 @@ enum rsxx_pci_regmap { | |||
224 | PERF_RD512_HI = 0xac, | 228 | PERF_RD512_HI = 0xac, |
225 | PERF_WR512_LO = 0xb0, | 229 | PERF_WR512_LO = 0xb0, |
226 | PERF_WR512_HI = 0xb4, | 230 | PERF_WR512_HI = 0xb4, |
231 | PCI_RECONFIG = 0xb8, | ||
227 | }; | 232 | }; |
228 | 233 | ||
229 | enum rsxx_intr { | 234 | enum rsxx_intr { |
@@ -237,6 +242,8 @@ enum rsxx_intr { | |||
237 | CR_INTR_DMA5 = 0x00000080, | 242 | CR_INTR_DMA5 = 0x00000080, |
238 | CR_INTR_DMA6 = 0x00000100, | 243 | CR_INTR_DMA6 = 0x00000100, |
239 | CR_INTR_DMA7 = 0x00000200, | 244 | CR_INTR_DMA7 = 0x00000200, |
245 | CR_INTR_ALL_C = 0x0000003f, | ||
246 | CR_INTR_ALL_G = 0x000003ff, | ||
240 | CR_INTR_DMA_ALL = 0x000003f5, | 247 | CR_INTR_DMA_ALL = 0x000003f5, |
241 | CR_INTR_ALL = 0xffffffff, | 248 | CR_INTR_ALL = 0xffffffff, |
242 | }; | 249 | }; |
@@ -253,8 +260,14 @@ enum rsxx_pci_reset { | |||
253 | DMA_QUEUE_RESET = 0x00000001, | 260 | DMA_QUEUE_RESET = 0x00000001, |
254 | }; | 261 | }; |
255 | 262 | ||
263 | enum rsxx_hw_fifo_flush { | ||
264 | RSXX_FLUSH_BUSY = 0x00000002, | ||
265 | RSXX_FLUSH_TIMEOUT = 0x00000004, | ||
266 | }; | ||
267 | |||
256 | enum rsxx_pci_revision { | 268 | enum rsxx_pci_revision { |
257 | RSXX_DISCARD_SUPPORT = 2, | 269 | RSXX_DISCARD_SUPPORT = 2, |
270 | RSXX_EEH_SUPPORT = 3, | ||
258 | }; | 271 | }; |
259 | 272 | ||
260 | enum rsxx_creg_cmd { | 273 | enum rsxx_creg_cmd { |
@@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card); | |||
360 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); | 373 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); |
361 | int rsxx_dma_init(void); | 374 | int rsxx_dma_init(void); |
362 | void rsxx_dma_cleanup(void); | 375 | void rsxx_dma_cleanup(void); |
376 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); | ||
377 | int rsxx_dma_configure(struct rsxx_cardinfo *card); | ||
363 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | 378 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, |
364 | struct bio *bio, | 379 | struct bio *bio, |
365 | atomic_t *n_dmas, | 380 | atomic_t *n_dmas, |
366 | rsxx_dma_cb cb, | 381 | rsxx_dma_cb cb, |
367 | void *cb_data); | 382 | void *cb_data); |
383 | int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl); | ||
384 | int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card); | ||
385 | void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card); | ||
386 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card); | ||
368 | 387 | ||
369 | /***** cregs.c *****/ | 388 | /***** cregs.c *****/ |
370 | int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, | 389 | int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, |
@@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card); | |||
389 | void rsxx_creg_destroy(struct rsxx_cardinfo *card); | 408 | void rsxx_creg_destroy(struct rsxx_cardinfo *card); |
390 | int rsxx_creg_init(void); | 409 | int rsxx_creg_init(void); |
391 | void rsxx_creg_cleanup(void); | 410 | void rsxx_creg_cleanup(void); |
392 | |||
393 | int rsxx_reg_access(struct rsxx_cardinfo *card, | 411 | int rsxx_reg_access(struct rsxx_cardinfo *card, |
394 | struct rsxx_reg_access __user *ucmd, | 412 | struct rsxx_reg_access __user *ucmd, |
395 | int read); | 413 | int read); |
414 | void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card); | ||
415 | void rsxx_kick_creg_queue(struct rsxx_cardinfo *card); | ||
396 | 416 | ||
397 | 417 | ||
398 | 418 | ||
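The new RSXX_EEH_SUPPORT value follows the RSXX_DISCARD_SUPPORT pattern: features are keyed off the PCI revision ID. A one-line sketch of the expected gate (the helper name is an assumption):

/* Sketch: EEH handling only applies to sufficiently new cards. */
static bool rsxx_card_has_eeh(struct rsxx_cardinfo *card)
{
	return card->dev->revision >= RSXX_EEH_SUPPORT;
}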
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index de1f319f7bd7..dd5b2fed97e9 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id, | |||
164 | 164 | ||
165 | #define foreach_grant_safe(pos, n, rbtree, node) \ | 165 | #define foreach_grant_safe(pos, n, rbtree, node) \ |
166 | for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ | 166 | for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ |
167 | (n) = rb_next(&(pos)->node); \ | 167 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ |
168 | &(pos)->node != NULL; \ | 168 | &(pos)->node != NULL; \ |
169 | (pos) = container_of(n, typeof(*(pos)), node), \ | 169 | (pos) = container_of(n, typeof(*(pos)), node), \ |
170 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) | 170 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) |
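The macro fix gives the initializer of n the same NULL guard as the loop step, so rb_next() is never evaluated on the offset-adjusted pointer that container_of() produces when rb_first() returns NULL for an empty tree. Typical usage, sketched under the assumption that the backend keeps its grants in an rb_root:

/* Sketch: draining every persistent grant from the rbtree. */
struct persistent_gnt *persistent_gnt;
struct rb_node *n;

foreach_grant_safe(persistent_gnt, n, &blkif->persistent_gnts, node) {
	rb_erase(&persistent_gnt->node, &blkif->persistent_gnts);
	kfree(persistent_gnt);
}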
@@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) | |||
381 | 381 | ||
382 | static void print_stats(struct xen_blkif *blkif) | 382 | static void print_stats(struct xen_blkif *blkif) |
383 | { | 383 | { |
384 | pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d" | 384 | pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" |
385 | " | ds %4d\n", | 385 | " | ds %4llu\n", |
386 | current->comm, blkif->st_oo_req, | 386 | current->comm, blkif->st_oo_req, |
387 | blkif->st_rd_req, blkif->st_wr_req, | 387 | blkif->st_rd_req, blkif->st_wr_req, |
388 | blkif->st_f_req, blkif->st_ds_req); | 388 | blkif->st_f_req, blkif->st_ds_req); |
@@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg) | |||
442 | } | 442 | } |
443 | 443 | ||
444 | struct seg_buf { | 444 | struct seg_buf { |
445 | unsigned long buf; | 445 | unsigned int offset; |
446 | unsigned int nsec; | 446 | unsigned int nsec; |
447 | }; | 447 | }; |
448 | /* | 448 | /* |
@@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req, | |||
621 | * If this is a new persistent grant | 621 | * If this is a new persistent grant |
622 | * save the handler | 622 | * save the handler |
623 | */ | 623 | */ |
624 | persistent_gnts[i]->handle = map[j].handle; | 624 | persistent_gnts[i]->handle = map[j++].handle; |
625 | persistent_gnts[i]->dev_bus_addr = | ||
626 | map[j++].dev_bus_addr; | ||
627 | } | 625 | } |
628 | pending_handle(pending_req, i) = | 626 | pending_handle(pending_req, i) = |
629 | persistent_gnts[i]->handle; | 627 | persistent_gnts[i]->handle; |
630 | 628 | ||
631 | if (ret) | 629 | if (ret) |
632 | continue; | 630 | continue; |
633 | |||
634 | seg[i].buf = persistent_gnts[i]->dev_bus_addr | | ||
635 | (req->u.rw.seg[i].first_sect << 9); | ||
636 | } else { | 631 | } else { |
637 | pending_handle(pending_req, i) = map[j].handle; | 632 | pending_handle(pending_req, i) = map[j++].handle; |
638 | bitmap_set(pending_req->unmap_seg, i, 1); | 633 | bitmap_set(pending_req->unmap_seg, i, 1); |
639 | 634 | ||
640 | if (ret) { | 635 | if (ret) |
641 | j++; | ||
642 | continue; | 636 | continue; |
643 | } | ||
644 | |||
645 | seg[i].buf = map[j++].dev_bus_addr | | ||
646 | (req->u.rw.seg[i].first_sect << 9); | ||
647 | } | 637 | } |
638 | seg[i].offset = (req->u.rw.seg[i].first_sect << 9); | ||
648 | } | 639 | } |
649 | return ret; | 640 | return ret; |
650 | } | 641 | } |
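With persistent grants the data always sits in backend-owned pages, so the per-segment dev_bus_addr bookkeeping is dead weight; only the offset inside the page matters, and it comes straight from the request. A worked example of the computation (512-byte sectors; the helper is illustrative):

/* first_sect == 3  ->  3 << 9 == 1536 bytes into the granted page. */
static unsigned int blkbk_seg_offset(uint8_t first_sect)
{
	return (unsigned int)first_sect << 9;
}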
@@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif, | |||
679 | return err; | 670 | return err; |
680 | } | 671 | } |
681 | 672 | ||
673 | static int dispatch_other_io(struct xen_blkif *blkif, | ||
674 | struct blkif_request *req, | ||
675 | struct pending_req *pending_req) | ||
676 | { | ||
677 | free_req(pending_req); | ||
678 | make_response(blkif, req->u.other.id, req->operation, | ||
679 | BLKIF_RSP_EOPNOTSUPP); | ||
680 | return -EIO; | ||
681 | } | ||
682 | |||
682 | static void xen_blk_drain_io(struct xen_blkif *blkif) | 683 | static void xen_blk_drain_io(struct xen_blkif *blkif) |
683 | { | 684 | { |
684 | atomic_set(&blkif->drain, 1); | 685 | atomic_set(&blkif->drain, 1); |
@@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif) | |||
800 | 801 | ||
801 | /* Apply all sanity checks to /private copy/ of request. */ | 802 | /* Apply all sanity checks to /private copy/ of request. */ |
802 | barrier(); | 803 | barrier(); |
803 | if (unlikely(req.operation == BLKIF_OP_DISCARD)) { | 804 | |
805 | switch (req.operation) { | ||
806 | case BLKIF_OP_READ: | ||
807 | case BLKIF_OP_WRITE: | ||
808 | case BLKIF_OP_WRITE_BARRIER: | ||
809 | case BLKIF_OP_FLUSH_DISKCACHE: | ||
810 | if (dispatch_rw_block_io(blkif, &req, pending_req)) | ||
811 | goto done; | ||
812 | break; | ||
813 | case BLKIF_OP_DISCARD: | ||
804 | free_req(pending_req); | 814 | free_req(pending_req); |
805 | if (dispatch_discard_io(blkif, &req)) | 815 | if (dispatch_discard_io(blkif, &req)) |
806 | break; | 816 | goto done; |
807 | } else if (dispatch_rw_block_io(blkif, &req, pending_req)) | ||
808 | break; | 817 | break; |
818 | default: | ||
819 | if (dispatch_other_io(blkif, &req, pending_req)) | ||
820 | goto done; | ||
821 | break; | ||
822 | } | ||
809 | 823 | ||
810 | /* Yield point for this unbounded loop. */ | 824 | /* Yield point for this unbounded loop. */ |
811 | cond_resched(); | 825 | cond_resched(); |
812 | } | 826 | } |
813 | 827 | done: | |
814 | return more_to_do; | 828 | return more_to_do; |
815 | } | 829 | } |
816 | 830 | ||
@@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
904 | pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", | 918 | pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", |
905 | operation == READ ? "read" : "write", | 919 | operation == READ ? "read" : "write", |
906 | preq.sector_number, | 920 | preq.sector_number, |
907 | preq.sector_number + preq.nr_sects, preq.dev); | 921 | preq.sector_number + preq.nr_sects, |
922 | blkif->vbd.pdevice); | ||
908 | goto fail_response; | 923 | goto fail_response; |
909 | } | 924 | } |
910 | 925 | ||
@@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
947 | (bio_add_page(bio, | 962 | (bio_add_page(bio, |
948 | pages[i], | 963 | pages[i], |
949 | seg[i].nsec << 9, | 964 | seg[i].nsec << 9, |
950 | seg[i].buf & ~PAGE_MASK) == 0)) { | 965 | seg[i].offset) == 0)) { |
951 | 966 | ||
952 | bio = bio_alloc(GFP_KERNEL, nseg-i); | 967 | bio = bio_alloc(GFP_KERNEL, nseg-i); |
953 | if (unlikely(bio == NULL)) | 968 | if (unlikely(bio == NULL)) |
@@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
977 | bio->bi_end_io = end_block_io_op; | 992 | bio->bi_end_io = end_block_io_op; |
978 | } | 993 | } |
979 | 994 | ||
980 | /* | ||
981 | * We set it one so that the last submit_bio does not have to call | ||
982 | * atomic_inc. | ||
983 | */ | ||
984 | atomic_set(&pending_req->pendcnt, nbio); | 995 | atomic_set(&pending_req->pendcnt, nbio); |
985 | |||
986 | /* Get a reference count for the disk queue and start sending I/O */ | ||
987 | blk_start_plug(&plug); | 996 | blk_start_plug(&plug); |
988 | 997 | ||
989 | for (i = 0; i < nbio; i++) | 998 | for (i = 0; i < nbio; i++) |
@@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
1011 | fail_put_bio: | 1020 | fail_put_bio: |
1012 | for (i = 0; i < nbio; i++) | 1021 | for (i = 0; i < nbio; i++) |
1013 | bio_put(biolist[i]); | 1022 | bio_put(biolist[i]); |
1023 | atomic_set(&pending_req->pendcnt, 1); | ||
1014 | __end_block_io_op(pending_req, -EINVAL); | 1024 | __end_block_io_op(pending_req, -EINVAL); |
1015 | msleep(1); /* back off a bit */ | 1025 | msleep(1); /* back off a bit */ |
1016 | return -EIO; | 1026 | return -EIO; |
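Setting pendcnt to 1 on the failure path matters because __end_block_io_op() finishes with an atomic_dec_and_test(); entering it with a zero count would underflow, and the response would never fire exactly once. A simplified sketch of the completion shape this protects (the real helper also unmaps grants and handles barrier draining):

/* Sketch: why pendcnt must be >= 1 before __end_block_io_op() runs. */
static void __end_block_io_op_sketch(struct pending_req *pending_req,
				     int error)
{
	if (error)
		pending_req->status = BLKIF_RSP_ERROR;

	/* The last bio, or the synthetic count of 1 on the error path,
	 * sends the response and releases the request. */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		free_req(pending_req);
	}
}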
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 6072390c7f57..60103e2517ba 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard { | |||
77 | uint64_t nr_sectors; | 77 | uint64_t nr_sectors; |
78 | } __attribute__((__packed__)); | 78 | } __attribute__((__packed__)); |
79 | 79 | ||
80 | struct blkif_x86_32_request_other { | ||
81 | uint8_t _pad1; | ||
82 | blkif_vdev_t _pad2; | ||
83 | uint64_t id; /* private guest value, echoed in resp */ | ||
84 | } __attribute__((__packed__)); | ||
85 | |||
80 | struct blkif_x86_32_request { | 86 | struct blkif_x86_32_request { |
81 | uint8_t operation; /* BLKIF_OP_??? */ | 87 | uint8_t operation; /* BLKIF_OP_??? */ |
82 | union { | 88 | union { |
83 | struct blkif_x86_32_request_rw rw; | 89 | struct blkif_x86_32_request_rw rw; |
84 | struct blkif_x86_32_request_discard discard; | 90 | struct blkif_x86_32_request_discard discard; |
91 | struct blkif_x86_32_request_other other; | ||
85 | } u; | 92 | } u; |
86 | } __attribute__((__packed__)); | 93 | } __attribute__((__packed__)); |
87 | 94 | ||
@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard { | |||
113 | uint64_t nr_sectors; | 120 | uint64_t nr_sectors; |
114 | } __attribute__((__packed__)); | 121 | } __attribute__((__packed__)); |
115 | 122 | ||
123 | struct blkif_x86_64_request_other { | ||
124 | uint8_t _pad1; | ||
125 | blkif_vdev_t _pad2; | ||
126 | uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */ | ||
127 | uint64_t id; /* private guest value, echoed in resp */ | ||
128 | } __attribute__((__packed__)); | ||
129 | |||
116 | struct blkif_x86_64_request { | 130 | struct blkif_x86_64_request { |
117 | uint8_t operation; /* BLKIF_OP_??? */ | 131 | uint8_t operation; /* BLKIF_OP_??? */ |
118 | union { | 132 | union { |
119 | struct blkif_x86_64_request_rw rw; | 133 | struct blkif_x86_64_request_rw rw; |
120 | struct blkif_x86_64_request_discard discard; | 134 | struct blkif_x86_64_request_discard discard; |
135 | struct blkif_x86_64_request_other other; | ||
121 | } u; | 136 | } u; |
122 | } __attribute__((__packed__)); | 137 | } __attribute__((__packed__)); |
123 | 138 | ||
@@ -172,7 +187,6 @@ struct persistent_gnt { | |||
172 | struct page *page; | 187 | struct page *page; |
173 | grant_ref_t gnt; | 188 | grant_ref_t gnt; |
174 | grant_handle_t handle; | 189 | grant_handle_t handle; |
175 | uint64_t dev_bus_addr; | ||
176 | struct rb_node node; | 190 | struct rb_node node; |
177 | }; | 191 | }; |
178 | 192 | ||
@@ -208,13 +222,13 @@ struct xen_blkif { | |||
208 | 222 | ||
209 | /* statistics */ | 223 | /* statistics */ |
210 | unsigned long st_print; | 224 | unsigned long st_print; |
211 | int st_rd_req; | 225 | unsigned long long st_rd_req; |
212 | int st_wr_req; | 226 | unsigned long long st_wr_req; |
213 | int st_oo_req; | 227 | unsigned long long st_oo_req; |
214 | int st_f_req; | 228 | unsigned long long st_f_req; |
215 | int st_ds_req; | 229 | unsigned long long st_ds_req; |
216 | int st_rd_sect; | 230 | unsigned long long st_rd_sect; |
217 | int st_wr_sect; | 231 | unsigned long long st_wr_sect; |
218 | 232 | ||
219 | wait_queue_head_t waiting_to_free; | 233 | wait_queue_head_t waiting_to_free; |
220 | }; | 234 | }; |
@@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, | |||
278 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; | 292 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
279 | break; | 293 | break; |
280 | default: | 294 | default: |
295 | /* | ||
296 | * Don't know how to translate this op. Only get the | ||
297 | * ID so failure can be reported to the frontend. | ||
298 | */ | ||
299 | dst->u.other.id = src->u.other.id; | ||
281 | break; | 300 | break; |
282 | } | 301 | } |
283 | } | 302 | } |
@@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, | |||
309 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; | 328 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
310 | break; | 329 | break; |
311 | default: | 330 | default: |
331 | /* | ||
332 | * Don't know how to translate this op. Only get the | ||
333 | * ID so failure can be reported to the frontend. | ||
334 | */ | ||
335 | dst->u.other.id = src->u.other.id; | ||
312 | break; | 336 | break; |
313 | } | 337 | } |
314 | } | 338 | } |
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 5e237f630c47..8bfd1bcf95ec 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void) | |||
230 | } \ | 230 | } \ |
231 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) | 231 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) |
232 | 232 | ||
233 | VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); | 233 | VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req); |
234 | VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); | 234 | VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req); |
235 | VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); | 235 | VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req); |
236 | VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); | 236 | VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req); |
237 | VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req); | 237 | VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req); |
238 | VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); | 238 | VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect); |
239 | VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); | 239 | VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect); |
240 | 240 | ||
241 | static struct attribute *xen_vbdstat_attrs[] = { | 241 | static struct attribute *xen_vbdstat_attrs[] = { |
242 | &dev_attr_oo_req.attr, | 242 | &dev_attr_oo_req.attr, |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c3dae2e0f290..a894f88762d8 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <linux/mutex.h> | 44 | #include <linux/mutex.h> |
45 | #include <linux/scatterlist.h> | 45 | #include <linux/scatterlist.h> |
46 | #include <linux/bitmap.h> | 46 | #include <linux/bitmap.h> |
47 | #include <linux/llist.h> | 47 | #include <linux/list.h> |
48 | 48 | ||
49 | #include <xen/xen.h> | 49 | #include <xen/xen.h> |
50 | #include <xen/xenbus.h> | 50 | #include <xen/xenbus.h> |
@@ -68,13 +68,12 @@ enum blkif_state { | |||
68 | struct grant { | 68 | struct grant { |
69 | grant_ref_t gref; | 69 | grant_ref_t gref; |
70 | unsigned long pfn; | 70 | unsigned long pfn; |
71 | struct llist_node node; | 71 | struct list_head node; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct blk_shadow { | 74 | struct blk_shadow { |
75 | struct blkif_request req; | 75 | struct blkif_request req; |
76 | struct request *request; | 76 | struct request *request; |
77 | unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
78 | struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 77 | struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
79 | }; | 78 | }; |
80 | 79 | ||
@@ -105,7 +104,7 @@ struct blkfront_info | |||
105 | struct work_struct work; | 104 | struct work_struct work; |
106 | struct gnttab_free_callback callback; | 105 | struct gnttab_free_callback callback; |
107 | struct blk_shadow shadow[BLK_RING_SIZE]; | 106 | struct blk_shadow shadow[BLK_RING_SIZE]; |
108 | struct llist_head persistent_gnts; | 107 | struct list_head persistent_gnts; |
109 | unsigned int persistent_gnts_c; | 108 | unsigned int persistent_gnts_c; |
110 | unsigned long shadow_free; | 109 | unsigned long shadow_free; |
111 | unsigned int feature_flush; | 110 | unsigned int feature_flush; |
@@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info, | |||
165 | return 0; | 164 | return 0; |
166 | } | 165 | } |
167 | 166 | ||
167 | static int fill_grant_buffer(struct blkfront_info *info, int num) | ||
168 | { | ||
169 | struct page *granted_page; | ||
170 | struct grant *gnt_list_entry, *n; | ||
171 | int i = 0; | ||
172 | |||
173 | while (i < num) { | ||
174 | gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); | ||
175 | if (!gnt_list_entry) | ||
176 | goto out_of_memory; | ||
177 | |||
178 | granted_page = alloc_page(GFP_NOIO); | ||
179 | if (!granted_page) { | ||
180 | kfree(gnt_list_entry); | ||
181 | goto out_of_memory; | ||
182 | } | ||
183 | |||
184 | gnt_list_entry->pfn = page_to_pfn(granted_page); | ||
185 | gnt_list_entry->gref = GRANT_INVALID_REF; | ||
186 | list_add(&gnt_list_entry->node, &info->persistent_gnts); | ||
187 | i++; | ||
188 | } | ||
189 | |||
190 | return 0; | ||
191 | |||
192 | out_of_memory: | ||
193 | list_for_each_entry_safe(gnt_list_entry, n, | ||
194 | &info->persistent_gnts, node) { | ||
195 | list_del(&gnt_list_entry->node); | ||
196 | __free_page(pfn_to_page(gnt_list_entry->pfn)); | ||
197 | kfree(gnt_list_entry); | ||
198 | i--; | ||
199 | } | ||
200 | BUG_ON(i != 0); | ||
201 | return -ENOMEM; | ||
202 | } | ||
203 | |||
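fill_grant_buffer() preallocates every page the ring could ever reference, so blkif_queue_request() never has to allocate under GFP_ATOMIC the way the removed code did. The footprint is easy to bound, assuming the usual single-page ring where BLK_RING_SIZE is 32:

/* Back-of-envelope for the preallocation (BLK_RING_SIZE == 32 and
 * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11 assumed):
 *
 *   32 slots * 11 segments = 352 grants
 *   352 pages * 4 KiB      = 1408 KiB (~1.4 MiB) pinned per vbd.
 */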
204 | static struct grant *get_grant(grant_ref_t *gref_head, | ||
205 | struct blkfront_info *info) | ||
206 | { | ||
207 | struct grant *gnt_list_entry; | ||
208 | unsigned long buffer_mfn; | ||
209 | |||
210 | BUG_ON(list_empty(&info->persistent_gnts)); | ||
211 | gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant, | ||
212 | node); | ||
213 | list_del(&gnt_list_entry->node); | ||
214 | |||
215 | if (gnt_list_entry->gref != GRANT_INVALID_REF) { | ||
216 | info->persistent_gnts_c--; | ||
217 | return gnt_list_entry; | ||
218 | } | ||
219 | |||
220 | /* Assign a gref to this page */ | ||
221 | gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); | ||
222 | BUG_ON(gnt_list_entry->gref == -ENOSPC); | ||
223 | buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); | ||
224 | gnttab_grant_foreign_access_ref(gnt_list_entry->gref, | ||
225 | info->xbdev->otherend_id, | ||
226 | buffer_mfn, 0); | ||
227 | return gnt_list_entry; | ||
228 | } | ||
229 | |||
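get_grant() hands out entries in two states: recycled ones that still hold a gref, for which persistent_gnts_c is decremented, and fresh ones from fill_grant_buffer() that are granted on first use. A hypothetical debug helper (not in the patch) stating the invariant the counter maintains:

/* Hypothetical: persistent_gnts_c counts exactly the free-list
 * entries that already hold a live grant reference. */
static void check_persistent_count(struct blkfront_info *info)
{
	struct grant *g;
	unsigned int live = 0;

	list_for_each_entry(g, &info->persistent_gnts, node)
		if (g->gref != GRANT_INVALID_REF)
			live++;

	WARN_ON(live != info->persistent_gnts_c);
}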
168 | static const char *op_name(int op) | 230 | static const char *op_name(int op) |
169 | { | 231 | { |
170 | static const char *const names[] = { | 232 | static const char *const names[] = { |
@@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode, | |||
293 | static int blkif_queue_request(struct request *req) | 355 | static int blkif_queue_request(struct request *req) |
294 | { | 356 | { |
295 | struct blkfront_info *info = req->rq_disk->private_data; | 357 | struct blkfront_info *info = req->rq_disk->private_data; |
296 | unsigned long buffer_mfn; | ||
297 | struct blkif_request *ring_req; | 358 | struct blkif_request *ring_req; |
298 | unsigned long id; | 359 | unsigned long id; |
299 | unsigned int fsect, lsect; | 360 | unsigned int fsect, lsect; |
@@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req) | |||
306 | */ | 367 | */ |
307 | bool new_persistent_gnts; | 368 | bool new_persistent_gnts; |
308 | grant_ref_t gref_head; | 369 | grant_ref_t gref_head; |
309 | struct page *granted_page; | ||
310 | struct grant *gnt_list_entry = NULL; | 370 | struct grant *gnt_list_entry = NULL; |
311 | struct scatterlist *sg; | 371 | struct scatterlist *sg; |
312 | 372 | ||
@@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req) | |||
370 | fsect = sg->offset >> 9; | 430 | fsect = sg->offset >> 9; |
371 | lsect = fsect + (sg->length >> 9) - 1; | 431 | lsect = fsect + (sg->length >> 9) - 1; |
372 | 432 | ||
373 | if (info->persistent_gnts_c) { | 433 | gnt_list_entry = get_grant(&gref_head, info); |
374 | BUG_ON(llist_empty(&info->persistent_gnts)); | 434 | ref = gnt_list_entry->gref; |
375 | gnt_list_entry = llist_entry( | ||
376 | llist_del_first(&info->persistent_gnts), | ||
377 | struct grant, node); | ||
378 | |||
379 | ref = gnt_list_entry->gref; | ||
380 | buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); | ||
381 | info->persistent_gnts_c--; | ||
382 | } else { | ||
383 | ref = gnttab_claim_grant_reference(&gref_head); | ||
384 | BUG_ON(ref == -ENOSPC); | ||
385 | |||
386 | gnt_list_entry = | ||
387 | kmalloc(sizeof(struct grant), | ||
388 | GFP_ATOMIC); | ||
389 | if (!gnt_list_entry) | ||
390 | return -ENOMEM; | ||
391 | |||
392 | granted_page = alloc_page(GFP_ATOMIC); | ||
393 | if (!granted_page) { | ||
394 | kfree(gnt_list_entry); | ||
395 | return -ENOMEM; | ||
396 | } | ||
397 | |||
398 | gnt_list_entry->pfn = | ||
399 | page_to_pfn(granted_page); | ||
400 | gnt_list_entry->gref = ref; | ||
401 | |||
402 | buffer_mfn = pfn_to_mfn(page_to_pfn( | ||
403 | granted_page)); | ||
404 | gnttab_grant_foreign_access_ref(ref, | ||
405 | info->xbdev->otherend_id, | ||
406 | buffer_mfn, 0); | ||
407 | } | ||
408 | 435 | ||
409 | info->shadow[id].grants_used[i] = gnt_list_entry; | 436 | info->shadow[id].grants_used[i] = gnt_list_entry; |
410 | 437 | ||
@@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req) | |||
435 | kunmap_atomic(shared_data); | 462 | kunmap_atomic(shared_data); |
436 | } | 463 | } |
437 | 464 | ||
438 | info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); | ||
439 | ring_req->u.rw.seg[i] = | 465 | ring_req->u.rw.seg[i] = |
440 | (struct blkif_request_segment) { | 466 | (struct blkif_request_segment) { |
441 | .gref = ref, | 467 | .gref = ref, |
@@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work) | |||
790 | 816 | ||
791 | static void blkif_free(struct blkfront_info *info, int suspend) | 817 | static void blkif_free(struct blkfront_info *info, int suspend) |
792 | { | 818 | { |
793 | struct llist_node *all_gnts; | 819 | struct grant *persistent_gnt; |
794 | struct grant *persistent_gnt, *tmp; | 820 | struct grant *n; |
795 | struct llist_node *n; | ||
796 | 821 | ||
797 | /* Prevent new requests being issued until we fix things up. */ | 822 | /* Prevent new requests being issued until we fix things up. */ |
798 | spin_lock_irq(&info->io_lock); | 823 | spin_lock_irq(&info->io_lock); |
@@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
803 | blk_stop_queue(info->rq); | 828 | blk_stop_queue(info->rq); |
804 | 829 | ||
805 | /* Remove all persistent grants */ | 830 | /* Remove all persistent grants */ |
806 | if (info->persistent_gnts_c) { | 831 | if (!list_empty(&info->persistent_gnts)) { |
807 | all_gnts = llist_del_all(&info->persistent_gnts); | 832 | list_for_each_entry_safe(persistent_gnt, n, |
808 | persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node); | 833 | &info->persistent_gnts, node) { |
809 | while (persistent_gnt) { | 834 | list_del(&persistent_gnt->node); |
810 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); | 835 | if (persistent_gnt->gref != GRANT_INVALID_REF) { |
836 | gnttab_end_foreign_access(persistent_gnt->gref, | ||
837 | 0, 0UL); | ||
838 | info->persistent_gnts_c--; | ||
839 | } | ||
811 | __free_page(pfn_to_page(persistent_gnt->pfn)); | 840 | __free_page(pfn_to_page(persistent_gnt->pfn)); |
812 | tmp = persistent_gnt; | 841 | kfree(persistent_gnt); |
813 | n = persistent_gnt->node.next; | ||
814 | if (n) | ||
815 | persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node); | ||
816 | else | ||
817 | persistent_gnt = NULL; | ||
818 | kfree(tmp); | ||
819 | } | 842 | } |
820 | info->persistent_gnts_c = 0; | ||
821 | } | 843 | } |
844 | BUG_ON(info->persistent_gnts_c != 0); | ||
822 | 845 | ||
823 | /* No more gnttab callback work. */ | 846 | /* No more gnttab callback work. */ |
824 | gnttab_cancel_free_callback(&info->callback); | 847 | gnttab_cancel_free_callback(&info->callback); |
@@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
875 | } | 898 | } |
876 | /* Add the persistent grant into the list of free grants */ | 899 | /* Add the persistent grant into the list of free grants */ |
877 | for (i = 0; i < s->req.u.rw.nr_segments; i++) { | 900 | for (i = 0; i < s->req.u.rw.nr_segments; i++) { |
878 | llist_add(&s->grants_used[i]->node, &info->persistent_gnts); | 901 | list_add(&s->grants_used[i]->node, &info->persistent_gnts); |
879 | info->persistent_gnts_c++; | 902 | info->persistent_gnts_c++; |
880 | } | 903 | } |
881 | } | 904 | } |
@@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev, | |||
1013 | 1036 | ||
1014 | sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); | 1037 | sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); |
1015 | 1038 | ||
1039 | /* Allocate memory for grants */ | ||
1040 | err = fill_grant_buffer(info, BLK_RING_SIZE * | ||
1041 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
1042 | if (err) | ||
1043 | goto fail; | ||
1044 | |||
1016 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); | 1045 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); |
1017 | if (err < 0) { | 1046 | if (err < 0) { |
1018 | free_page((unsigned long)sring); | 1047 | free_page((unsigned long)sring); |
@@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1171 | spin_lock_init(&info->io_lock); | 1200 | spin_lock_init(&info->io_lock); |
1172 | info->xbdev = dev; | 1201 | info->xbdev = dev; |
1173 | info->vdevice = vdevice; | 1202 | info->vdevice = vdevice; |
1174 | init_llist_head(&info->persistent_gnts); | 1203 | INIT_LIST_HEAD(&info->persistent_gnts); |
1175 | info->persistent_gnts_c = 0; | 1204 | info->persistent_gnts_c = 0; |
1176 | info->connected = BLKIF_STATE_DISCONNECTED; | 1205 | info->connected = BLKIF_STATE_DISCONNECTED; |
1177 | INIT_WORK(&info->work, blkif_restart_queue); | 1206 | INIT_WORK(&info->work, blkif_restart_queue); |
@@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info) | |||
1203 | int j; | 1232 | int j; |
1204 | 1233 | ||
1205 | /* Stage 1: Make a safe copy of the shadow state. */ | 1234 | /* Stage 1: Make a safe copy of the shadow state. */ |
1206 | copy = kmalloc(sizeof(info->shadow), | 1235 | copy = kmemdup(info->shadow, sizeof(info->shadow), |
1207 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); | 1236 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); |
1208 | if (!copy) | 1237 | if (!copy) |
1209 | return -ENOMEM; | 1238 | return -ENOMEM; |
1210 | memcpy(copy, info->shadow, sizeof(info->shadow)); | ||
1211 | 1239 | ||
1212 | /* Stage 2: Set up free list. */ | 1240 | /* Stage 2: Set up free list. */ |
1213 | memset(&info->shadow, 0, sizeof(info->shadow)); | 1241 | memset(&info->shadow, 0, sizeof(info->shadow)); |
@@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info) | |||
1236 | gnttab_grant_foreign_access_ref( | 1264 | gnttab_grant_foreign_access_ref( |
1237 | req->u.rw.seg[j].gref, | 1265 | req->u.rw.seg[j].gref, |
1238 | info->xbdev->otherend_id, | 1266 | info->xbdev->otherend_id, |
1239 | pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), | 1267 | pfn_to_mfn(copy[i].grants_used[j]->pfn), |
1240 | 0); | 1268 | 0); |
1241 | } | 1269 | } |
1242 | info->shadow[req->u.rw.id].req = *req; | 1270 | info->shadow[req->u.rw.id].req = *req; |