author      Mike Snitzer <snitzer@redhat.com>    2013-11-01 15:05:10 -0400
committer   Jens Axboe <axboe@kernel.dk>         2013-11-08 11:10:29 -0500
commit      38d4a1bb994e87057bc8b59e393931904b6b8bc0 (patch)
tree        79a13b82b05f5bdecd6554abbae777da768a95b7 /drivers/block
parent      6a5ec65b9acee39f9af4a15a81858d5fc07498d0 (diff)
skd: more removal of bio-based code
Remove skd_flush_cmd structure and skd_flush_slab.
Remove skd_end_request wrapper around skd_end_request_blk.
Remove skd_requeue_request, use blk_requeue_request directly.
Clean up some comments (remove "bio" info) and whitespace.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
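
The wrapper removals below follow a single pattern: delete a trivial indirection and call the underlying block-layer helper directly at each call site. A minimal before/after sketch, lifted from the hunks that follow (kernel C; skd_device and skd_request_context are the driver's own types):

    /* Before: a one-line wrapper around the block-layer helper. */
    static void skd_requeue_request(struct skd_device *skdev,
                                    struct skd_request_context *skreq)
    {
            blk_requeue_request(skdev->queue, skreq->req);
    }

    /* A caller went through the wrapper... */
    skd_requeue_request(skdev, skreq);

    /* After: ...and now makes the same call directly. */
    blk_requeue_request(skdev->queue, skreq->req);

skd_end_request() gets the same treatment: the wrapper that only forwarded to skd_end_request_blk() is deleted, and skd_end_request_blk() is renamed to skd_end_request(), so existing callers compile unchanged.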
Diffstat (limited to 'drivers/block')
-rw-r--r--    drivers/block/skd_main.c    91
1 file changed, 20 insertions(+), 71 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index d404d7646d9c..5dc5b39e5b85 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -86,7 +86,7 @@ enum {
 MODULE_AUTHOR("bug-reports: support@stec-inc.com");
 MODULE_LICENSE("Dual BSD/GPL");
 
-MODULE_DESCRIPTION("STEC s1120 PCIe SSD block/BIO driver (b" DRV_BUILD_ID ")");
+MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
 
 #define PCI_VENDOR_ID_STEC 0x1B39
@@ -352,21 +352,8 @@ struct skd_device {
 
         u32 timo_slot;
 
-        struct work_struct completion_worker;
-};
-
-#define SKD_FLUSH_JOB "skd-flush-jobs"
-struct kmem_cache *skd_flush_slab;
 
-/*
- * These commands hold "nonzero size FLUSH bios",
- * which are enqueud in skdev->flush_list during
- * completion of "zero size FLUSH commands".
- * It will be active in biomode.
- */
-struct skd_flush_cmd {
-        void *cmd;
-        struct list_head flist;
-};
+        struct work_struct completion_worker;
+};
 
 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
@@ -541,7 +528,7 @@ skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
 
 static void
 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
-                                struct skd_request_context *skreq)
+                            struct skd_request_context *skreq)
 {
         skreq->flush_cmd = 1;
 
@@ -559,9 +546,9 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
 
 static void
 skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
-                        struct skd_request_context *skreq,
-                        struct page *page,
-                        u32 lba, u32 count)
+                     struct skd_request_context *skreq,
+                     struct page *page,
+                     u32 lba, u32 count)
 {
         char *buf;
         unsigned long len;
@@ -655,10 +642,7 @@ static void skd_request_fn(struct request_queue *q)
                          skdev->name, __func__, __LINE__,
                          req, lba, lba, count, count, data_dir);
 
-                /* At this point we know there is a request
-                 * (from our bio q or req q depending on the way
-                 * the driver is built do checks for resources.
-                 */
+                /* At this point we know there is a request */
 
                 /* Are too many requets already in progress? */
                 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
@@ -693,7 +677,7 @@ static void skd_request_fn(struct request_queue *q)
                 skreq->discard_page = 0;
 
                 /*
-                 * OK to now dequeue request from either bio or q.
+                 * OK to now dequeue request from q.
                  *
                  * At this point we are comitted to either start or reject
                  * the native request. Note that skd_request_context is
@@ -868,15 +852,15 @@ skip_sg:
                 blk_stop_queue(skdev->queue);
 }
 
-static void skd_end_request_blk(struct skd_device *skdev,
-                                struct skd_request_context *skreq, int error)
+static void skd_end_request(struct skd_device *skdev,
+                            struct skd_request_context *skreq, int error)
 {
         struct request *req = skreq->req;
         unsigned int io_flags = req->cmd_flags;
 
         if ((io_flags & REQ_DISCARD) &&
             (skreq->discard_page == 1)) {
-                pr_debug("%s:%s:%d skd_end_request_blk, free the page!",
+                pr_debug("%s:%s:%d, free the page!",
                          skdev->name, __func__, __LINE__);
                 free_page((unsigned long)req->buffer);
                 req->buffer = NULL;
@@ -898,7 +882,7 @@ static void skd_end_request_blk(struct skd_device *skdev,
 }
 
 static int skd_preop_sg_list(struct skd_device *skdev,
-                                struct skd_request_context *skreq)
+                             struct skd_request_context *skreq)
 {
         struct request *req = skreq->req;
         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
@@ -961,7 +945,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
 }
 
 static void skd_postop_sg_list(struct skd_device *skdev,
-                                struct skd_request_context *skreq)
+                               struct skd_request_context *skreq)
 {
         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
@@ -976,12 +960,6 @@ static void skd_postop_sg_list(struct skd_device *skdev,
         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
 }
 
-static void skd_end_request(struct skd_device *skdev,
-                            struct skd_request_context *skreq, int error)
-{
-        skd_end_request_blk(skdev, skreq, error);
-}
-
 static void skd_request_fn_not_online(struct request_queue *q)
 {
         struct skd_device *skdev = q->queuedata;
@@ -1525,7 +1503,7 @@ static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
         struct skd_special_context *skspcl = NULL;
         int rc;
 
-        for (;; ) {
+        for (;;) {
                 ulong flags;
 
                 spin_lock_irqsave(&skdev->lock, flags);
@@ -2300,10 +2278,6 @@ static void skd_complete_other(struct skd_device *skdev,
                                volatile struct fit_completion_entry_v1 *skcomp,
                                volatile struct fit_comp_error_info *skerr);
 
-
-static void skd_requeue_request(struct skd_device *skdev,
-                                struct skd_request_context *skreq);
-
 struct sns_info {
         u8 type;
         u8 stat;
@@ -2349,9 +2323,9 @@ static struct sns_info skd_chkstat_table[] = {
  * type and stat, ignore key, asc, ascq.
  */
 
-static enum skd_check_status_action skd_check_status(struct skd_device *skdev,
-                                        u8 cmp_status,
-                                        volatile struct fit_comp_error_info *skerr)
+static enum skd_check_status_action
+skd_check_status(struct skd_device *skdev,
+                 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
 {
         int i, n;
 
@@ -2424,7 +2398,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 
         case SKD_CHECK_STATUS_BUSY_IMMINENT:
                 skd_log_skreq(skdev, skreq, "retry(busy)");
-                skd_requeue_request(skdev, skreq);
+                blk_requeue_request(skdev->queue, skreq->req);
                 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
@@ -2434,7 +2408,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
                         skd_log_skreq(skdev, skreq, "retry");
-                        skd_requeue_request(skdev, skreq);
+                        blk_requeue_request(skdev->queue, skreq->req);
                         break;
                 }
                 /* fall through to report error */
@@ -2446,14 +2420,6 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
         }
 }
 
-static void skd_requeue_request(struct skd_device *skdev,
-                                struct skd_request_context *skreq)
-{
-        blk_requeue_request(skdev->queue, skreq->req);
-}
-
-
-
 /* assume spinlock is already held */
 static void skd_release_skreq(struct skd_device *skdev,
                               struct skd_request_context *skreq)
@@ -2998,7 +2964,6 @@ static void skd_release_special(struct skd_device *skdev,
         int i, was_depleted;
 
         for (i = 0; i < skspcl->req.n_sg; i++) {
-
                 struct page *page = sg_page(&skspcl->req.sg[i]);
                 __free_page(page);
         }
@@ -3141,7 +3106,6 @@ static skd_isr(int irq, void *ptr)
         return rc;
 }
 
-
 static void skd_drive_fault(struct skd_device *skdev)
 {
         skdev->state = SKD_DRVR_STATE_FAULT;
@@ -3297,7 +3261,7 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
                         if (requeue &&
                             (unsigned long) ++skreq->req->special <
                             SKD_MAX_RETRIES)
-                                skd_requeue_request(skdev, skreq);
+                                blk_requeue_request(skdev->queue, skreq->req);
                         else
                                 skd_end_request(skdev, skreq, -EIO);
 
@@ -3305,8 +3269,6 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
 
                 skreq->state = SKD_REQ_STATE_IDLE;
                 skreq->id += SKD_ID_INCR;
-
-
         }
         if (i > 0)
                 skreq[-1].next = skreq;
@@ -3879,7 +3841,6 @@ static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
                 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
                 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
                                                      &flush_enqueued);
-
                 if (flush_enqueued)
                         skd_request_fn(skdev->queue);
 
@@ -5450,15 +5411,6 @@ static int __init skd_init(void)
                 skd_isr_type = SKD_IRQ_DEFAULT;
         }
 
-        skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
-                                           sizeof(struct skd_flush_cmd),
-                                           0, 0, NULL);
-
-        if (!skd_flush_slab) {
-                pr_err("failed to allocated flush slab.\n");
-                return -ENOMEM;
-        }
-
         if (skd_max_queue_depth < 1
             || skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
                 pr_info(
@@ -5507,7 +5459,6 @@ static int __init skd_init(void)
         skd_major = rc;
 
         return pci_register_driver(&skd_driver);
-
 }
 
5513 | static void __exit skd_exit(void) | 5464 | static void __exit skd_exit(void) |
@@ -5516,8 +5467,6 @@ static void __exit skd_exit(void) | |||
5516 | 5467 | ||
5517 | unregister_blkdev(skd_major, DRV_NAME); | 5468 | unregister_blkdev(skd_major, DRV_NAME); |
5518 | pci_unregister_driver(&skd_driver); | 5469 | pci_unregister_driver(&skd_driver); |
5519 | |||
5520 | kmem_cache_destroy(skd_flush_slab); | ||
5521 | } | 5470 | } |
5522 | 5471 | ||
5523 | module_init(skd_init); | 5472 | module_init(skd_init); |
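
Note that the init/exit hunks remove both halves of the flush slab's lifecycle together: the kmem_cache_create() in skd_init() and the matching kmem_cache_destroy() in skd_exit(). A cache created at module load must be destroyed at unload, so dropping only one half would leak the cache or destroy one that was never created. A minimal sketch of the paired pattern being deleted here (hypothetical foo names, not from this driver):

    #include <linux/module.h>
    #include <linux/slab.h>

    struct foo_job {
            int payload;
    };

    static struct kmem_cache *foo_slab;

    static int __init foo_init(void)
    {
            /* Create the cache once, at module load. */
            foo_slab = kmem_cache_create("foo-jobs", sizeof(struct foo_job),
                                         0, 0, NULL);
            if (!foo_slab)
                    return -ENOMEM;
            return 0;
    }

    static void __exit foo_exit(void)
    {
            /* Destroy the same cache at unload; create/destroy must pair. */
            kmem_cache_destroy(foo_slab);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");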