-rw-r--r--  Documentation/blockdev/floppy.txt    |    6
-rw-r--r--  drivers/block/Kconfig                |   12
-rw-r--r--  drivers/block/Makefile               |    2
-rw-r--r--  drivers/block/cciss.c                |    2
-rw-r--r--  drivers/block/drbd/drbd_int.h        |    3
-rw-r--r--  drivers/block/drbd/drbd_main.c       |   19
-rw-r--r--  drivers/block/drbd/drbd_nl.c         |    6
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   |   45
-rw-r--r--  drivers/block/drbd/drbd_req.c        |    3
-rw-r--r--  drivers/block/loop.c                 |   15
-rw-r--r--  drivers/block/mg_disk.c              |    2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c    |  500
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h    |   18
-rw-r--r--  drivers/block/pktcdvd.c              |   22
-rw-r--r--  drivers/block/rsxx/core.c            |    8
-rw-r--r--  drivers/block/rsxx/dev.c             |    8
-rw-r--r--  drivers/block/rsxx/dma.c             |  119
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h       |   11
-rw-r--r--  drivers/block/skd_main.c             | 5432
-rw-r--r--  drivers/block/skd_s1120.h            |  330
-rw-r--r--  drivers/block/xen-blkback/blkback.c  |    3
-rw-r--r--  drivers/block/xen-blkfront.c         |  159
-rw-r--r--  drivers/s390/block/dasd.c            |    8
23 files changed, 6369 insertions, 364 deletions
diff --git a/Documentation/blockdev/floppy.txt b/Documentation/blockdev/floppy.txt
index 470fe4b5e379..e2240f5ab64d 100644
--- a/Documentation/blockdev/floppy.txt
+++ b/Documentation/blockdev/floppy.txt
@@ -39,15 +39,15 @@ Module configuration options | |||
39 | ============================ | 39 | ============================ |
40 | 40 | ||
41 | If you use the floppy driver as a module, use the following syntax: | 41 | If you use the floppy driver as a module, use the following syntax: |
42 | modprobe floppy <options> | 42 | modprobe floppy floppy="<options>" |
43 | 43 | ||
44 | Example: | 44 | Example: |
45 | modprobe floppy omnibook messages | 45 | modprobe floppy floppy="omnibook messages" |
46 | 46 | ||
47 | If you need certain options enabled every time you load the floppy driver, | 47 | If you need certain options enabled every time you load the floppy driver, |
48 | you can put: | 48 | you can put: |
49 | 49 | ||
50 | options floppy omnibook messages | 50 | options floppy floppy="omnibook messages" |
51 | 51 | ||
52 | in a configuration file in /etc/modprobe.d/. | 52 | in a configuration file in /etc/modprobe.d/. |
53 | 53 | ||
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 5902bd006a9c..86b9f37d102e 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -110,7 +110,7 @@ source "drivers/block/mtip32xx/Kconfig" | |||
110 | 110 | ||
111 | config BLK_CPQ_DA | 111 | config BLK_CPQ_DA |
112 | tristate "Compaq SMART2 support" | 112 | tristate "Compaq SMART2 support" |
113 | depends on PCI && VIRT_TO_BUS | 113 | depends on PCI && VIRT_TO_BUS && 0 |
114 | help | 114 | help |
115 | This is the driver for Compaq Smart Array controllers. Everyone | 115 | This is the driver for Compaq Smart Array controllers. Everyone |
116 | using these boards should say Y here. See the file | 116 | using these boards should say Y here. See the file |
@@ -319,6 +319,16 @@ config BLK_DEV_NVME | |||
319 | To compile this driver as a module, choose M here: the | 319 | To compile this driver as a module, choose M here: the |
320 | module will be called nvme. | 320 | module will be called nvme. |
321 | 321 | ||
322 | config BLK_DEV_SKD | ||
323 | tristate "STEC S1120 Block Driver" | ||
324 | depends on PCI | ||
325 | depends on 64BIT | ||
326 | ---help--- | ||
327 | Saying Y or M here will enable support for the | ||
328 | STEC, Inc. S1120 PCIe SSD. | ||
329 | |||
330 | Use device /dev/skd$N amd /dev/skd$Np$M. | ||
331 | |||
322 | config BLK_DEV_OSD | 332 | config BLK_DEV_OSD |
323 | tristate "OSD object-as-blkdev support" | 333 | tristate "OSD object-as-blkdev support" |
324 | depends on SCSI_OSD_ULD | 334 | depends on SCSI_OSD_ULD |
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 03b3b4a2bd8a..8cc98cd0d4a8 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o | |||
23 | obj-$(CONFIG_MG_DISK) += mg_disk.o | 23 | obj-$(CONFIG_MG_DISK) += mg_disk.o |
24 | obj-$(CONFIG_SUNVDC) += sunvdc.o | 24 | obj-$(CONFIG_SUNVDC) += sunvdc.o |
25 | obj-$(CONFIG_BLK_DEV_NVME) += nvme.o | 25 | obj-$(CONFIG_BLK_DEV_NVME) += nvme.o |
26 | obj-$(CONFIG_BLK_DEV_SKD) += skd.o | ||
26 | obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o | 27 | obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o |
27 | 28 | ||
28 | obj-$(CONFIG_BLK_DEV_UMEM) += umem.o | 29 | obj-$(CONFIG_BLK_DEV_UMEM) += umem.o |
@@ -44,4 +45,5 @@ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/ | |||
44 | obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o | 45 | obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o |
45 | 46 | ||
46 | nvme-y := nvme-core.o nvme-scsi.o | 47 | nvme-y := nvme-core.o nvme-scsi.o |
48 | skd-y := skd_main.o | ||
47 | swim_mod-y := swim.o swim_asm.o | 49 | swim_mod-y := swim.o swim_asm.o |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index edfa2515bc86..0c004ac05811 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -5183,7 +5183,7 @@ reinit_after_soft_reset: | |||
5183 | rebuild_lun_table(h, 1, 0); | 5183 | rebuild_lun_table(h, 1, 0); |
5184 | cciss_engage_scsi(h); | 5184 | cciss_engage_scsi(h); |
5185 | h->busy_initializing = 0; | 5185 | h->busy_initializing = 0; |
5186 | return 1; | 5186 | return 0; |
5187 | 5187 | ||
5188 | clean4: | 5188 | clean4: |
5189 | cciss_free_cmd_pool(h); | 5189 | cciss_free_cmd_pool(h); |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2d7f608d181c..0e06f0c5dd1e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1474,7 +1474,8 @@ enum determine_dev_size { | |||
1474 | DS_ERROR = -1, | 1474 | DS_ERROR = -1, |
1475 | DS_UNCHANGED = 0, | 1475 | DS_UNCHANGED = 0, |
1476 | DS_SHRUNK = 1, | 1476 | DS_SHRUNK = 1, |
1477 | DS_GREW = 2 | 1477 | DS_GREW = 2, |
1478 | DS_GREW_FROM_ZERO = 3, | ||
1478 | }; | 1479 | }; |
1479 | extern enum determine_dev_size | 1480 | extern enum determine_dev_size |
1480 | drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local); | 1481 | drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local); |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55635edf563b..9e3818b1bc83 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2750,13 +2750,6 @@ int __init drbd_init(void) | |||
2750 | return err; | 2750 | return err; |
2751 | } | 2751 | } |
2752 | 2752 | ||
2753 | err = drbd_genl_register(); | ||
2754 | if (err) { | ||
2755 | printk(KERN_ERR "drbd: unable to register generic netlink family\n"); | ||
2756 | goto fail; | ||
2757 | } | ||
2758 | |||
2759 | |||
2760 | register_reboot_notifier(&drbd_notifier); | 2753 | register_reboot_notifier(&drbd_notifier); |
2761 | 2754 | ||
2762 | /* | 2755 | /* |
@@ -2767,6 +2760,15 @@ int __init drbd_init(void) | |||
2767 | drbd_proc = NULL; /* play safe for drbd_cleanup */ | 2760 | drbd_proc = NULL; /* play safe for drbd_cleanup */ |
2768 | idr_init(&minors); | 2761 | idr_init(&minors); |
2769 | 2762 | ||
2763 | rwlock_init(&global_state_lock); | ||
2764 | INIT_LIST_HEAD(&drbd_tconns); | ||
2765 | |||
2766 | err = drbd_genl_register(); | ||
2767 | if (err) { | ||
2768 | printk(KERN_ERR "drbd: unable to register generic netlink family\n"); | ||
2769 | goto fail; | ||
2770 | } | ||
2771 | |||
2770 | err = drbd_create_mempools(); | 2772 | err = drbd_create_mempools(); |
2771 | if (err) | 2773 | if (err) |
2772 | goto fail; | 2774 | goto fail; |
@@ -2778,9 +2780,6 @@ int __init drbd_init(void) | |||
2778 | goto fail; | 2780 | goto fail; |
2779 | } | 2781 | } |
2780 | 2782 | ||
2781 | rwlock_init(&global_state_lock); | ||
2782 | INIT_LIST_HEAD(&drbd_tconns); | ||
2783 | |||
2784 | retry.wq = create_singlethread_workqueue("drbd-reissue"); | 2783 | retry.wq = create_singlethread_workqueue("drbd-reissue"); |
2785 | if (!retry.wq) { | 2784 | if (!retry.wq) { |
2786 | printk(KERN_ERR "drbd: unable to create retry workqueue\n"); | 2785 | printk(KERN_ERR "drbd: unable to create retry workqueue\n"); |
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 8cc1e640f485..c706d50a8b06 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -955,7 +955,7 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res | |||
955 | } | 955 | } |
956 | 956 | ||
957 | if (size > la_size_sect) | 957 | if (size > la_size_sect) |
958 | rv = DS_GREW; | 958 | rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO; |
959 | if (size < la_size_sect) | 959 | if (size < la_size_sect) |
960 | rv = DS_SHRUNK; | 960 | rv = DS_SHRUNK; |
961 | 961 | ||
@@ -1132,9 +1132,9 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) | |||
1132 | /* We may ignore peer limits if the peer is modern enough. | 1132 | /* We may ignore peer limits if the peer is modern enough. |
1133 | Because new from 8.3.8 onwards the peer can use multiple | 1133 | Because new from 8.3.8 onwards the peer can use multiple |
1134 | BIOs for a single peer_request */ | 1134 | BIOs for a single peer_request */ |
1135 | if (mdev->state.conn >= C_CONNECTED) { | 1135 | if (mdev->state.conn >= C_WF_REPORT_PARAMS) { |
1136 | if (mdev->tconn->agreed_pro_version < 94) | 1136 | if (mdev->tconn->agreed_pro_version < 94) |
1137 | peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); | 1137 | peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); |
1138 | /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ | 1138 | /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ |
1139 | else if (mdev->tconn->agreed_pro_version == 94) | 1139 | else if (mdev->tconn->agreed_pro_version == 94) |
1140 | peer = DRBD_MAX_SIZE_H80_PACKET; | 1140 | peer = DRBD_MAX_SIZE_H80_PACKET; |
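The drbd_nl.c hunk above is where the new DS_GREW_FROM_ZERO value from the drbd_int.h hunk gets produced: a grow is reported differently when the last agreed size was zero. Below is a minimal, self-contained sketch of that classification; the enum mirrors the drbd_int.h hunk, while classify() and main() are illustrative names, not drbd code.

#include <stdio.h>

/* Mirrors the enum extended in drbd_int.h above; for illustration only. */
enum determine_dev_size {
	DS_ERROR = -1,
	DS_UNCHANGED = 0,
	DS_SHRUNK = 1,
	DS_GREW = 2,
	DS_GREW_FROM_ZERO = 3,
};

/* Same classification as the drbd_determine_dev_size() hunk: a grow from
 * a last-agreed size of zero is reported as its own case so callers can
 * treat a device that just became non-empty specially. */
static enum determine_dev_size classify(unsigned long long size,
					unsigned long long la_size_sect)
{
	if (size > la_size_sect)
		return la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < la_size_sect)
		return DS_SHRUNK;
	return DS_UNCHANGED;
}

int main(void)
{
	printf("%d\n", classify(2048, 0));    /* DS_GREW_FROM_ZERO (3) */
	printf("%d\n", classify(2048, 1024)); /* DS_GREW (2) */
	return 0;
}
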
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cc29cd3bf78b..6fa6673b36b3 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1890,29 +1890,11 @@ static u32 seq_max(u32 a, u32 b) | |||
1890 | return seq_greater(a, b) ? a : b; | 1890 | return seq_greater(a, b) ? a : b; |
1891 | } | 1891 | } |
1892 | 1892 | ||
1893 | static bool need_peer_seq(struct drbd_conf *mdev) | ||
1894 | { | ||
1895 | struct drbd_tconn *tconn = mdev->tconn; | ||
1896 | int tp; | ||
1897 | |||
1898 | /* | ||
1899 | * We only need to keep track of the last packet_seq number of our peer | ||
1900 | * if we are in dual-primary mode and we have the resolve-conflicts flag set; see | ||
1901 | * handle_write_conflicts(). | ||
1902 | */ | ||
1903 | |||
1904 | rcu_read_lock(); | ||
1905 | tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; | ||
1906 | rcu_read_unlock(); | ||
1907 | |||
1908 | return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags); | ||
1909 | } | ||
1910 | |||
1911 | static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq) | 1893 | static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq) |
1912 | { | 1894 | { |
1913 | unsigned int newest_peer_seq; | 1895 | unsigned int newest_peer_seq; |
1914 | 1896 | ||
1915 | if (need_peer_seq(mdev)) { | 1897 | if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) { |
1916 | spin_lock(&mdev->peer_seq_lock); | 1898 | spin_lock(&mdev->peer_seq_lock); |
1917 | newest_peer_seq = seq_max(mdev->peer_seq, peer_seq); | 1899 | newest_peer_seq = seq_max(mdev->peer_seq, peer_seq); |
1918 | mdev->peer_seq = newest_peer_seq; | 1900 | mdev->peer_seq = newest_peer_seq; |
@@ -1972,22 +1954,31 @@ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_s | |||
1972 | { | 1954 | { |
1973 | DEFINE_WAIT(wait); | 1955 | DEFINE_WAIT(wait); |
1974 | long timeout; | 1956 | long timeout; |
1975 | int ret; | 1957 | int ret = 0, tp; |
1976 | 1958 | ||
1977 | if (!need_peer_seq(mdev)) | 1959 | if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) |
1978 | return 0; | 1960 | return 0; |
1979 | 1961 | ||
1980 | spin_lock(&mdev->peer_seq_lock); | 1962 | spin_lock(&mdev->peer_seq_lock); |
1981 | for (;;) { | 1963 | for (;;) { |
1982 | if (!seq_greater(peer_seq - 1, mdev->peer_seq)) { | 1964 | if (!seq_greater(peer_seq - 1, mdev->peer_seq)) { |
1983 | mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq); | 1965 | mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq); |
1984 | ret = 0; | ||
1985 | break; | 1966 | break; |
1986 | } | 1967 | } |
1968 | |||
1987 | if (signal_pending(current)) { | 1969 | if (signal_pending(current)) { |
1988 | ret = -ERESTARTSYS; | 1970 | ret = -ERESTARTSYS; |
1989 | break; | 1971 | break; |
1990 | } | 1972 | } |
1973 | |||
1974 | rcu_read_lock(); | ||
1975 | tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; | ||
1976 | rcu_read_unlock(); | ||
1977 | |||
1978 | if (!tp) | ||
1979 | break; | ||
1980 | |||
1981 | /* Only need to wait if two_primaries is enabled */ | ||
1991 | prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); | 1982 | prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); |
1992 | spin_unlock(&mdev->peer_seq_lock); | 1983 | spin_unlock(&mdev->peer_seq_lock); |
1993 | rcu_read_lock(); | 1984 | rcu_read_lock(); |
@@ -2228,8 +2219,10 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi) | |||
2228 | } | 2219 | } |
2229 | goto out_interrupted; | 2220 | goto out_interrupted; |
2230 | } | 2221 | } |
2231 | } else | 2222 | } else { |
2223 | update_peer_seq(mdev, peer_seq); | ||
2232 | spin_lock_irq(&mdev->tconn->req_lock); | 2224 | spin_lock_irq(&mdev->tconn->req_lock); |
2225 | } | ||
2233 | list_add(&peer_req->w.list, &mdev->active_ee); | 2226 | list_add(&peer_req->w.list, &mdev->active_ee); |
2234 | spin_unlock_irq(&mdev->tconn->req_lock); | 2227 | spin_unlock_irq(&mdev->tconn->req_lock); |
2235 | 2228 | ||
@@ -4132,7 +4125,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
4132 | (unsigned int)bs.buf_len); | 4125 | (unsigned int)bs.buf_len); |
4133 | return -EIO; | 4126 | return -EIO; |
4134 | } | 4127 | } |
4135 | look_ahead >>= bits; | 4128 | /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */ |
4129 | if (likely(bits < 64)) | ||
4130 | look_ahead >>= bits; | ||
4131 | else | ||
4132 | look_ahead = 0; | ||
4136 | have -= bits; | 4133 | have -= bits; |
4137 | 4134 | ||
4138 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); | 4135 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); |
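The recv_bm_rle_bits() hunk above guards the right shift because shifting a 64-bit value by 64 or more is undefined behaviour in C (x86-64, for example, masks the shift count, so x >> 64 would leave x unchanged). A standalone demonstration of the guarded form; this is illustrative code, not drbd's:

#include <stdint.h>
#include <stdio.h>

/* Shifting a 64-bit value by >= 64 bits is undefined behaviour, so
 * "consume all remaining bits" is handled explicitly instead of
 * relying on the shift. Same pattern as the recv_bm_rle_bits() fix. */
static uint64_t consume_bits(uint64_t look_ahead, unsigned int bits)
{
	if (bits < 64)
		look_ahead >>= bits;
	else
		look_ahead = 0;
	return look_ahead;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)consume_bits(~0ULL, 3));
	printf("%llu\n", (unsigned long long)consume_bits(~0ULL, 64)); /* 0, not ~0 */
	return 0;
}
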
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c24379ffd4e3..fec7bef44994 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1306,6 +1306,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1306 | int backing_limit; | 1306 | int backing_limit; |
1307 | 1307 | ||
1308 | if (bio_size && get_ldev(mdev)) { | 1308 | if (bio_size && get_ldev(mdev)) { |
1309 | unsigned int max_hw_sectors = queue_max_hw_sectors(q); | ||
1309 | struct request_queue * const b = | 1310 | struct request_queue * const b = |
1310 | mdev->ldev->backing_bdev->bd_disk->queue; | 1311 | mdev->ldev->backing_bdev->bd_disk->queue; |
1311 | if (b->merge_bvec_fn) { | 1312 | if (b->merge_bvec_fn) { |
@@ -1313,6 +1314,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1313 | limit = min(limit, backing_limit); | 1314 | limit = min(limit, backing_limit); |
1314 | } | 1315 | } |
1315 | put_ldev(mdev); | 1316 | put_ldev(mdev); |
1317 | if ((limit >> 9) > max_hw_sectors) | ||
1318 | limit = max_hw_sectors << 9; | ||
1316 | } | 1319 | } |
1317 | return limit; | 1320 | return limit; |
1318 | } | 1321 | } |
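The drbd_merge_bvec() hunk caps the byte-sized merge limit at the queue's max_hw_sectors; since *_sectors values count 512-byte sectors, the comparison and clamp shift by 9. A small self-contained illustration of the conversion and clamp; function and variable names are mine, not drbd's:

#include <stdio.h>

/* Block-layer convention: *_sectors values count 512-byte sectors,
 * so bytes <-> sectors is a shift by 9. Mirrors the clamp added in
 * drbd_merge_bvec(), with illustrative names. */
static unsigned int clamp_to_hw_sectors(unsigned int limit_bytes,
					unsigned int max_hw_sectors)
{
	if ((limit_bytes >> 9) > max_hw_sectors)
		limit_bytes = max_hw_sectors << 9;
	return limit_bytes;
}

int main(void)
{
	/* A 1 MiB merge limit against a 256-sector (128 KiB) queue cap. */
	printf("%u\n", clamp_to_hw_sectors(1024 * 1024, 256)); /* 131072 */
	return 0;
}
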
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index dbdb88a4976c..c8dac7305244 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -894,13 +894,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
894 | 894 | ||
895 | bio_list_init(&lo->lo_bio_list); | 895 | bio_list_init(&lo->lo_bio_list); |
896 | 896 | ||
897 | /* | ||
898 | * set queue make_request_fn, and add limits based on lower level | ||
899 | * device | ||
900 | */ | ||
901 | blk_queue_make_request(lo->lo_queue, loop_make_request); | ||
902 | lo->lo_queue->queuedata = lo; | ||
903 | |||
904 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) | 897 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) |
905 | blk_queue_flush(lo->lo_queue, REQ_FLUSH); | 898 | blk_queue_flush(lo->lo_queue, REQ_FLUSH); |
906 | 899 | ||
@@ -1618,6 +1611,8 @@ static int loop_add(struct loop_device **l, int i) | |||
1618 | if (!lo) | 1611 | if (!lo) |
1619 | goto out; | 1612 | goto out; |
1620 | 1613 | ||
1614 | lo->lo_state = Lo_unbound; | ||
1615 | |||
1621 | /* allocate id, if @id >= 0, we're requesting that specific id */ | 1616 | /* allocate id, if @id >= 0, we're requesting that specific id */ |
1622 | if (i >= 0) { | 1617 | if (i >= 0) { |
1623 | err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); | 1618 | err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); |
@@ -1635,6 +1630,12 @@ static int loop_add(struct loop_device **l, int i) | |||
1635 | if (!lo->lo_queue) | 1630 | if (!lo->lo_queue) |
1636 | goto out_free_idr; | 1631 | goto out_free_idr; |
1637 | 1632 | ||
1633 | /* | ||
1634 | * set queue make_request_fn | ||
1635 | */ | ||
1636 | blk_queue_make_request(lo->lo_queue, loop_make_request); | ||
1637 | lo->lo_queue->queuedata = lo; | ||
1638 | |||
1638 | disk = lo->lo_disk = alloc_disk(1 << part_shift); | 1639 | disk = lo->lo_disk = alloc_disk(1 << part_shift); |
1639 | if (!disk) | 1640 | if (!disk) |
1640 | goto out_free_queue; | 1641 | goto out_free_queue; |
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 77a60bedd7a3..7bc363f1ee82 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -936,7 +936,7 @@ static int mg_probe(struct platform_device *plat_dev) | |||
936 | goto probe_err_3b; | 936 | goto probe_err_3b; |
937 | } | 937 | } |
938 | err = request_irq(host->irq, mg_irq, | 938 | err = request_irq(host->irq, mg_irq, |
939 | IRQF_DISABLED | IRQF_TRIGGER_RISING, | 939 | IRQF_TRIGGER_RISING, |
940 | MG_DEV_NAME, host); | 940 | MG_DEV_NAME, host); |
941 | if (err) { | 941 | if (err) { |
942 | printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n", | 942 | printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n", |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 952dbfe22126..050c71267f14 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -126,64 +126,30 @@ struct mtip_compat_ide_task_request_s { | |||
126 | static bool mtip_check_surprise_removal(struct pci_dev *pdev) | 126 | static bool mtip_check_surprise_removal(struct pci_dev *pdev) |
127 | { | 127 | { |
128 | u16 vendor_id = 0; | 128 | u16 vendor_id = 0; |
129 | struct driver_data *dd = pci_get_drvdata(pdev); | ||
130 | |||
131 | if (dd->sr) | ||
132 | return true; | ||
129 | 133 | ||
130 | /* Read the vendorID from the configuration space */ | 134 | /* Read the vendorID from the configuration space */ |
131 | pci_read_config_word(pdev, 0x00, &vendor_id); | 135 | pci_read_config_word(pdev, 0x00, &vendor_id); |
132 | if (vendor_id == 0xFFFF) | 136 | if (vendor_id == 0xFFFF) { |
137 | dd->sr = true; | ||
138 | if (dd->queue) | ||
139 | set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags); | ||
140 | else | ||
141 | dev_warn(&dd->pdev->dev, | ||
142 | "%s: dd->queue is NULL\n", __func__); | ||
143 | if (dd->port) { | ||
144 | set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags); | ||
145 | wake_up_interruptible(&dd->port->svc_wait); | ||
146 | } else | ||
147 | dev_warn(&dd->pdev->dev, | ||
148 | "%s: dd->port is NULL\n", __func__); | ||
133 | return true; /* device removed */ | 149 | return true; /* device removed */ |
134 | |||
135 | return false; /* device present */ | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * This function is called for clean the pending command in the | ||
140 | * command slot during the surprise removal of device and return | ||
141 | * error to the upper layer. | ||
142 | * | ||
143 | * @dd Pointer to the DRIVER_DATA structure. | ||
144 | * | ||
145 | * return value | ||
146 | * None | ||
147 | */ | ||
148 | static void mtip_command_cleanup(struct driver_data *dd) | ||
149 | { | ||
150 | int group = 0, commandslot = 0, commandindex = 0; | ||
151 | struct mtip_cmd *command; | ||
152 | struct mtip_port *port = dd->port; | ||
153 | static int in_progress; | ||
154 | |||
155 | if (in_progress) | ||
156 | return; | ||
157 | |||
158 | in_progress = 1; | ||
159 | |||
160 | for (group = 0; group < 4; group++) { | ||
161 | for (commandslot = 0; commandslot < 32; commandslot++) { | ||
162 | if (!(port->allocated[group] & (1 << commandslot))) | ||
163 | continue; | ||
164 | |||
165 | commandindex = group << 5 | commandslot; | ||
166 | command = &port->commands[commandindex]; | ||
167 | |||
168 | if (atomic_read(&command->active) | ||
169 | && (command->async_callback)) { | ||
170 | command->async_callback(command->async_data, | ||
171 | -ENODEV); | ||
172 | command->async_callback = NULL; | ||
173 | command->async_data = NULL; | ||
174 | } | ||
175 | |||
176 | dma_unmap_sg(&port->dd->pdev->dev, | ||
177 | command->sg, | ||
178 | command->scatter_ents, | ||
179 | command->direction); | ||
180 | } | ||
181 | } | 150 | } |
182 | 151 | ||
183 | up(&port->cmd_slot); | 152 | return false; /* device present */ |
184 | |||
185 | set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag); | ||
186 | in_progress = 0; | ||
187 | } | 153 | } |
188 | 154 | ||
189 | /* | 155 | /* |
@@ -222,10 +188,7 @@ static int get_slot(struct mtip_port *port) | |||
222 | } | 188 | } |
223 | dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n"); | 189 | dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n"); |
224 | 190 | ||
225 | if (mtip_check_surprise_removal(port->dd->pdev)) { | 191 | mtip_check_surprise_removal(port->dd->pdev); |
226 | /* Device not present, clean outstanding commands */ | ||
227 | mtip_command_cleanup(port->dd); | ||
228 | } | ||
229 | return -1; | 192 | return -1; |
230 | } | 193 | } |
231 | 194 | ||
@@ -246,6 +209,107 @@ static inline void release_slot(struct mtip_port *port, int tag) | |||
246 | } | 209 | } |
247 | 210 | ||
248 | /* | 211 | /* |
212 | * IO completion function. | ||
213 | * | ||
214 | * This completion function is called by the driver ISR when a | ||
215 | * command that was issued by the kernel completes. It first calls the | ||
216 | * asynchronous completion function which normally calls back into the block | ||
217 | * layer passing the asynchronous callback data, then unmaps the | ||
218 | * scatter list associated with the completed command, and finally | ||
219 | * clears the allocated bit associated with the completed command. | ||
220 | * | ||
221 | * @port Pointer to the port data structure. | ||
222 | * @tag Tag of the command. | ||
223 | * @data Pointer to driver_data. | ||
224 | * @status Completion status. | ||
225 | * | ||
226 | * return value | ||
227 | * None | ||
228 | */ | ||
229 | static void mtip_async_complete(struct mtip_port *port, | ||
230 | int tag, | ||
231 | void *data, | ||
232 | int status) | ||
233 | { | ||
234 | struct mtip_cmd *command; | ||
235 | struct driver_data *dd = data; | ||
236 | int cb_status = status ? -EIO : 0; | ||
237 | |||
238 | if (unlikely(!dd) || unlikely(!port)) | ||
239 | return; | ||
240 | |||
241 | command = &port->commands[tag]; | ||
242 | |||
243 | if (unlikely(status == PORT_IRQ_TF_ERR)) { | ||
244 | dev_warn(&port->dd->pdev->dev, | ||
245 | "Command tag %d failed due to TFE\n", tag); | ||
246 | } | ||
247 | |||
248 | /* Upper layer callback */ | ||
249 | if (likely(command->async_callback)) | ||
250 | command->async_callback(command->async_data, cb_status); | ||
251 | |||
252 | command->async_callback = NULL; | ||
253 | command->comp_func = NULL; | ||
254 | |||
255 | /* Unmap the DMA scatter list entries */ | ||
256 | dma_unmap_sg(&dd->pdev->dev, | ||
257 | command->sg, | ||
258 | command->scatter_ents, | ||
259 | command->direction); | ||
260 | |||
261 | /* Clear the allocated and active bits for the command */ | ||
262 | atomic_set(&port->commands[tag].active, 0); | ||
263 | release_slot(port, tag); | ||
264 | |||
265 | up(&port->cmd_slot); | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * This function is called for clean the pending command in the | ||
270 | * command slot during the surprise removal of device and return | ||
271 | * error to the upper layer. | ||
272 | * | ||
273 | * @dd Pointer to the DRIVER_DATA structure. | ||
274 | * | ||
275 | * return value | ||
276 | * None | ||
277 | */ | ||
278 | static void mtip_command_cleanup(struct driver_data *dd) | ||
279 | { | ||
280 | int tag = 0; | ||
281 | struct mtip_cmd *cmd; | ||
282 | struct mtip_port *port = dd->port; | ||
283 | unsigned int num_cmd_slots = dd->slot_groups * 32; | ||
284 | |||
285 | if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) | ||
286 | return; | ||
287 | |||
288 | if (!port) | ||
289 | return; | ||
290 | |||
291 | cmd = &port->commands[MTIP_TAG_INTERNAL]; | ||
292 | if (atomic_read(&cmd->active)) | ||
293 | if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & | ||
294 | (1 << MTIP_TAG_INTERNAL)) | ||
295 | if (cmd->comp_func) | ||
296 | cmd->comp_func(port, MTIP_TAG_INTERNAL, | ||
297 | cmd->comp_data, -ENODEV); | ||
298 | |||
299 | while (1) { | ||
300 | tag = find_next_bit(port->allocated, num_cmd_slots, tag); | ||
301 | if (tag >= num_cmd_slots) | ||
302 | break; | ||
303 | |||
304 | cmd = &port->commands[tag]; | ||
305 | if (atomic_read(&cmd->active)) | ||
306 | mtip_async_complete(port, tag, dd, -ENODEV); | ||
307 | } | ||
308 | |||
309 | set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag); | ||
310 | } | ||
311 | |||
312 | /* | ||
249 | * Reset the HBA (without sleeping) | 313 | * Reset the HBA (without sleeping) |
250 | * | 314 | * |
251 | * @dd Pointer to the driver data structure. | 315 | * @dd Pointer to the driver data structure. |
@@ -584,6 +648,9 @@ static void mtip_timeout_function(unsigned long int data) | |||
584 | if (unlikely(!port)) | 648 | if (unlikely(!port)) |
585 | return; | 649 | return; |
586 | 650 | ||
651 | if (unlikely(port->dd->sr)) | ||
652 | return; | ||
653 | |||
587 | if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) { | 654 | if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) { |
588 | mod_timer(&port->cmd_timer, | 655 | mod_timer(&port->cmd_timer, |
589 | jiffies + msecs_to_jiffies(30000)); | 656 | jiffies + msecs_to_jiffies(30000)); |
@@ -675,66 +742,6 @@ static void mtip_timeout_function(unsigned long int data) | |||
675 | } | 742 | } |
676 | 743 | ||
677 | /* | 744 | /* |
678 | * IO completion function. | ||
679 | * | ||
680 | * This completion function is called by the driver ISR when a | ||
681 | * command that was issued by the kernel completes. It first calls the | ||
682 | * asynchronous completion function which normally calls back into the block | ||
683 | * layer passing the asynchronous callback data, then unmaps the | ||
684 | * scatter list associated with the completed command, and finally | ||
685 | * clears the allocated bit associated with the completed command. | ||
686 | * | ||
687 | * @port Pointer to the port data structure. | ||
688 | * @tag Tag of the command. | ||
689 | * @data Pointer to driver_data. | ||
690 | * @status Completion status. | ||
691 | * | ||
692 | * return value | ||
693 | * None | ||
694 | */ | ||
695 | static void mtip_async_complete(struct mtip_port *port, | ||
696 | int tag, | ||
697 | void *data, | ||
698 | int status) | ||
699 | { | ||
700 | struct mtip_cmd *command; | ||
701 | struct driver_data *dd = data; | ||
702 | int cb_status = status ? -EIO : 0; | ||
703 | |||
704 | if (unlikely(!dd) || unlikely(!port)) | ||
705 | return; | ||
706 | |||
707 | command = &port->commands[tag]; | ||
708 | |||
709 | if (unlikely(status == PORT_IRQ_TF_ERR)) { | ||
710 | dev_warn(&port->dd->pdev->dev, | ||
711 | "Command tag %d failed due to TFE\n", tag); | ||
712 | } | ||
713 | |||
714 | /* Upper layer callback */ | ||
715 | if (likely(command->async_callback)) | ||
716 | command->async_callback(command->async_data, cb_status); | ||
717 | |||
718 | command->async_callback = NULL; | ||
719 | command->comp_func = NULL; | ||
720 | |||
721 | /* Unmap the DMA scatter list entries */ | ||
722 | dma_unmap_sg(&dd->pdev->dev, | ||
723 | command->sg, | ||
724 | command->scatter_ents, | ||
725 | command->direction); | ||
726 | |||
727 | /* Clear the allocated and active bits for the command */ | ||
728 | atomic_set(&port->commands[tag].active, 0); | ||
729 | release_slot(port, tag); | ||
730 | |||
731 | if (unlikely(command->unaligned)) | ||
732 | up(&port->cmd_slot_unal); | ||
733 | else | ||
734 | up(&port->cmd_slot); | ||
735 | } | ||
736 | |||
737 | /* | ||
738 | * Internal command completion callback function. | 745 | * Internal command completion callback function. |
739 | * | 746 | * |
740 | * This function is normally called by the driver ISR when an internal | 747 | * This function is normally called by the driver ISR when an internal |
@@ -854,7 +861,6 @@ static void mtip_handle_tfe(struct driver_data *dd) | |||
854 | "Missing completion func for tag %d", | 861 | "Missing completion func for tag %d", |
855 | tag); | 862 | tag); |
856 | if (mtip_check_surprise_removal(dd->pdev)) { | 863 | if (mtip_check_surprise_removal(dd->pdev)) { |
857 | mtip_command_cleanup(dd); | ||
858 | /* don't proceed further */ | 864 | /* don't proceed further */ |
859 | return; | 865 | return; |
860 | } | 866 | } |
@@ -1018,14 +1024,12 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group, | |||
1018 | command->comp_data, | 1024 | command->comp_data, |
1019 | 0); | 1025 | 0); |
1020 | } else { | 1026 | } else { |
1021 | dev_warn(&dd->pdev->dev, | 1027 | dev_dbg(&dd->pdev->dev, |
1022 | "Null completion " | 1028 | "Null completion for tag %d", |
1023 | "for tag %d", | ||
1024 | tag); | 1029 | tag); |
1025 | 1030 | ||
1026 | if (mtip_check_surprise_removal( | 1031 | if (mtip_check_surprise_removal( |
1027 | dd->pdev)) { | 1032 | dd->pdev)) { |
1028 | mtip_command_cleanup(dd); | ||
1029 | return; | 1033 | return; |
1030 | } | 1034 | } |
1031 | } | 1035 | } |
@@ -1145,7 +1149,6 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data) | |||
1145 | 1149 | ||
1146 | if (unlikely(port_stat & PORT_IRQ_ERR)) { | 1150 | if (unlikely(port_stat & PORT_IRQ_ERR)) { |
1147 | if (unlikely(mtip_check_surprise_removal(dd->pdev))) { | 1151 | if (unlikely(mtip_check_surprise_removal(dd->pdev))) { |
1148 | mtip_command_cleanup(dd); | ||
1149 | /* don't proceed further */ | 1152 | /* don't proceed further */ |
1150 | return IRQ_HANDLED; | 1153 | return IRQ_HANDLED; |
1151 | } | 1154 | } |
@@ -2806,34 +2809,51 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf) | |||
2806 | static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, | 2809 | static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, |
2807 | size_t len, loff_t *offset) | 2810 | size_t len, loff_t *offset) |
2808 | { | 2811 | { |
2812 | struct driver_data *dd = (struct driver_data *)f->private_data; | ||
2809 | int size = *offset; | 2813 | int size = *offset; |
2810 | char buf[MTIP_DFS_MAX_BUF_SIZE]; | 2814 | char *buf; |
2815 | int rv = 0; | ||
2811 | 2816 | ||
2812 | if (!len || *offset) | 2817 | if (!len || *offset) |
2813 | return 0; | 2818 | return 0; |
2814 | 2819 | ||
2820 | buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); | ||
2821 | if (!buf) { | ||
2822 | dev_err(&dd->pdev->dev, | ||
2823 | "Memory allocation: status buffer\n"); | ||
2824 | return -ENOMEM; | ||
2825 | } | ||
2826 | |||
2815 | size += show_device_status(NULL, buf); | 2827 | size += show_device_status(NULL, buf); |
2816 | 2828 | ||
2817 | *offset = size <= len ? size : len; | 2829 | *offset = size <= len ? size : len; |
2818 | size = copy_to_user(ubuf, buf, *offset); | 2830 | size = copy_to_user(ubuf, buf, *offset); |
2819 | if (size) | 2831 | if (size) |
2820 | return -EFAULT; | 2832 | rv = -EFAULT; |
2821 | 2833 | ||
2822 | return *offset; | 2834 | kfree(buf); |
2835 | return rv ? rv : *offset; | ||
2823 | } | 2836 | } |
2824 | 2837 | ||
2825 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, | 2838 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, |
2826 | size_t len, loff_t *offset) | 2839 | size_t len, loff_t *offset) |
2827 | { | 2840 | { |
2828 | struct driver_data *dd = (struct driver_data *)f->private_data; | 2841 | struct driver_data *dd = (struct driver_data *)f->private_data; |
2829 | char buf[MTIP_DFS_MAX_BUF_SIZE]; | 2842 | char *buf; |
2830 | u32 group_allocated; | 2843 | u32 group_allocated; |
2831 | int size = *offset; | 2844 | int size = *offset; |
2832 | int n; | 2845 | int n, rv = 0; |
2833 | 2846 | ||
2834 | if (!len || size) | 2847 | if (!len || size) |
2835 | return 0; | 2848 | return 0; |
2836 | 2849 | ||
2850 | buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); | ||
2851 | if (!buf) { | ||
2852 | dev_err(&dd->pdev->dev, | ||
2853 | "Memory allocation: register buffer\n"); | ||
2854 | return -ENOMEM; | ||
2855 | } | ||
2856 | |||
2837 | size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); | 2857 | size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); |
2838 | 2858 | ||
2839 | for (n = dd->slot_groups-1; n >= 0; n--) | 2859 | for (n = dd->slot_groups-1; n >= 0; n--) |
@@ -2888,21 +2908,30 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, | |||
2888 | *offset = size <= len ? size : len; | 2908 | *offset = size <= len ? size : len; |
2889 | size = copy_to_user(ubuf, buf, *offset); | 2909 | size = copy_to_user(ubuf, buf, *offset); |
2890 | if (size) | 2910 | if (size) |
2891 | return -EFAULT; | 2911 | rv = -EFAULT; |
2892 | 2912 | ||
2893 | return *offset; | 2913 | kfree(buf); |
2914 | return rv ? rv : *offset; | ||
2894 | } | 2915 | } |
2895 | 2916 | ||
2896 | static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, | 2917 | static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, |
2897 | size_t len, loff_t *offset) | 2918 | size_t len, loff_t *offset) |
2898 | { | 2919 | { |
2899 | struct driver_data *dd = (struct driver_data *)f->private_data; | 2920 | struct driver_data *dd = (struct driver_data *)f->private_data; |
2900 | char buf[MTIP_DFS_MAX_BUF_SIZE]; | 2921 | char *buf; |
2901 | int size = *offset; | 2922 | int size = *offset; |
2923 | int rv = 0; | ||
2902 | 2924 | ||
2903 | if (!len || size) | 2925 | if (!len || size) |
2904 | return 0; | 2926 | return 0; |
2905 | 2927 | ||
2928 | buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); | ||
2929 | if (!buf) { | ||
2930 | dev_err(&dd->pdev->dev, | ||
2931 | "Memory allocation: flag buffer\n"); | ||
2932 | return -ENOMEM; | ||
2933 | } | ||
2934 | |||
2906 | size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", | 2935 | size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", |
2907 | dd->port->flags); | 2936 | dd->port->flags); |
2908 | size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", | 2937 | size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", |
@@ -2911,9 +2940,10 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, | |||
2911 | *offset = size <= len ? size : len; | 2940 | *offset = size <= len ? size : len; |
2912 | size = copy_to_user(ubuf, buf, *offset); | 2941 | size = copy_to_user(ubuf, buf, *offset); |
2913 | if (size) | 2942 | if (size) |
2914 | return -EFAULT; | 2943 | rv = -EFAULT; |
2915 | 2944 | ||
2916 | return *offset; | 2945 | kfree(buf); |
2946 | return rv ? rv : *offset; | ||
2917 | } | 2947 | } |
2918 | 2948 | ||
2919 | static const struct file_operations mtip_device_status_fops = { | 2949 | static const struct file_operations mtip_device_status_fops = { |
@@ -3006,6 +3036,46 @@ static void mtip_hw_debugfs_exit(struct driver_data *dd) | |||
3006 | debugfs_remove_recursive(dd->dfs_node); | 3036 | debugfs_remove_recursive(dd->dfs_node); |
3007 | } | 3037 | } |
3008 | 3038 | ||
3039 | static int mtip_free_orphan(struct driver_data *dd) | ||
3040 | { | ||
3041 | struct kobject *kobj; | ||
3042 | |||
3043 | if (dd->bdev) { | ||
3044 | if (dd->bdev->bd_holders >= 1) | ||
3045 | return -2; | ||
3046 | |||
3047 | bdput(dd->bdev); | ||
3048 | dd->bdev = NULL; | ||
3049 | } | ||
3050 | |||
3051 | mtip_hw_debugfs_exit(dd); | ||
3052 | |||
3053 | spin_lock(&rssd_index_lock); | ||
3054 | ida_remove(&rssd_index_ida, dd->index); | ||
3055 | spin_unlock(&rssd_index_lock); | ||
3056 | |||
3057 | if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) && | ||
3058 | test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { | ||
3059 | put_disk(dd->disk); | ||
3060 | } else { | ||
3061 | if (dd->disk) { | ||
3062 | kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); | ||
3063 | if (kobj) { | ||
3064 | mtip_hw_sysfs_exit(dd, kobj); | ||
3065 | kobject_put(kobj); | ||
3066 | } | ||
3067 | del_gendisk(dd->disk); | ||
3068 | dd->disk = NULL; | ||
3069 | } | ||
3070 | if (dd->queue) { | ||
3071 | dd->queue->queuedata = NULL; | ||
3072 | blk_cleanup_queue(dd->queue); | ||
3073 | dd->queue = NULL; | ||
3074 | } | ||
3075 | } | ||
3076 | kfree(dd); | ||
3077 | return 0; | ||
3078 | } | ||
3009 | 3079 | ||
3010 | /* | 3080 | /* |
3011 | * Perform any init/resume time hardware setup | 3081 | * Perform any init/resume time hardware setup |
@@ -3154,6 +3224,7 @@ static int mtip_service_thread(void *data) | |||
3154 | unsigned long slot, slot_start, slot_wrap; | 3224 | unsigned long slot, slot_start, slot_wrap; |
3155 | unsigned int num_cmd_slots = dd->slot_groups * 32; | 3225 | unsigned int num_cmd_slots = dd->slot_groups * 32; |
3156 | struct mtip_port *port = dd->port; | 3226 | struct mtip_port *port = dd->port; |
3227 | int ret; | ||
3157 | 3228 | ||
3158 | while (1) { | 3229 | while (1) { |
3159 | /* | 3230 | /* |
@@ -3164,13 +3235,18 @@ static int mtip_service_thread(void *data) | |||
3164 | !(port->flags & MTIP_PF_PAUSE_IO)); | 3235 | !(port->flags & MTIP_PF_PAUSE_IO)); |
3165 | 3236 | ||
3166 | if (kthread_should_stop()) | 3237 | if (kthread_should_stop()) |
3238 | goto st_out; | ||
3239 | |||
3240 | set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); | ||
3241 | |||
3242 | /* If I am an orphan, start self cleanup */ | ||
3243 | if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags)) | ||
3167 | break; | 3244 | break; |
3168 | 3245 | ||
3169 | if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 3246 | if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
3170 | &dd->dd_flag))) | 3247 | &dd->dd_flag))) |
3171 | break; | 3248 | goto st_out; |
3172 | 3249 | ||
3173 | set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); | ||
3174 | if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { | 3250 | if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { |
3175 | slot = 1; | 3251 | slot = 1; |
3176 | /* used to restrict the loop to one iteration */ | 3252 | /* used to restrict the loop to one iteration */ |
@@ -3201,7 +3277,7 @@ static int mtip_service_thread(void *data) | |||
3201 | 3277 | ||
3202 | clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); | 3278 | clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); |
3203 | } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { | 3279 | } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { |
3204 | if (!mtip_ftl_rebuild_poll(dd)) | 3280 | if (mtip_ftl_rebuild_poll(dd) < 0) |
3205 | set_bit(MTIP_DDF_REBUILD_FAILED_BIT, | 3281 | set_bit(MTIP_DDF_REBUILD_FAILED_BIT, |
3206 | &dd->dd_flag); | 3282 | &dd->dd_flag); |
3207 | clear_bit(MTIP_PF_REBUILD_BIT, &port->flags); | 3283 | clear_bit(MTIP_PF_REBUILD_BIT, &port->flags); |
@@ -3209,8 +3285,30 @@ static int mtip_service_thread(void *data) | |||
3209 | clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); | 3285 | clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); |
3210 | 3286 | ||
3211 | if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) | 3287 | if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) |
3288 | goto st_out; | ||
3289 | } | ||
3290 | |||
3291 | /* wait for pci remove to exit */ | ||
3292 | while (1) { | ||
3293 | if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag)) | ||
3212 | break; | 3294 | break; |
3295 | msleep_interruptible(1000); | ||
3296 | if (kthread_should_stop()) | ||
3297 | goto st_out; | ||
3298 | } | ||
3299 | |||
3300 | while (1) { | ||
3301 | ret = mtip_free_orphan(dd); | ||
3302 | if (!ret) { | ||
3303 | /* NOTE: All data structures are invalid, do not | ||
3304 | * access any here */ | ||
3305 | return 0; | ||
3306 | } | ||
3307 | msleep_interruptible(1000); | ||
3308 | if (kthread_should_stop()) | ||
3309 | goto st_out; | ||
3213 | } | 3310 | } |
3311 | st_out: | ||
3214 | return 0; | 3312 | return 0; |
3215 | } | 3313 | } |
3216 | 3314 | ||
@@ -3437,13 +3535,13 @@ static int mtip_hw_init(struct driver_data *dd) | |||
3437 | rv = -EFAULT; | 3535 | rv = -EFAULT; |
3438 | goto out3; | 3536 | goto out3; |
3439 | } | 3537 | } |
3538 | mtip_dump_identify(dd->port); | ||
3440 | 3539 | ||
3441 | if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == | 3540 | if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == |
3442 | MTIP_FTL_REBUILD_MAGIC) { | 3541 | MTIP_FTL_REBUILD_MAGIC) { |
3443 | set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); | 3542 | set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); |
3444 | return MTIP_FTL_REBUILD_MAGIC; | 3543 | return MTIP_FTL_REBUILD_MAGIC; |
3445 | } | 3544 | } |
3446 | mtip_dump_identify(dd->port); | ||
3447 | 3545 | ||
3448 | /* check write protect, over temp and rebuild statuses */ | 3546 | /* check write protect, over temp and rebuild statuses */ |
3449 | rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, | 3547 | rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, |
@@ -3467,8 +3565,8 @@ static int mtip_hw_init(struct driver_data *dd) | |||
3467 | } | 3565 | } |
3468 | if (buf[288] == 0xBF) { | 3566 | if (buf[288] == 0xBF) { |
3469 | dev_info(&dd->pdev->dev, | 3567 | dev_info(&dd->pdev->dev, |
3470 | "Drive indicates rebuild has failed.\n"); | 3568 | "Drive is in security locked state.\n"); |
3471 | /* TODO */ | 3569 | set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); |
3472 | } | 3570 | } |
3473 | } | 3571 | } |
3474 | 3572 | ||
@@ -3523,9 +3621,8 @@ static int mtip_hw_exit(struct driver_data *dd) | |||
3523 | * Send standby immediate (E0h) to the drive so that it | 3621 | * Send standby immediate (E0h) to the drive so that it |
3524 | * saves its state. | 3622 | * saves its state. |
3525 | */ | 3623 | */ |
3526 | if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { | 3624 | if (!dd->sr) { |
3527 | 3625 | if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) | |
3528 | if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) | ||
3529 | if (mtip_standby_immediate(dd->port)) | 3626 | if (mtip_standby_immediate(dd->port)) |
3530 | dev_warn(&dd->pdev->dev, | 3627 | dev_warn(&dd->pdev->dev, |
3531 | "STANDBY IMMEDIATE failed\n"); | 3628 | "STANDBY IMMEDIATE failed\n"); |
@@ -3551,6 +3648,7 @@ static int mtip_hw_exit(struct driver_data *dd) | |||
3551 | dd->port->command_list_dma); | 3648 | dd->port->command_list_dma); |
3552 | /* Free the memory allocated for the for structure. */ | 3649 | /* Free the memory allocated for the for structure. */ |
3553 | kfree(dd->port); | 3650 | kfree(dd->port); |
3651 | dd->port = NULL; | ||
3554 | 3652 | ||
3555 | return 0; | 3653 | return 0; |
3556 | } | 3654 | } |
@@ -3572,7 +3670,8 @@ static int mtip_hw_shutdown(struct driver_data *dd) | |||
3572 | * Send standby immediate (E0h) to the drive so that it | 3670 | * Send standby immediate (E0h) to the drive so that it |
3573 | * saves its state. | 3671 | * saves its state. |
3574 | */ | 3672 | */ |
3575 | mtip_standby_immediate(dd->port); | 3673 | if (!dd->sr && dd->port) |
3674 | mtip_standby_immediate(dd->port); | ||
3576 | 3675 | ||
3577 | return 0; | 3676 | return 0; |
3578 | } | 3677 | } |
@@ -3887,6 +3986,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) | |||
3887 | bio_endio(bio, -ENODATA); | 3986 | bio_endio(bio, -ENODATA); |
3888 | return; | 3987 | return; |
3889 | } | 3988 | } |
3989 | if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { | ||
3990 | bio_endio(bio, -ENXIO); | ||
3991 | return; | ||
3992 | } | ||
3890 | } | 3993 | } |
3891 | 3994 | ||
3892 | if (unlikely(bio->bi_rw & REQ_DISCARD)) { | 3995 | if (unlikely(bio->bi_rw & REQ_DISCARD)) { |
@@ -4010,6 +4113,8 @@ static int mtip_block_initialize(struct driver_data *dd) | |||
4010 | dd->disk->private_data = dd; | 4113 | dd->disk->private_data = dd; |
4011 | dd->index = index; | 4114 | dd->index = index; |
4012 | 4115 | ||
4116 | mtip_hw_debugfs_init(dd); | ||
4117 | |||
4013 | /* | 4118 | /* |
4014 | * if rebuild pending, start the service thread, and delay the block | 4119 | * if rebuild pending, start the service thread, and delay the block |
4015 | * queue creation and add_disk() | 4120 | * queue creation and add_disk() |
@@ -4068,6 +4173,7 @@ skip_create_disk: | |||
4068 | /* Enable the block device and add it to /dev */ | 4173 | /* Enable the block device and add it to /dev */ |
4069 | add_disk(dd->disk); | 4174 | add_disk(dd->disk); |
4070 | 4175 | ||
4176 | dd->bdev = bdget_disk(dd->disk, 0); | ||
4071 | /* | 4177 | /* |
4072 | * Now that the disk is active, initialize any sysfs attributes | 4178 | * Now that the disk is active, initialize any sysfs attributes |
4073 | * managed by the protocol layer. | 4179 | * managed by the protocol layer. |
@@ -4077,7 +4183,6 @@ skip_create_disk: | |||
4077 | mtip_hw_sysfs_init(dd, kobj); | 4183 | mtip_hw_sysfs_init(dd, kobj); |
4078 | kobject_put(kobj); | 4184 | kobject_put(kobj); |
4079 | } | 4185 | } |
4080 | mtip_hw_debugfs_init(dd); | ||
4081 | 4186 | ||
4082 | if (dd->mtip_svc_handler) { | 4187 | if (dd->mtip_svc_handler) { |
4083 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); | 4188 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); |
@@ -4103,7 +4208,8 @@ start_service_thread: | |||
4103 | return rv; | 4208 | return rv; |
4104 | 4209 | ||
4105 | kthread_run_error: | 4210 | kthread_run_error: |
4106 | mtip_hw_debugfs_exit(dd); | 4211 | bdput(dd->bdev); |
4212 | dd->bdev = NULL; | ||
4107 | 4213 | ||
4108 | /* Delete our gendisk. This also removes the device from /dev */ | 4214 | /* Delete our gendisk. This also removes the device from /dev */ |
4109 | del_gendisk(dd->disk); | 4215 | del_gendisk(dd->disk); |
@@ -4112,6 +4218,7 @@ read_capacity_error: | |||
4112 | blk_cleanup_queue(dd->queue); | 4218 | blk_cleanup_queue(dd->queue); |
4113 | 4219 | ||
4114 | block_queue_alloc_init_error: | 4220 | block_queue_alloc_init_error: |
4221 | mtip_hw_debugfs_exit(dd); | ||
4115 | disk_index_error: | 4222 | disk_index_error: |
4116 | spin_lock(&rssd_index_lock); | 4223 | spin_lock(&rssd_index_lock); |
4117 | ida_remove(&rssd_index_ida, index); | 4224 | ida_remove(&rssd_index_ida, index); |
@@ -4141,40 +4248,48 @@ static int mtip_block_remove(struct driver_data *dd) | |||
4141 | { | 4248 | { |
4142 | struct kobject *kobj; | 4249 | struct kobject *kobj; |
4143 | 4250 | ||
4144 | if (dd->mtip_svc_handler) { | 4251 | if (!dd->sr) { |
4145 | set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); | 4252 | mtip_hw_debugfs_exit(dd); |
4146 | wake_up_interruptible(&dd->port->svc_wait); | ||
4147 | kthread_stop(dd->mtip_svc_handler); | ||
4148 | } | ||
4149 | 4253 | ||
4150 | /* Clean up the sysfs attributes, if created */ | 4254 | if (dd->mtip_svc_handler) { |
4151 | if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { | 4255 | set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); |
4152 | kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); | 4256 | wake_up_interruptible(&dd->port->svc_wait); |
4153 | if (kobj) { | 4257 | kthread_stop(dd->mtip_svc_handler); |
4154 | mtip_hw_sysfs_exit(dd, kobj); | ||
4155 | kobject_put(kobj); | ||
4156 | } | 4258 | } |
4157 | } | ||
4158 | mtip_hw_debugfs_exit(dd); | ||
4159 | 4259 | ||
4160 | /* | 4260 | /* Clean up the sysfs attributes, if created */ |
4161 | * Delete our gendisk structure. This also removes the device | 4261 | if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { |
4162 | * from /dev | 4262 | kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); |
4163 | */ | 4263 | if (kobj) { |
4164 | if (dd->disk) { | 4264 | mtip_hw_sysfs_exit(dd, kobj); |
4165 | if (dd->disk->queue) | 4265 | kobject_put(kobj); |
4166 | del_gendisk(dd->disk); | 4266 | } |
4167 | else | 4267 | } |
4168 | put_disk(dd->disk); | 4268 | /* |
4169 | } | 4269 | * Delete our gendisk structure. This also removes the device |
4170 | 4270 | * from /dev | |
4171 | spin_lock(&rssd_index_lock); | 4271 | */ |
4172 | ida_remove(&rssd_index_ida, dd->index); | 4272 | if (dd->bdev) { |
4173 | spin_unlock(&rssd_index_lock); | 4273 | bdput(dd->bdev); |
4274 | dd->bdev = NULL; | ||
4275 | } | ||
4276 | if (dd->disk) { | ||
4277 | if (dd->disk->queue) { | ||
4278 | del_gendisk(dd->disk); | ||
4279 | blk_cleanup_queue(dd->queue); | ||
4280 | dd->queue = NULL; | ||
4281 | } else | ||
4282 | put_disk(dd->disk); | ||
4283 | } | ||
4284 | dd->disk = NULL; | ||
4174 | 4285 | ||
4175 | blk_cleanup_queue(dd->queue); | 4286 | spin_lock(&rssd_index_lock); |
4176 | dd->disk = NULL; | 4287 | ida_remove(&rssd_index_ida, dd->index); |
4177 | dd->queue = NULL; | 4288 | spin_unlock(&rssd_index_lock); |
4289 | } else { | ||
4290 | dev_info(&dd->pdev->dev, "device %s surprise removal\n", | ||
4291 | dd->disk->disk_name); | ||
4292 | } | ||
4178 | 4293 | ||
4179 | /* De-initialize the protocol layer. */ | 4294 | /* De-initialize the protocol layer. */ |
4180 | mtip_hw_exit(dd); | 4295 | mtip_hw_exit(dd); |
@@ -4490,8 +4605,7 @@ done: | |||
4490 | static void mtip_pci_remove(struct pci_dev *pdev) | 4605 | static void mtip_pci_remove(struct pci_dev *pdev) |
4491 | { | 4606 | { |
4492 | struct driver_data *dd = pci_get_drvdata(pdev); | 4607 | struct driver_data *dd = pci_get_drvdata(pdev); |
4493 | int counter = 0; | 4608 | unsigned long flags, to; |
4494 | unsigned long flags; | ||
4495 | 4609 | ||
4496 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); | 4610 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); |
4497 | 4611 | ||
@@ -4500,17 +4614,22 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4500 | list_add(&dd->remove_list, &removing_list); | 4614 | list_add(&dd->remove_list, &removing_list); |
4501 | spin_unlock_irqrestore(&dev_lock, flags); | 4615 | spin_unlock_irqrestore(&dev_lock, flags); |
4502 | 4616 | ||
4503 | if (mtip_check_surprise_removal(pdev)) { | 4617 | mtip_check_surprise_removal(pdev); |
4504 | while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { | 4618 | synchronize_irq(dd->pdev->irq); |
4505 | counter++; | 4619 | |
4506 | msleep(20); | 4620 | /* Spin until workers are done */ |
4507 | if (counter == 10) { | 4621 | to = jiffies + msecs_to_jiffies(4000); |
4508 | /* Cleanup the outstanding commands */ | 4622 | do { |
4509 | mtip_command_cleanup(dd); | 4623 | msleep(20); |
4510 | break; | 4624 | } while (atomic_read(&dd->irq_workers_active) != 0 && |
4511 | } | 4625 | time_before(jiffies, to)); |
4512 | } | 4626 | |
4627 | if (atomic_read(&dd->irq_workers_active) != 0) { | ||
4628 | dev_warn(&dd->pdev->dev, | ||
4629 | "Completion workers still active!\n"); | ||
4513 | } | 4630 | } |
4631 | /* Cleanup the outstanding commands */ | ||
4632 | mtip_command_cleanup(dd); | ||
4514 | 4633 | ||
4515 | /* Clean up the block layer. */ | 4634 | /* Clean up the block layer. */ |
4516 | mtip_block_remove(dd); | 4635 | mtip_block_remove(dd); |
@@ -4529,8 +4648,15 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4529 | list_del_init(&dd->remove_list); | 4648 | list_del_init(&dd->remove_list); |
4530 | spin_unlock_irqrestore(&dev_lock, flags); | 4649 | spin_unlock_irqrestore(&dev_lock, flags); |
4531 | 4650 | ||
4532 | kfree(dd); | 4651 | if (!dd->sr) |
4652 | kfree(dd); | ||
4653 | else | ||
4654 | set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag); | ||
4655 | |||
4533 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); | 4656 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); |
4657 | pci_set_drvdata(pdev, NULL); | ||
4658 | pci_dev_put(pdev); | ||
4659 | |||
4534 | } | 4660 | } |
4535 | 4661 | ||
4536 | /* | 4662 | /* |
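Much of the mtip32xx diff above revolves around surprise removal, which mtip_check_surprise_removal() detects by reading the vendor ID from PCI config space: config reads from a removed device come back as all ones. The following is a kernel-style sketch of just that detection step; it is a hypothetical helper, whereas the real function additionally latches dd->sr, marks the queue dead and wakes the service thread to begin cleanup.

#include <linux/pci.h>

/* Sketch only: a PCI config read on a surprise-removed device completes
 * with all bits set, so a vendor ID of 0xFFFF is taken as "device is
 * gone". Not the driver's exact code. */
static bool device_was_removed(struct pci_dev *pdev)
{
	u16 vendor_id = 0;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
	return vendor_id == 0xFFFF;
}
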
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3bb8a295fbe4..9be7a1582ad3 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -140,6 +140,7 @@ enum { | |||
140 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, | 140 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, |
141 | MTIP_PF_ISSUE_CMDS_BIT = 5, | 141 | MTIP_PF_ISSUE_CMDS_BIT = 5, |
142 | MTIP_PF_REBUILD_BIT = 6, | 142 | MTIP_PF_REBUILD_BIT = 6, |
143 | MTIP_PF_SR_CLEANUP_BIT = 7, | ||
143 | MTIP_PF_SVC_THD_STOP_BIT = 8, | 144 | MTIP_PF_SVC_THD_STOP_BIT = 8, |
144 | 145 | ||
145 | /* below are bit numbers in 'dd_flag' defined in driver_data */ | 146 | /* below are bit numbers in 'dd_flag' defined in driver_data */ |
@@ -147,15 +148,18 @@ enum { | |||
147 | MTIP_DDF_REMOVE_PENDING_BIT = 1, | 148 | MTIP_DDF_REMOVE_PENDING_BIT = 1, |
148 | MTIP_DDF_OVER_TEMP_BIT = 2, | 149 | MTIP_DDF_OVER_TEMP_BIT = 2, |
149 | MTIP_DDF_WRITE_PROTECT_BIT = 3, | 150 | MTIP_DDF_WRITE_PROTECT_BIT = 3, |
150 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | | 151 | MTIP_DDF_REMOVE_DONE_BIT = 4, |
151 | (1 << MTIP_DDF_SEC_LOCK_BIT) | | ||
152 | (1 << MTIP_DDF_OVER_TEMP_BIT) | | ||
153 | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), | ||
154 | |||
155 | MTIP_DDF_CLEANUP_BIT = 5, | 152 | MTIP_DDF_CLEANUP_BIT = 5, |
156 | MTIP_DDF_RESUME_BIT = 6, | 153 | MTIP_DDF_RESUME_BIT = 6, |
157 | MTIP_DDF_INIT_DONE_BIT = 7, | 154 | MTIP_DDF_INIT_DONE_BIT = 7, |
158 | MTIP_DDF_REBUILD_FAILED_BIT = 8, | 155 | MTIP_DDF_REBUILD_FAILED_BIT = 8, |
156 | |||
157 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | | ||
158 | (1 << MTIP_DDF_SEC_LOCK_BIT) | | ||
159 | (1 << MTIP_DDF_OVER_TEMP_BIT) | | ||
160 | (1 << MTIP_DDF_WRITE_PROTECT_BIT) | | ||
161 | (1 << MTIP_DDF_REBUILD_FAILED_BIT)), | ||
162 | |||
159 | }; | 163 | }; |
160 | 164 | ||
161 | struct smart_attr { | 165 | struct smart_attr { |
@@ -499,6 +503,8 @@ struct driver_data { | |||
499 | 503 | ||
500 | bool trim_supp; /* flag indicating trim support */ | 504 | bool trim_supp; /* flag indicating trim support */ |
501 | 505 | ||
506 | bool sr; | ||
507 | |||
502 | int numa_node; /* NUMA support */ | 508 | int numa_node; /* NUMA support */ |
503 | 509 | ||
504 | char workq_name[32]; | 510 | char workq_name[32]; |
@@ -511,6 +517,8 @@ struct driver_data { | |||
511 | 517 | ||
512 | int isr_binding; | 518 | int isr_binding; |
513 | 519 | ||
520 | struct block_device *bdev; | ||
521 | |||
514 | int unal_qdepth; /* qdepth of unaligned IO queue */ | 522 | int unal_qdepth; /* qdepth of unaligned IO queue */ |
515 | 523 | ||
516 | struct list_head online_list; /* linkage for online list */ | 524 | struct list_head online_list; /* linkage for online list */ |
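The mtip32xx.h hunk regroups the dd_flag bit definitions so that MTIP_DDF_STOP_IO can also include the rebuild-failed bit. A small self-contained illustration of how the mask gates new IO; bit values are copied from the hunk, except MTIP_DDF_SEC_LOCK_BIT, whose value is not visible here and is assumed to be 0:

#include <stdio.h>

/* MTIP_DDF_STOP_IO is a bitwise OR of individual dd_flag bits and,
 * after this change, also covers a failed FTL rebuild.
 * MTIP_DDF_SEC_LOCK_BIT's value is not shown in the hunk; 0 is assumed. */
enum {
	MTIP_DDF_SEC_LOCK_BIT		= 0,
	MTIP_DDF_REMOVE_PENDING_BIT	= 1,
	MTIP_DDF_OVER_TEMP_BIT		= 2,
	MTIP_DDF_WRITE_PROTECT_BIT	= 3,
	MTIP_DDF_REBUILD_FAILED_BIT	= 8,

	MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
			    (1 << MTIP_DDF_SEC_LOCK_BIT) |
			    (1 << MTIP_DDF_OVER_TEMP_BIT) |
			    (1 << MTIP_DDF_WRITE_PROTECT_BIT) |
			    (1 << MTIP_DDF_REBUILD_FAILED_BIT)),
};

int main(void)
{
	unsigned long dd_flag = 1UL << MTIP_DDF_REBUILD_FAILED_BIT;

	/* New incoming IO is refused whenever any STOP_IO bit is set. */
	printf("stop io: %s\n", (dd_flag & MTIP_DDF_STOP_IO) ? "yes" : "no");
	return 0;
}
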
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 56188475cfd3..ff8668c5efb1 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -473,45 +473,31 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) | |||
473 | { | 473 | { |
474 | if (!pkt_debugfs_root) | 474 | if (!pkt_debugfs_root) |
475 | return; | 475 | return; |
476 | pd->dfs_f_info = NULL; | ||
477 | pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); | 476 | pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); |
478 | if (IS_ERR(pd->dfs_d_root)) { | 477 | if (!pd->dfs_d_root) |
479 | pd->dfs_d_root = NULL; | ||
480 | return; | 478 | return; |
481 | } | 479 | |
482 | pd->dfs_f_info = debugfs_create_file("info", S_IRUGO, | 480 | pd->dfs_f_info = debugfs_create_file("info", S_IRUGO, |
483 | pd->dfs_d_root, pd, &debug_fops); | 481 | pd->dfs_d_root, pd, &debug_fops); |
484 | if (IS_ERR(pd->dfs_f_info)) { | ||
485 | pd->dfs_f_info = NULL; | ||
486 | return; | ||
487 | } | ||
488 | } | 482 | } |
489 | 483 | ||
490 | static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) | 484 | static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) |
491 | { | 485 | { |
492 | if (!pkt_debugfs_root) | 486 | if (!pkt_debugfs_root) |
493 | return; | 487 | return; |
494 | if (pd->dfs_f_info) | 488 | debugfs_remove(pd->dfs_f_info); |
495 | debugfs_remove(pd->dfs_f_info); | 489 | debugfs_remove(pd->dfs_d_root); |
496 | pd->dfs_f_info = NULL; | 490 | pd->dfs_f_info = NULL; |
497 | if (pd->dfs_d_root) | ||
498 | debugfs_remove(pd->dfs_d_root); | ||
499 | pd->dfs_d_root = NULL; | 491 | pd->dfs_d_root = NULL; |
500 | } | 492 | } |
501 | 493 | ||
502 | static void pkt_debugfs_init(void) | 494 | static void pkt_debugfs_init(void) |
503 | { | 495 | { |
504 | pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL); | 496 | pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL); |
505 | if (IS_ERR(pkt_debugfs_root)) { | ||
506 | pkt_debugfs_root = NULL; | ||
507 | return; | ||
508 | } | ||
509 | } | 497 | } |
510 | 498 | ||
511 | static void pkt_debugfs_cleanup(void) | 499 | static void pkt_debugfs_cleanup(void) |
512 | { | 500 | { |
513 | if (!pkt_debugfs_root) | ||
514 | return; | ||
515 | debugfs_remove(pkt_debugfs_root); | 501 | debugfs_remove(pkt_debugfs_root); |
516 | pkt_debugfs_root = NULL; | 502 | pkt_debugfs_root = NULL; |
517 | } | 503 | } |
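The pktcdvd hunks above rely on two properties of the debugfs API: debugfs_create_dir()/debugfs_create_file() hand back NULL on failure here, and debugfs_remove() is a no-op when given a NULL dentry, so the IS_ERR() checks and the NULL guards before removal can go. A minimal usage sketch of that pattern (hypothetical names, not part of the patch):

    #include <linux/debugfs.h>
    #include <linux/fs.h>

    static struct dentry *example_dir;
    static struct dentry *example_file;

    /* Creation: on failure just bail; no bookkeeping is needed because the
     * teardown below tolerates NULL.
     */
    static void example_debugfs_setup(void *priv,
                                      const struct file_operations *fops)
    {
            example_dir = debugfs_create_dir("example", NULL);
            if (!example_dir)
                    return;
            example_file = debugfs_create_file("info", S_IRUGO, example_dir,
                                               priv, fops);
    }

    static void example_debugfs_teardown(void)
    {
            debugfs_remove(example_file);   /* NULL-safe */
            debugfs_remove(example_dir);    /* NULL-safe */
            example_file = NULL;
            example_dir = NULL;
    }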
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 6e85e21445eb..a8de2eec6ff3 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c | |||
@@ -654,7 +654,8 @@ static void rsxx_eeh_failure(struct pci_dev *dev) | |||
654 | for (i = 0; i < card->n_targets; i++) { | 654 | for (i = 0; i < card->n_targets; i++) { |
655 | spin_lock_bh(&card->ctrl[i].queue_lock); | 655 | spin_lock_bh(&card->ctrl[i].queue_lock); |
656 | cnt = rsxx_cleanup_dma_queue(&card->ctrl[i], | 656 | cnt = rsxx_cleanup_dma_queue(&card->ctrl[i], |
657 | &card->ctrl[i].queue); | 657 | &card->ctrl[i].queue, |
658 | COMPLETE_DMA); | ||
658 | spin_unlock_bh(&card->ctrl[i].queue_lock); | 659 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
659 | 660 | ||
660 | cnt += rsxx_dma_cancel(&card->ctrl[i]); | 661 | cnt += rsxx_dma_cancel(&card->ctrl[i]); |
@@ -748,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) | |||
748 | 749 | ||
749 | card->eeh_state = 0; | 750 | card->eeh_state = 0; |
750 | 751 | ||
751 | st = rsxx_eeh_remap_dmas(card); | ||
752 | if (st) | ||
753 | goto failed_remap_dmas; | ||
754 | |||
755 | spin_lock_irqsave(&card->irq_lock, flags); | 752 | spin_lock_irqsave(&card->irq_lock, flags); |
756 | if (card->n_targets & RSXX_MAX_TARGETS) | 753 | if (card->n_targets & RSXX_MAX_TARGETS) |
757 | rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G); | 754 | rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G); |
@@ -778,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) | |||
778 | return PCI_ERS_RESULT_RECOVERED; | 775 | return PCI_ERS_RESULT_RECOVERED; |
779 | 776 | ||
780 | failed_hw_buffers_init: | 777 | failed_hw_buffers_init: |
781 | failed_remap_dmas: | ||
782 | for (i = 0; i < card->n_targets; i++) { | 778 | for (i = 0; i < card->n_targets; i++) { |
783 | if (card->ctrl[i].status.buf) | 779 | if (card->ctrl[i].status.buf) |
784 | pci_free_consistent(card->dev, | 780 | pci_free_consistent(card->dev, |
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index d7af441880be..2284f5d3a54a 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c | |||
@@ -295,13 +295,15 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) | |||
295 | return -ENOMEM; | 295 | return -ENOMEM; |
296 | } | 296 | } |
297 | 297 | ||
298 | blk_size = card->config.data.block_size; | 298 | if (card->config_valid) { |
299 | blk_size = card->config.data.block_size; | ||
300 | blk_queue_dma_alignment(card->queue, blk_size - 1); | ||
301 | blk_queue_logical_block_size(card->queue, blk_size); | ||
302 | } | ||
299 | 303 | ||
300 | blk_queue_make_request(card->queue, rsxx_make_request); | 304 | blk_queue_make_request(card->queue, rsxx_make_request); |
301 | blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); | 305 | blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); |
302 | blk_queue_dma_alignment(card->queue, blk_size - 1); | ||
303 | blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); | 306 | blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); |
304 | blk_queue_logical_block_size(card->queue, blk_size); | ||
305 | blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); | 307 | blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); |
306 | 308 | ||
307 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); | 309 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); |
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index bed32f16b084..fc88ba3e1bd2 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c | |||
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | /*----------------- RSXX DMA Handling -------------------*/ | 223 | /*----------------- RSXX DMA Handling -------------------*/ |
224 | static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma) | ||
225 | { | ||
226 | if (dma->cmd != HW_CMD_BLK_DISCARD) { | ||
227 | if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) { | ||
228 | pci_unmap_page(ctrl->card->dev, dma->dma_addr, | ||
229 | get_dma_size(dma), | ||
230 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
231 | PCI_DMA_TODEVICE : | ||
232 | PCI_DMA_FROMDEVICE); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | kmem_cache_free(rsxx_dma_pool, dma); | ||
237 | } | ||
238 | |||
224 | static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, | 239 | static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, |
225 | struct rsxx_dma *dma, | 240 | struct rsxx_dma *dma, |
226 | unsigned int status) | 241 | unsigned int status) |
@@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, | |||
232 | if (status & DMA_CANCELLED) | 247 | if (status & DMA_CANCELLED) |
233 | ctrl->stats.dma_cancelled++; | 248 | ctrl->stats.dma_cancelled++; |
234 | 249 | ||
235 | if (dma->dma_addr) | ||
236 | pci_unmap_page(ctrl->card->dev, dma->dma_addr, | ||
237 | get_dma_size(dma), | ||
238 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
239 | PCI_DMA_TODEVICE : | ||
240 | PCI_DMA_FROMDEVICE); | ||
241 | |||
242 | if (dma->cb) | 250 | if (dma->cb) |
243 | dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); | 251 | dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); |
244 | 252 | ||
245 | kmem_cache_free(rsxx_dma_pool, dma); | 253 | rsxx_free_dma(ctrl, dma); |
246 | } | 254 | } |
247 | 255 | ||
248 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, | 256 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, |
249 | struct list_head *q) | 257 | struct list_head *q, unsigned int done) |
250 | { | 258 | { |
251 | struct rsxx_dma *dma; | 259 | struct rsxx_dma *dma; |
252 | struct rsxx_dma *tmp; | 260 | struct rsxx_dma *tmp; |
@@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, | |||
254 | 262 | ||
255 | list_for_each_entry_safe(dma, tmp, q, list) { | 263 | list_for_each_entry_safe(dma, tmp, q, list) { |
256 | list_del(&dma->list); | 264 | list_del(&dma->list); |
257 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); | 265 | if (done & COMPLETE_DMA) |
266 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); | ||
267 | else | ||
268 | rsxx_free_dma(ctrl, dma); | ||
258 | cnt++; | 269 | cnt++; |
259 | } | 270 | } |
260 | 271 | ||
@@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data) | |||
370 | 381 | ||
371 | /* Clean up the DMA queue */ | 382 | /* Clean up the DMA queue */ |
372 | spin_lock(&ctrl->queue_lock); | 383 | spin_lock(&ctrl->queue_lock); |
373 | cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); | 384 | cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); |
374 | spin_unlock(&ctrl->queue_lock); | 385 | spin_unlock(&ctrl->queue_lock); |
375 | 386 | ||
376 | cnt += rsxx_dma_cancel(ctrl); | 387 | cnt += rsxx_dma_cancel(ctrl); |
@@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl) | |||
388 | int tag; | 399 | int tag; |
389 | int cmds_pending = 0; | 400 | int cmds_pending = 0; |
390 | struct hw_cmd *hw_cmd_buf; | 401 | struct hw_cmd *hw_cmd_buf; |
402 | int dir; | ||
391 | 403 | ||
392 | hw_cmd_buf = ctrl->cmd.buf; | 404 | hw_cmd_buf = ctrl->cmd.buf; |
393 | 405 | ||
@@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl) | |||
424 | continue; | 436 | continue; |
425 | } | 437 | } |
426 | 438 | ||
439 | if (dma->cmd != HW_CMD_BLK_DISCARD) { | ||
440 | if (dma->cmd == HW_CMD_BLK_WRITE) | ||
441 | dir = PCI_DMA_TODEVICE; | ||
442 | else | ||
443 | dir = PCI_DMA_FROMDEVICE; | ||
444 | |||
445 | /* | ||
446 | * The function pci_map_page is placed here because we | ||
447 | * can only, by design, issue up to 255 commands to the | ||
448 | * hardware at one time per DMA channel. So the maximum | ||
449 | * amount of mapped memory would be 255 * 4 channels * | ||
450 | * 4096 Bytes which is less than 2GB, the limit of a x8 | ||
451 | * Non-HWWD PCIe slot. This way the pci_map_page | ||
452 | * function should never fail because of a lack of | ||
453 | * mappable memory. | ||
454 | */ | ||
455 | dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page, | ||
456 | dma->pg_off, dma->sub_page.cnt << 9, dir); | ||
457 | if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) { | ||
458 | push_tracker(ctrl->trackers, tag); | ||
459 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); | ||
460 | continue; | ||
461 | } | ||
462 | } | ||
463 | |||
427 | set_tracker_dma(ctrl->trackers, tag, dma); | 464 | set_tracker_dma(ctrl->trackers, tag, dma); |
428 | hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd; | 465 | hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd; |
429 | hw_cmd_buf[ctrl->cmd.idx].tag = tag; | 466 | hw_cmd_buf[ctrl->cmd.idx].tag = tag; |
@@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card, | |||
620 | if (!dma) | 657 | if (!dma) |
621 | return -ENOMEM; | 658 | return -ENOMEM; |
622 | 659 | ||
623 | dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len, | ||
624 | dir ? PCI_DMA_TODEVICE : | ||
625 | PCI_DMA_FROMDEVICE); | ||
626 | if (!dma->dma_addr) { | ||
627 | kmem_cache_free(rsxx_dma_pool, dma); | ||
628 | return -ENOMEM; | ||
629 | } | ||
630 | |||
631 | dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; | 660 | dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; |
632 | dma->laddr = laddr; | 661 | dma->laddr = laddr; |
633 | dma->sub_page.off = (dma_off >> 9); | 662 | dma->sub_page.off = (dma_off >> 9); |
@@ -736,11 +765,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | |||
736 | return 0; | 765 | return 0; |
737 | 766 | ||
738 | bvec_err: | 767 | bvec_err: |
739 | for (i = 0; i < card->n_targets; i++) { | 768 | for (i = 0; i < card->n_targets; i++) |
740 | spin_lock_bh(&card->ctrl[i].queue_lock); | 769 | rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], |
741 | rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]); | 770 | FREE_DMA); |
742 | spin_unlock_bh(&card->ctrl[i].queue_lock); | ||
743 | } | ||
744 | 771 | ||
745 | return st; | 772 | return st; |
746 | } | 773 | } |
@@ -990,7 +1017,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card) | |||
990 | 1017 | ||
991 | /* Clean up the DMA queue */ | 1018 | /* Clean up the DMA queue */ |
992 | spin_lock_bh(&ctrl->queue_lock); | 1019 | spin_lock_bh(&ctrl->queue_lock); |
993 | rsxx_cleanup_dma_queue(ctrl, &ctrl->queue); | 1020 | rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); |
994 | spin_unlock_bh(&ctrl->queue_lock); | 1021 | spin_unlock_bh(&ctrl->queue_lock); |
995 | 1022 | ||
996 | rsxx_dma_cancel(ctrl); | 1023 | rsxx_dma_cancel(ctrl); |
@@ -1032,6 +1059,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | |||
1032 | else | 1059 | else |
1033 | card->ctrl[i].stats.reads_issued--; | 1060 | card->ctrl[i].stats.reads_issued--; |
1034 | 1061 | ||
1062 | if (dma->cmd != HW_CMD_BLK_DISCARD) { | ||
1063 | pci_unmap_page(card->dev, dma->dma_addr, | ||
1064 | get_dma_size(dma), | ||
1065 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
1066 | PCI_DMA_TODEVICE : | ||
1067 | PCI_DMA_FROMDEVICE); | ||
1068 | } | ||
1069 | |||
1035 | list_add_tail(&dma->list, &issued_dmas[i]); | 1070 | list_add_tail(&dma->list, &issued_dmas[i]); |
1036 | push_tracker(card->ctrl[i].trackers, j); | 1071 | push_tracker(card->ctrl[i].trackers, j); |
1037 | cnt++; | 1072 | cnt++; |
@@ -1043,15 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | |||
1043 | atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); | 1078 | atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); |
1044 | card->ctrl[i].stats.sw_q_depth += cnt; | 1079 | card->ctrl[i].stats.sw_q_depth += cnt; |
1045 | card->ctrl[i].e_cnt = 0; | 1080 | card->ctrl[i].e_cnt = 0; |
1046 | |||
1047 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | ||
1048 | if (dma->dma_addr) | ||
1049 | pci_unmap_page(card->dev, dma->dma_addr, | ||
1050 | get_dma_size(dma), | ||
1051 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
1052 | PCI_DMA_TODEVICE : | ||
1053 | PCI_DMA_FROMDEVICE); | ||
1054 | } | ||
1055 | spin_unlock_bh(&card->ctrl[i].queue_lock); | 1081 | spin_unlock_bh(&card->ctrl[i].queue_lock); |
1056 | } | 1082 | } |
1057 | 1083 | ||
@@ -1060,31 +1086,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | |||
1060 | return 0; | 1086 | return 0; |
1061 | } | 1087 | } |
1062 | 1088 | ||
1063 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) | ||
1064 | { | ||
1065 | struct rsxx_dma *dma; | ||
1066 | int i; | ||
1067 | |||
1068 | for (i = 0; i < card->n_targets; i++) { | ||
1069 | spin_lock_bh(&card->ctrl[i].queue_lock); | ||
1070 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | ||
1071 | dma->dma_addr = pci_map_page(card->dev, dma->page, | ||
1072 | dma->pg_off, get_dma_size(dma), | ||
1073 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
1074 | PCI_DMA_TODEVICE : | ||
1075 | PCI_DMA_FROMDEVICE); | ||
1076 | if (!dma->dma_addr) { | ||
1077 | spin_unlock_bh(&card->ctrl[i].queue_lock); | ||
1078 | kmem_cache_free(rsxx_dma_pool, dma); | ||
1079 | return -ENOMEM; | ||
1080 | } | ||
1081 | } | ||
1082 | spin_unlock_bh(&card->ctrl[i].queue_lock); | ||
1083 | } | ||
1084 | |||
1085 | return 0; | ||
1086 | } | ||
1087 | |||
1088 | int rsxx_dma_init(void) | 1089 | int rsxx_dma_init(void) |
1089 | { | 1090 | { |
1090 | rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN); | 1091 | rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN); |
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index 5ad5055a4104..6bbc64d0f690 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h | |||
@@ -52,7 +52,7 @@ struct proc_cmd; | |||
52 | #define RS70_PCI_REV_SUPPORTED 4 | 52 | #define RS70_PCI_REV_SUPPORTED 4 |
53 | 53 | ||
54 | #define DRIVER_NAME "rsxx" | 54 | #define DRIVER_NAME "rsxx" |
55 | #define DRIVER_VERSION "4.0" | 55 | #define DRIVER_VERSION "4.0.3.2516" |
56 | 56 | ||
57 | /* Block size is 4096 */ | 57 | /* Block size is 4096 */ |
58 | #define RSXX_HW_BLK_SHIFT 12 | 58 | #define RSXX_HW_BLK_SHIFT 12 |
@@ -345,6 +345,11 @@ enum rsxx_creg_stat { | |||
345 | CREG_STAT_TAG_MASK = 0x0000ff00, | 345 | CREG_STAT_TAG_MASK = 0x0000ff00, |
346 | }; | 346 | }; |
347 | 347 | ||
348 | enum rsxx_dma_finish { | ||
349 | FREE_DMA = 0x0, | ||
350 | COMPLETE_DMA = 0x1, | ||
351 | }; | ||
352 | |||
348 | static inline unsigned int CREG_DATA(int N) | 353 | static inline unsigned int CREG_DATA(int N) |
349 | { | 354 | { |
350 | return CREG_DATA0 + (N << 2); | 355 | return CREG_DATA0 + (N << 2); |
@@ -379,7 +384,9 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card, | |||
379 | int rsxx_dma_setup(struct rsxx_cardinfo *card); | 384 | int rsxx_dma_setup(struct rsxx_cardinfo *card); |
380 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); | 385 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); |
381 | int rsxx_dma_init(void); | 386 | int rsxx_dma_init(void); |
382 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q); | 387 | int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, |
388 | struct list_head *q, | ||
389 | unsigned int done); | ||
383 | int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl); | 390 | int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl); |
384 | void rsxx_dma_cleanup(void); | 391 | void rsxx_dma_cleanup(void); |
385 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); | 392 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); |
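With the rsxx changes above, pages are DMA-mapped only when a command is actually issued, so rsxx_cleanup_dma_queue() has to be told whether the entries being torn down were ever candidates for completion handling. A short usage sketch of the new flag, mirroring the call sites in the hunks (illustrative helpers, not part of the patch):

    /* A queue whose DMAs may already have been issued and mapped: cancel
     * them so their callbacks still run and any mapping is undone
     * (COMPLETE_DMA).
     */
    static void example_drain_ctrl(struct rsxx_dma_ctrl *ctrl)
    {
            spin_lock_bh(&ctrl->queue_lock);
            rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
            spin_unlock_bh(&ctrl->queue_lock);
    }

    /* A locally built list that never reached the hardware and was never
     * mapped: just free the descriptors (FREE_DMA).
     */
    static void example_drop_unissued(struct rsxx_dma_ctrl *ctrl,
                                      struct list_head *unissued)
    {
            rsxx_cleanup_dma_queue(ctrl, unissued, FREE_DMA);
    }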
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c new file mode 100644 index 000000000000..9199c93be926 --- /dev/null +++ b/drivers/block/skd_main.c | |||
@@ -0,0 +1,5432 @@ | |||
1 | /* Copyright 2012 STEC, Inc. | ||
2 | * | ||
3 | * This file is licensed under the terms of the 3-clause | ||
4 | * BSD License (http://opensource.org/licenses/BSD-3-Clause) | ||
5 | * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), | ||
6 | * at your option. Both licenses are also available in the LICENSE file | ||
7 | * distributed with this project. This file may not be copied, modified, | ||
8 | * or distributed except in accordance with those terms. | ||
9 | * Gordoni Waidhofer <gwaidhofer@stec-inc.com> | ||
10 | * Initial Driver Design! | ||
11 | * Thomas Swann <tswann@stec-inc.com> | ||
12 | * Interrupt handling. | ||
13 | * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com> | ||
14 | * biomode implementation. | ||
15 | * Akhil Bhansali <abhansali@stec-inc.com> | ||
16 | * Added support for DISCARD / FLUSH and FUA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/blkdev.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/compiler.h> | ||
29 | #include <linux/workqueue.h> | ||
30 | #include <linux/bitops.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/time.h> | ||
33 | #include <linux/hdreg.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/completion.h> | ||
36 | #include <linux/scatterlist.h> | ||
37 | #include <linux/version.h> | ||
38 | #include <linux/err.h> | ||
39 | #include <linux/scatterlist.h> | ||
40 | #include <linux/aer.h> | ||
41 | #include <linux/ctype.h> | ||
42 | #include <linux/wait.h> | ||
43 | #include <linux/uio.h> | ||
44 | #include <scsi/scsi.h> | ||
45 | #include <scsi/sg.h> | ||
46 | #include <linux/io.h> | ||
47 | #include <linux/uaccess.h> | ||
48 | #include <asm/unaligned.h> | ||
49 | |||
50 | #include "skd_s1120.h" | ||
51 | |||
52 | static int skd_dbg_level; | ||
53 | static int skd_isr_comp_limit = 4; | ||
54 | |||
55 | enum { | ||
56 | STEC_LINK_2_5GTS = 0, | ||
57 | STEC_LINK_5GTS = 1, | ||
58 | STEC_LINK_8GTS = 2, | ||
59 | STEC_LINK_UNKNOWN = 0xFF | ||
60 | }; | ||
61 | |||
62 | enum { | ||
63 | SKD_FLUSH_INITIALIZER, | ||
64 | SKD_FLUSH_ZERO_SIZE_FIRST, | ||
65 | SKD_FLUSH_DATA_SECOND, | ||
66 | }; | ||
67 | |||
68 | #define SKD_ASSERT(expr) \ | ||
69 | do { \ | ||
70 | if (unlikely(!(expr))) { \ | ||
71 | pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ | ||
72 | # expr, __FILE__, __func__, __LINE__); \ | ||
73 | } \ | ||
74 | } while (0) | ||
75 | |||
76 | #define DRV_NAME "skd" | ||
77 | #define DRV_VERSION "2.2.1" | ||
78 | #define DRV_BUILD_ID "0260" | ||
79 | #define PFX DRV_NAME ": " | ||
80 | #define DRV_BIN_VERSION 0x100 | ||
81 | #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID | ||
82 | |||
83 | MODULE_AUTHOR("bug-reports: support@stec-inc.com"); | ||
84 | MODULE_LICENSE("Dual BSD/GPL"); | ||
85 | |||
86 | MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")"); | ||
87 | MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); | ||
88 | |||
89 | #define PCI_VENDOR_ID_STEC 0x1B39 | ||
90 | #define PCI_DEVICE_ID_S1120 0x0001 | ||
91 | |||
92 | #define SKD_FUA_NV (1 << 1) | ||
93 | #define SKD_MINORS_PER_DEVICE 16 | ||
94 | |||
95 | #define SKD_MAX_QUEUE_DEPTH 200u | ||
96 | |||
97 | #define SKD_PAUSE_TIMEOUT (5 * 1000) | ||
98 | |||
99 | #define SKD_N_FITMSG_BYTES (512u) | ||
100 | |||
101 | #define SKD_N_SPECIAL_CONTEXT 32u | ||
102 | #define SKD_N_SPECIAL_FITMSG_BYTES (128u) | ||
103 | |||
104 | /* SG elements are 32 bytes, so we can make this 4096 and still be under the | ||
105 | * 128KB limit. That allows 4096*4K = 16M xfer size | ||
106 | */ | ||
107 | #define SKD_N_SG_PER_REQ_DEFAULT 256u | ||
108 | #define SKD_N_SG_PER_SPECIAL 256u | ||
109 | |||
110 | #define SKD_N_COMPLETION_ENTRY 256u | ||
111 | #define SKD_N_READ_CAP_BYTES (8u) | ||
112 | |||
113 | #define SKD_N_INTERNAL_BYTES (512u) | ||
114 | |||
115 | /* 5 bits of uniquifier, 0xF800 */ | ||
116 | #define SKD_ID_INCR (0x400) | ||
117 | #define SKD_ID_TABLE_MASK (3u << 8u) | ||
118 | #define SKD_ID_RW_REQUEST (0u << 8u) | ||
119 | #define SKD_ID_INTERNAL (1u << 8u) | ||
120 | #define SKD_ID_SPECIAL_REQUEST (2u << 8u) | ||
121 | #define SKD_ID_FIT_MSG (3u << 8u) | ||
122 | #define SKD_ID_SLOT_MASK 0x00FFu | ||
123 | #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu | ||
124 | |||
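A worked example of the ID layout above (hypothetical decode helper, not in the driver): the low byte is the slot index, bits 8-9 select which context table the ID refers to, and the remaining high bits act as the uniquifier that is bumped by SKD_ID_INCR each time a context is reused, so a stale completion can be told apart from a current one.

    /* Sketch: split an skd context id into its fields. */
    static inline void skd_example_decode_id(u16 id, u16 *slot, u16 *table)
    {
            *slot  = id & SKD_ID_SLOT_MASK;   /* 0x00FF */
            *table = id & SKD_ID_TABLE_MASK;  /* 0x0300 */
    }

    /* e.g. id 0x0207 is slot 0x07 in the special-request table; after a
     * reuse (id += SKD_ID_INCR) it becomes 0x0607: same slot and table,
     * different uniquifier.
     */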
125 | #define SKD_N_TIMEOUT_SLOT 4u | ||
126 | #define SKD_TIMEOUT_SLOT_MASK 3u | ||
127 | |||
128 | #define SKD_N_MAX_SECTORS 2048u | ||
129 | |||
130 | #define SKD_MAX_RETRIES 2u | ||
131 | |||
132 | #define SKD_TIMER_SECONDS(seconds) (seconds) | ||
133 | #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) | ||
134 | |||
135 | #define INQ_STD_NBYTES 36 | ||
136 | #define SKD_DISCARD_CDB_LENGTH 24 | ||
137 | |||
138 | enum skd_drvr_state { | ||
139 | SKD_DRVR_STATE_LOAD, | ||
140 | SKD_DRVR_STATE_IDLE, | ||
141 | SKD_DRVR_STATE_BUSY, | ||
142 | SKD_DRVR_STATE_STARTING, | ||
143 | SKD_DRVR_STATE_ONLINE, | ||
144 | SKD_DRVR_STATE_PAUSING, | ||
145 | SKD_DRVR_STATE_PAUSED, | ||
146 | SKD_DRVR_STATE_DRAINING_TIMEOUT, | ||
147 | SKD_DRVR_STATE_RESTARTING, | ||
148 | SKD_DRVR_STATE_RESUMING, | ||
149 | SKD_DRVR_STATE_STOPPING, | ||
150 | SKD_DRVR_STATE_FAULT, | ||
151 | SKD_DRVR_STATE_DISAPPEARED, | ||
152 | SKD_DRVR_STATE_PROTOCOL_MISMATCH, | ||
153 | SKD_DRVR_STATE_BUSY_ERASE, | ||
154 | SKD_DRVR_STATE_BUSY_SANITIZE, | ||
155 | SKD_DRVR_STATE_BUSY_IMMINENT, | ||
156 | SKD_DRVR_STATE_WAIT_BOOT, | ||
157 | SKD_DRVR_STATE_SYNCING, | ||
158 | }; | ||
159 | |||
160 | #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) | ||
161 | #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) | ||
162 | #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) | ||
163 | #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u) | ||
164 | #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) | ||
165 | #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) | ||
166 | #define SKD_START_WAIT_SECONDS 90u | ||
167 | |||
168 | enum skd_req_state { | ||
169 | SKD_REQ_STATE_IDLE, | ||
170 | SKD_REQ_STATE_SETUP, | ||
171 | SKD_REQ_STATE_BUSY, | ||
172 | SKD_REQ_STATE_COMPLETED, | ||
173 | SKD_REQ_STATE_TIMEOUT, | ||
174 | SKD_REQ_STATE_ABORTED, | ||
175 | }; | ||
176 | |||
177 | enum skd_fit_msg_state { | ||
178 | SKD_MSG_STATE_IDLE, | ||
179 | SKD_MSG_STATE_BUSY, | ||
180 | }; | ||
181 | |||
182 | enum skd_check_status_action { | ||
183 | SKD_CHECK_STATUS_REPORT_GOOD, | ||
184 | SKD_CHECK_STATUS_REPORT_SMART_ALERT, | ||
185 | SKD_CHECK_STATUS_REQUEUE_REQUEST, | ||
186 | SKD_CHECK_STATUS_REPORT_ERROR, | ||
187 | SKD_CHECK_STATUS_BUSY_IMMINENT, | ||
188 | }; | ||
189 | |||
190 | struct skd_fitmsg_context { | ||
191 | enum skd_fit_msg_state state; | ||
192 | |||
193 | struct skd_fitmsg_context *next; | ||
194 | |||
195 | u32 id; | ||
196 | u16 outstanding; | ||
197 | |||
198 | u32 length; | ||
199 | u32 offset; | ||
200 | |||
201 | u8 *msg_buf; | ||
202 | dma_addr_t mb_dma_address; | ||
203 | }; | ||
204 | |||
205 | struct skd_request_context { | ||
206 | enum skd_req_state state; | ||
207 | |||
208 | struct skd_request_context *next; | ||
209 | |||
210 | u16 id; | ||
211 | u32 fitmsg_id; | ||
212 | |||
213 | struct request *req; | ||
214 | u8 flush_cmd; | ||
215 | u8 discard_page; | ||
216 | |||
217 | u32 timeout_stamp; | ||
218 | u8 sg_data_dir; | ||
219 | struct scatterlist *sg; | ||
220 | u32 n_sg; | ||
221 | u32 sg_byte_count; | ||
222 | |||
223 | struct fit_sg_descriptor *sksg_list; | ||
224 | dma_addr_t sksg_dma_address; | ||
225 | |||
226 | struct fit_completion_entry_v1 completion; | ||
227 | |||
228 | struct fit_comp_error_info err_info; | ||
229 | |||
230 | }; | ||
231 | #define SKD_DATA_DIR_HOST_TO_CARD 1 | ||
232 | #define SKD_DATA_DIR_CARD_TO_HOST 2 | ||
233 | #define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */ | ||
234 | |||
235 | struct skd_special_context { | ||
236 | struct skd_request_context req; | ||
237 | |||
238 | u8 orphaned; | ||
239 | |||
240 | void *data_buf; | ||
241 | dma_addr_t db_dma_address; | ||
242 | |||
243 | u8 *msg_buf; | ||
244 | dma_addr_t mb_dma_address; | ||
245 | }; | ||
246 | |||
247 | struct skd_sg_io { | ||
248 | fmode_t mode; | ||
249 | void __user *argp; | ||
250 | |||
251 | struct sg_io_hdr sg; | ||
252 | |||
253 | u8 cdb[16]; | ||
254 | |||
255 | u32 dxfer_len; | ||
256 | u32 iovcnt; | ||
257 | struct sg_iovec *iov; | ||
258 | struct sg_iovec no_iov_iov; | ||
259 | |||
260 | struct skd_special_context *skspcl; | ||
261 | }; | ||
262 | |||
263 | typedef enum skd_irq_type { | ||
264 | SKD_IRQ_LEGACY, | ||
265 | SKD_IRQ_MSI, | ||
266 | SKD_IRQ_MSIX | ||
267 | } skd_irq_type_t; | ||
268 | |||
269 | #define SKD_MAX_BARS 2 | ||
270 | |||
271 | struct skd_device { | ||
272 | volatile void __iomem *mem_map[SKD_MAX_BARS]; | ||
273 | resource_size_t mem_phys[SKD_MAX_BARS]; | ||
274 | u32 mem_size[SKD_MAX_BARS]; | ||
275 | |||
276 | skd_irq_type_t irq_type; | ||
277 | u32 msix_count; | ||
278 | struct skd_msix_entry *msix_entries; | ||
279 | |||
280 | struct pci_dev *pdev; | ||
281 | int pcie_error_reporting_is_enabled; | ||
282 | |||
283 | spinlock_t lock; | ||
284 | struct gendisk *disk; | ||
285 | struct request_queue *queue; | ||
286 | struct device *class_dev; | ||
287 | int gendisk_on; | ||
288 | int sync_done; | ||
289 | |||
290 | atomic_t device_count; | ||
291 | u32 devno; | ||
292 | u32 major; | ||
293 | char name[32]; | ||
294 | char isr_name[30]; | ||
295 | |||
296 | enum skd_drvr_state state; | ||
297 | u32 drive_state; | ||
298 | |||
299 | u32 in_flight; | ||
300 | u32 cur_max_queue_depth; | ||
301 | u32 queue_low_water_mark; | ||
302 | u32 dev_max_queue_depth; | ||
303 | |||
304 | u32 num_fitmsg_context; | ||
305 | u32 num_req_context; | ||
306 | |||
307 | u32 timeout_slot[SKD_N_TIMEOUT_SLOT]; | ||
308 | u32 timeout_stamp; | ||
309 | struct skd_fitmsg_context *skmsg_free_list; | ||
310 | struct skd_fitmsg_context *skmsg_table; | ||
311 | |||
312 | struct skd_request_context *skreq_free_list; | ||
313 | struct skd_request_context *skreq_table; | ||
314 | |||
315 | struct skd_special_context *skspcl_free_list; | ||
316 | struct skd_special_context *skspcl_table; | ||
317 | |||
318 | struct skd_special_context internal_skspcl; | ||
319 | u32 read_cap_blocksize; | ||
320 | u32 read_cap_last_lba; | ||
321 | int read_cap_is_valid; | ||
322 | int inquiry_is_valid; | ||
323 | u8 inq_serial_num[13]; /*12 chars plus null term */ | ||
324 | u8 id_str[80]; /* holds a composite name (pci + sernum) */ | ||
325 | |||
326 | u8 skcomp_cycle; | ||
327 | u32 skcomp_ix; | ||
328 | struct fit_completion_entry_v1 *skcomp_table; | ||
329 | struct fit_comp_error_info *skerr_table; | ||
330 | dma_addr_t cq_dma_address; | ||
331 | |||
332 | wait_queue_head_t waitq; | ||
333 | |||
334 | struct timer_list timer; | ||
335 | u32 timer_countdown; | ||
336 | u32 timer_substate; | ||
337 | |||
338 | int n_special; | ||
339 | int sgs_per_request; | ||
340 | u32 last_mtd; | ||
341 | |||
342 | u32 proto_ver; | ||
343 | |||
344 | int dbg_level; | ||
345 | u32 connect_time_stamp; | ||
346 | int connect_retries; | ||
347 | #define SKD_MAX_CONNECT_RETRIES 16 | ||
348 | u32 drive_jiffies; | ||
349 | |||
350 | u32 timo_slot; | ||
351 | |||
352 | |||
353 | struct work_struct completion_worker; | ||
354 | }; | ||
355 | |||
356 | #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF) | ||
357 | #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF) | ||
358 | #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF) | ||
359 | |||
360 | static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) | ||
361 | { | ||
362 | u32 val; | ||
363 | |||
364 | if (likely(skdev->dbg_level < 2)) | ||
365 | return readl(skdev->mem_map[1] + offset); | ||
366 | else { | ||
367 | barrier(); | ||
368 | val = readl(skdev->mem_map[1] + offset); | ||
369 | barrier(); | ||
370 | pr_debug("%s:%s:%d offset %x = %x\n", | ||
371 | skdev->name, __func__, __LINE__, offset, val); | ||
372 | return val; | ||
373 | } | ||
374 | |||
375 | } | ||
376 | |||
377 | static inline void skd_reg_write32(struct skd_device *skdev, u32 val, | ||
378 | u32 offset) | ||
379 | { | ||
380 | if (likely(skdev->dbg_level < 2)) { | ||
381 | writel(val, skdev->mem_map[1] + offset); | ||
382 | barrier(); | ||
383 | } else { | ||
384 | barrier(); | ||
385 | writel(val, skdev->mem_map[1] + offset); | ||
386 | barrier(); | ||
387 | pr_debug("%s:%s:%d offset %x = %x\n", | ||
388 | skdev->name, __func__, __LINE__, offset, val); | ||
389 | } | ||
390 | } | ||
391 | |||
392 | static inline void skd_reg_write64(struct skd_device *skdev, u64 val, | ||
393 | u32 offset) | ||
394 | { | ||
395 | if (likely(skdev->dbg_level < 2)) { | ||
396 | writeq(val, skdev->mem_map[1] + offset); | ||
397 | barrier(); | ||
398 | } else { | ||
399 | barrier(); | ||
400 | writeq(val, skdev->mem_map[1] + offset); | ||
401 | barrier(); | ||
402 | pr_debug("%s:%s:%d offset %x = %016llx\n", | ||
403 | skdev->name, __func__, __LINE__, offset, val); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | |||
408 | #define SKD_IRQ_DEFAULT SKD_IRQ_MSI | ||
409 | static int skd_isr_type = SKD_IRQ_DEFAULT; | ||
410 | |||
411 | module_param(skd_isr_type, int, 0444); | ||
412 | MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability." | ||
413 | " (0==legacy, 1==MSI, 2==MSI-X, default==1)"); | ||
414 | |||
415 | #define SKD_MAX_REQ_PER_MSG_DEFAULT 1 | ||
416 | static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; | ||
417 | |||
418 | module_param(skd_max_req_per_msg, int, 0444); | ||
419 | MODULE_PARM_DESC(skd_max_req_per_msg, | ||
420 | "Maximum SCSI requests packed in a single message." | ||
421 | " (1-14, default==1)"); | ||
422 | |||
423 | #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 | ||
424 | #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" | ||
425 | static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; | ||
426 | |||
427 | module_param(skd_max_queue_depth, int, 0444); | ||
428 | MODULE_PARM_DESC(skd_max_queue_depth, | ||
429 | "Maximum SCSI requests issued to s1120." | ||
430 | " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")"); | ||
431 | |||
432 | static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; | ||
433 | module_param(skd_sgs_per_request, int, 0444); | ||
434 | MODULE_PARM_DESC(skd_sgs_per_request, | ||
435 | "Maximum SG elements per block request." | ||
436 | " (1-4096, default==256)"); | ||
437 | |||
438 | static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; | ||
439 | module_param(skd_max_pass_thru, int, 0444); | ||
440 | MODULE_PARM_DESC(skd_max_pass_thru, | ||
441 | "Maximum SCSI pass-thru at a time." " (1-50, default==32)"); | ||
442 | |||
443 | module_param(skd_dbg_level, int, 0444); | ||
444 | MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); | ||
445 | |||
446 | module_param(skd_isr_comp_limit, int, 0444); | ||
447 | MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4"); | ||
448 | |||
449 | /* Major device number dynamically assigned. */ | ||
450 | static u32 skd_major; | ||
451 | |||
452 | static void skd_destruct(struct skd_device *skdev); | ||
453 | static const struct block_device_operations skd_blockdev_ops; | ||
454 | static void skd_send_fitmsg(struct skd_device *skdev, | ||
455 | struct skd_fitmsg_context *skmsg); | ||
456 | static void skd_send_special_fitmsg(struct skd_device *skdev, | ||
457 | struct skd_special_context *skspcl); | ||
458 | static void skd_request_fn(struct request_queue *rq); | ||
459 | static void skd_end_request(struct skd_device *skdev, | ||
460 | struct skd_request_context *skreq, int error); | ||
461 | static int skd_preop_sg_list(struct skd_device *skdev, | ||
462 | struct skd_request_context *skreq); | ||
463 | static void skd_postop_sg_list(struct skd_device *skdev, | ||
464 | struct skd_request_context *skreq); | ||
465 | |||
466 | static void skd_restart_device(struct skd_device *skdev); | ||
467 | static int skd_quiesce_dev(struct skd_device *skdev); | ||
468 | static int skd_unquiesce_dev(struct skd_device *skdev); | ||
469 | static void skd_release_special(struct skd_device *skdev, | ||
470 | struct skd_special_context *skspcl); | ||
471 | static void skd_disable_interrupts(struct skd_device *skdev); | ||
472 | static void skd_isr_fwstate(struct skd_device *skdev); | ||
473 | static void skd_recover_requests(struct skd_device *skdev, int requeue); | ||
474 | static void skd_soft_reset(struct skd_device *skdev); | ||
475 | |||
476 | static const char *skd_name(struct skd_device *skdev); | ||
477 | const char *skd_drive_state_to_str(int state); | ||
478 | const char *skd_skdev_state_to_str(enum skd_drvr_state state); | ||
479 | static void skd_log_skdev(struct skd_device *skdev, const char *event); | ||
480 | static void skd_log_skmsg(struct skd_device *skdev, | ||
481 | struct skd_fitmsg_context *skmsg, const char *event); | ||
482 | static void skd_log_skreq(struct skd_device *skdev, | ||
483 | struct skd_request_context *skreq, const char *event); | ||
484 | |||
485 | /* | ||
486 | ***************************************************************************** | ||
487 | * READ/WRITE REQUESTS | ||
488 | ***************************************************************************** | ||
489 | */ | ||
490 | static void skd_fail_all_pending(struct skd_device *skdev) | ||
491 | { | ||
492 | struct request_queue *q = skdev->queue; | ||
493 | struct request *req; | ||
494 | |||
495 | for (;; ) { | ||
496 | req = blk_peek_request(q); | ||
497 | if (req == NULL) | ||
498 | break; | ||
499 | blk_start_request(req); | ||
500 | __blk_end_request_all(req, -EIO); | ||
501 | } | ||
502 | } | ||
503 | |||
504 | static void | ||
505 | skd_prep_rw_cdb(struct skd_scsi_request *scsi_req, | ||
506 | int data_dir, unsigned lba, | ||
507 | unsigned count) | ||
508 | { | ||
509 | if (data_dir == READ) | ||
510 | scsi_req->cdb[0] = 0x28; | ||
511 | else | ||
512 | scsi_req->cdb[0] = 0x2a; | ||
513 | |||
514 | scsi_req->cdb[1] = 0; | ||
515 | scsi_req->cdb[2] = (lba & 0xff000000) >> 24; | ||
516 | scsi_req->cdb[3] = (lba & 0xff0000) >> 16; | ||
517 | scsi_req->cdb[4] = (lba & 0xff00) >> 8; | ||
518 | scsi_req->cdb[5] = (lba & 0xff); | ||
519 | scsi_req->cdb[6] = 0; | ||
520 | scsi_req->cdb[7] = (count & 0xff00) >> 8; | ||
521 | scsi_req->cdb[8] = count & 0xff; | ||
522 | scsi_req->cdb[9] = 0; | ||
523 | } | ||
524 | |||
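The shifts above pack the LBA and transfer length big-endian into a READ(10)/WRITE(10)-style CDB; for example lba 0x00ABCDEF lands in cdb[2..5] as 00 AB CD EF and count 0x0100 in cdb[7..8] as 01 00. The same encoding could be written with the unaligned helpers this file already includes (a sketch only; the driver keeps the explicit shifts):

    /* Equivalent big-endian encoding via <asm/unaligned.h>; sketch only. */
    static void skd_example_prep_rw_cdb_be(struct skd_scsi_request *scsi_req,
                                           u32 lba, u32 count)
    {
            put_unaligned_be32(lba, &scsi_req->cdb[2]);          /* bytes 2..5 */
            put_unaligned_be16((u16)count, &scsi_req->cdb[7]);   /* bytes 7..8 */
    }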
525 | static void | ||
526 | skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, | ||
527 | struct skd_request_context *skreq) | ||
528 | { | ||
529 | skreq->flush_cmd = 1; | ||
530 | |||
531 | scsi_req->cdb[0] = 0x35; | ||
532 | scsi_req->cdb[1] = 0; | ||
533 | scsi_req->cdb[2] = 0; | ||
534 | scsi_req->cdb[3] = 0; | ||
535 | scsi_req->cdb[4] = 0; | ||
536 | scsi_req->cdb[5] = 0; | ||
537 | scsi_req->cdb[6] = 0; | ||
538 | scsi_req->cdb[7] = 0; | ||
539 | scsi_req->cdb[8] = 0; | ||
540 | scsi_req->cdb[9] = 0; | ||
541 | } | ||
542 | |||
543 | static void | ||
544 | skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, | ||
545 | struct skd_request_context *skreq, | ||
546 | struct page *page, | ||
547 | u32 lba, u32 count) | ||
548 | { | ||
549 | char *buf; | ||
550 | unsigned long len; | ||
551 | struct request *req; | ||
552 | |||
553 | buf = page_address(page); | ||
554 | len = SKD_DISCARD_CDB_LENGTH; | ||
555 | |||
556 | scsi_req->cdb[0] = UNMAP; | ||
557 | scsi_req->cdb[8] = len; | ||
558 | |||
559 | put_unaligned_be16(6 + 16, &buf[0]); | ||
560 | put_unaligned_be16(16, &buf[2]); | ||
561 | put_unaligned_be64(lba, &buf[8]); | ||
562 | put_unaligned_be32(count, &buf[16]); | ||
563 | |||
564 | req = skreq->req; | ||
565 | blk_add_request_payload(req, page, len); | ||
566 | req->buffer = buf; | ||
567 | } | ||
568 | |||
569 | static void skd_request_fn_not_online(struct request_queue *q); | ||
570 | |||
571 | static void skd_request_fn(struct request_queue *q) | ||
572 | { | ||
573 | struct skd_device *skdev = q->queuedata; | ||
574 | struct skd_fitmsg_context *skmsg = NULL; | ||
575 | struct fit_msg_hdr *fmh = NULL; | ||
576 | struct skd_request_context *skreq; | ||
577 | struct request *req = NULL; | ||
578 | struct skd_scsi_request *scsi_req; | ||
579 | struct page *page; | ||
580 | unsigned long io_flags; | ||
581 | int error; | ||
582 | u32 lba; | ||
583 | u32 count; | ||
584 | int data_dir; | ||
585 | u32 be_lba; | ||
586 | u32 be_count; | ||
587 | u64 be_dmaa; | ||
588 | u64 cmdctxt; | ||
589 | u32 timo_slot; | ||
590 | void *cmd_ptr; | ||
591 | int flush, fua; | ||
592 | |||
593 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | ||
594 | skd_request_fn_not_online(q); | ||
595 | return; | ||
596 | } | ||
597 | |||
598 | if (blk_queue_stopped(skdev->queue)) { | ||
599 | if (skdev->skmsg_free_list == NULL || | ||
600 | skdev->skreq_free_list == NULL || | ||
601 | skdev->in_flight >= skdev->queue_low_water_mark) | ||
602 | /* There is still some kind of shortage */ | ||
603 | return; | ||
604 | |||
605 | queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue); | ||
606 | } | ||
607 | |||
608 | /* | ||
609 | * Stop conditions: | ||
610 | * - There are no more native requests | ||
611 | * - There are already the maximum number of requests in progress | ||
612 | * - There are no more skd_request_context entries | ||
613 | * - There are no more FIT msg buffers | ||
614 | */ | ||
615 | for (;; ) { | ||
616 | |||
617 | flush = fua = 0; | ||
618 | |||
619 | req = blk_peek_request(q); | ||
620 | |||
621 | /* Are there any native requests to start? */ | ||
622 | if (req == NULL) | ||
623 | break; | ||
624 | |||
625 | lba = (u32)blk_rq_pos(req); | ||
626 | count = blk_rq_sectors(req); | ||
627 | data_dir = rq_data_dir(req); | ||
628 | io_flags = req->cmd_flags; | ||
629 | |||
630 | if (io_flags & REQ_FLUSH) | ||
631 | flush++; | ||
632 | |||
633 | if (io_flags & REQ_FUA) | ||
634 | fua++; | ||
635 | |||
636 | pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) " | ||
637 | "count=%u(0x%x) dir=%d\n", | ||
638 | skdev->name, __func__, __LINE__, | ||
639 | req, lba, lba, count, count, data_dir); | ||
640 | |||
641 | /* At this point we know there is a request */ | ||
642 | |||
643 | /* Are too many requests already in progress? */ | ||
644 | if (skdev->in_flight >= skdev->cur_max_queue_depth) { | ||
645 | pr_debug("%s:%s:%d qdepth %d, limit %d\n", | ||
646 | skdev->name, __func__, __LINE__, | ||
647 | skdev->in_flight, skdev->cur_max_queue_depth); | ||
648 | break; | ||
649 | } | ||
650 | |||
651 | /* Is a skd_request_context available? */ | ||
652 | skreq = skdev->skreq_free_list; | ||
653 | if (skreq == NULL) { | ||
654 | pr_debug("%s:%s:%d Out of req=%p\n", | ||
655 | skdev->name, __func__, __LINE__, q); | ||
656 | break; | ||
657 | } | ||
658 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); | ||
659 | SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0); | ||
660 | |||
661 | /* Now we check to see if we can get a fit msg */ | ||
662 | if (skmsg == NULL) { | ||
663 | if (skdev->skmsg_free_list == NULL) { | ||
664 | pr_debug("%s:%s:%d Out of msg\n", | ||
665 | skdev->name, __func__, __LINE__); | ||
666 | break; | ||
667 | } | ||
668 | } | ||
669 | |||
670 | skreq->flush_cmd = 0; | ||
671 | skreq->n_sg = 0; | ||
672 | skreq->sg_byte_count = 0; | ||
673 | skreq->discard_page = 0; | ||
674 | |||
675 | /* | ||
676 | * OK to now dequeue request from q. | ||
677 | * | ||
678 | * At this point we are committed to either start or reject | ||
679 | * the native request. Note that skd_request_context is | ||
680 | * available but is still at the head of the free list. | ||
681 | */ | ||
682 | blk_start_request(req); | ||
683 | skreq->req = req; | ||
684 | skreq->fitmsg_id = 0; | ||
685 | |||
686 | /* Either a FIT msg is in progress or we have to start one. */ | ||
687 | if (skmsg == NULL) { | ||
688 | /* Are there any FIT msg buffers available? */ | ||
689 | skmsg = skdev->skmsg_free_list; | ||
690 | if (skmsg == NULL) { | ||
691 | pr_debug("%s:%s:%d Out of msg skdev=%p\n", | ||
692 | skdev->name, __func__, __LINE__, | ||
693 | skdev); | ||
694 | break; | ||
695 | } | ||
696 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); | ||
697 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0); | ||
698 | |||
699 | skdev->skmsg_free_list = skmsg->next; | ||
700 | |||
701 | skmsg->state = SKD_MSG_STATE_BUSY; | ||
702 | skmsg->id += SKD_ID_INCR; | ||
703 | |||
704 | /* Initialize the FIT msg header */ | ||
705 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | ||
706 | memset(fmh, 0, sizeof(*fmh)); | ||
707 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | ||
708 | skmsg->length = sizeof(*fmh); | ||
709 | } | ||
710 | |||
711 | skreq->fitmsg_id = skmsg->id; | ||
712 | |||
713 | /* | ||
714 | * Note that a FIT msg may have just been started | ||
715 | * but contains no SoFIT requests yet. | ||
716 | */ | ||
717 | |||
718 | /* | ||
719 | * Transcode the request, checking as we go. The outcome of | ||
720 | * the transcoding is represented by the error variable. | ||
721 | */ | ||
722 | cmd_ptr = &skmsg->msg_buf[skmsg->length]; | ||
723 | memset(cmd_ptr, 0, 32); | ||
724 | |||
725 | be_lba = cpu_to_be32(lba); | ||
726 | be_count = cpu_to_be32(count); | ||
727 | be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address); | ||
728 | cmdctxt = skreq->id + SKD_ID_INCR; | ||
729 | |||
730 | scsi_req = cmd_ptr; | ||
731 | scsi_req->hdr.tag = cmdctxt; | ||
732 | scsi_req->hdr.sg_list_dma_address = be_dmaa; | ||
733 | |||
734 | if (data_dir == READ) | ||
735 | skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; | ||
736 | else | ||
737 | skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; | ||
738 | |||
739 | if (io_flags & REQ_DISCARD) { | ||
740 | page = alloc_page(GFP_ATOMIC | __GFP_ZERO); | ||
741 | if (!page) { | ||
742 | pr_err("request_fn:Page allocation failed.\n"); | ||
743 | skd_end_request(skdev, skreq, -ENOMEM); | ||
744 | break; | ||
745 | } | ||
746 | skreq->discard_page = 1; | ||
747 | skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); | ||
748 | |||
749 | } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { | ||
750 | skd_prep_zerosize_flush_cdb(scsi_req, skreq); | ||
751 | SKD_ASSERT(skreq->flush_cmd == 1); | ||
752 | |||
753 | } else { | ||
754 | skd_prep_rw_cdb(scsi_req, data_dir, lba, count); | ||
755 | } | ||
756 | |||
757 | if (fua) | ||
758 | scsi_req->cdb[1] |= SKD_FUA_NV; | ||
759 | |||
760 | if (!req->bio) | ||
761 | goto skip_sg; | ||
762 | |||
763 | error = skd_preop_sg_list(skdev, skreq); | ||
764 | |||
765 | if (error != 0) { | ||
766 | /* | ||
767 | * Complete the native request with error. | ||
768 | * Note that the request context is still at the | ||
769 | * head of the free list, and that the SoFIT request | ||
770 | * was encoded into the FIT msg buffer but the FIT | ||
771 | * msg length has not been updated. In short, the | ||
772 | * only resource that has been allocated but might | ||
773 | * not be used is that the FIT msg could be empty. | ||
774 | */ | ||
775 | pr_debug("%s:%s:%d error Out\n", | ||
776 | skdev->name, __func__, __LINE__); | ||
777 | skd_end_request(skdev, skreq, error); | ||
778 | continue; | ||
779 | } | ||
780 | |||
781 | skip_sg: | ||
782 | scsi_req->hdr.sg_list_len_bytes = | ||
783 | cpu_to_be32(skreq->sg_byte_count); | ||
784 | |||
785 | /* Complete resource allocations. */ | ||
786 | skdev->skreq_free_list = skreq->next; | ||
787 | skreq->state = SKD_REQ_STATE_BUSY; | ||
788 | skreq->id += SKD_ID_INCR; | ||
789 | |||
790 | skmsg->length += sizeof(struct skd_scsi_request); | ||
791 | fmh->num_protocol_cmds_coalesced++; | ||
792 | |||
793 | /* | ||
794 | * Update the active request counts. | ||
795 | * Capture the timeout timestamp. | ||
796 | */ | ||
797 | skreq->timeout_stamp = skdev->timeout_stamp; | ||
798 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | ||
799 | skdev->timeout_slot[timo_slot]++; | ||
800 | skdev->in_flight++; | ||
801 | pr_debug("%s:%s:%d req=0x%x busy=%d\n", | ||
802 | skdev->name, __func__, __LINE__, | ||
803 | skreq->id, skdev->in_flight); | ||
804 | |||
805 | /* | ||
806 | * If the FIT msg buffer is full send it. | ||
807 | */ | ||
808 | if (skmsg->length >= SKD_N_FITMSG_BYTES || | ||
809 | fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { | ||
810 | skd_send_fitmsg(skdev, skmsg); | ||
811 | skmsg = NULL; | ||
812 | fmh = NULL; | ||
813 | } | ||
814 | } | ||
815 | |||
816 | /* | ||
817 | * Is a FIT msg in progress? If it is empty put the buffer back | ||
818 | * on the free list. If it is non-empty send what we got. | ||
819 | * This minimizes latency when there are fewer requests than | ||
820 | * what fits in a FIT msg. | ||
821 | */ | ||
822 | if (skmsg != NULL) { | ||
823 | /* Bigger than just a FIT msg header? */ | ||
824 | if (skmsg->length > sizeof(struct fit_msg_hdr)) { | ||
825 | pr_debug("%s:%s:%d sending msg=%p, len %d\n", | ||
826 | skdev->name, __func__, __LINE__, | ||
827 | skmsg, skmsg->length); | ||
828 | skd_send_fitmsg(skdev, skmsg); | ||
829 | } else { | ||
830 | /* | ||
831 | * The FIT msg is empty. It means we got started | ||
832 | * on the msg, but the requests were rejected. | ||
833 | */ | ||
834 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
835 | skmsg->id += SKD_ID_INCR; | ||
836 | skmsg->next = skdev->skmsg_free_list; | ||
837 | skdev->skmsg_free_list = skmsg; | ||
838 | } | ||
839 | skmsg = NULL; | ||
840 | fmh = NULL; | ||
841 | } | ||
842 | |||
843 | /* | ||
844 | * If req is non-NULL it means there is something to do but | ||
845 | * we are out of a resource. | ||
846 | */ | ||
847 | if (req) | ||
848 | blk_stop_queue(skdev->queue); | ||
849 | } | ||
850 | |||
851 | static void skd_end_request(struct skd_device *skdev, | ||
852 | struct skd_request_context *skreq, int error) | ||
853 | { | ||
854 | struct request *req = skreq->req; | ||
855 | unsigned int io_flags = req->cmd_flags; | ||
856 | |||
857 | if ((io_flags & REQ_DISCARD) && | ||
858 | (skreq->discard_page == 1)) { | ||
859 | pr_debug("%s:%s:%d, free the page!", | ||
860 | skdev->name, __func__, __LINE__); | ||
861 | free_page((unsigned long)req->buffer); | ||
862 | req->buffer = NULL; | ||
863 | } | ||
864 | |||
865 | if (unlikely(error)) { | ||
866 | struct request *req = skreq->req; | ||
867 | char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; | ||
868 | u32 lba = (u32)blk_rq_pos(req); | ||
869 | u32 count = blk_rq_sectors(req); | ||
870 | |||
871 | pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", | ||
872 | skd_name(skdev), cmd, lba, count, skreq->id); | ||
873 | } else | ||
874 | pr_debug("%s:%s:%d id=0x%x error=%d\n", | ||
875 | skdev->name, __func__, __LINE__, skreq->id, error); | ||
876 | |||
877 | __blk_end_request_all(skreq->req, error); | ||
878 | } | ||
879 | |||
880 | static int skd_preop_sg_list(struct skd_device *skdev, | ||
881 | struct skd_request_context *skreq) | ||
882 | { | ||
883 | struct request *req = skreq->req; | ||
884 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | ||
885 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | ||
886 | struct scatterlist *sg = &skreq->sg[0]; | ||
887 | int n_sg; | ||
888 | int i; | ||
889 | |||
890 | skreq->sg_byte_count = 0; | ||
891 | |||
892 | /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || | ||
893 | skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */ | ||
894 | |||
895 | n_sg = blk_rq_map_sg(skdev->queue, req, sg); | ||
896 | if (n_sg <= 0) | ||
897 | return -EINVAL; | ||
898 | |||
899 | /* | ||
900 | * Map scatterlist to PCI bus addresses. | ||
901 | * Note PCI might change the number of entries. | ||
902 | */ | ||
903 | n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); | ||
904 | if (n_sg <= 0) | ||
905 | return -EINVAL; | ||
906 | |||
907 | SKD_ASSERT(n_sg <= skdev->sgs_per_request); | ||
908 | |||
909 | skreq->n_sg = n_sg; | ||
910 | |||
911 | for (i = 0; i < n_sg; i++) { | ||
912 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | ||
913 | u32 cnt = sg_dma_len(&sg[i]); | ||
914 | uint64_t dma_addr = sg_dma_address(&sg[i]); | ||
915 | |||
916 | sgd->control = FIT_SGD_CONTROL_NOT_LAST; | ||
917 | sgd->byte_count = cnt; | ||
918 | skreq->sg_byte_count += cnt; | ||
919 | sgd->host_side_addr = dma_addr; | ||
920 | sgd->dev_side_addr = 0; | ||
921 | } | ||
922 | |||
923 | skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; | ||
924 | skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; | ||
925 | |||
926 | if (unlikely(skdev->dbg_level > 1)) { | ||
927 | pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", | ||
928 | skdev->name, __func__, __LINE__, | ||
929 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | ||
930 | for (i = 0; i < n_sg; i++) { | ||
931 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | ||
932 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " | ||
933 | "addr=0x%llx next=0x%llx\n", | ||
934 | skdev->name, __func__, __LINE__, | ||
935 | i, sgd->byte_count, sgd->control, | ||
936 | sgd->host_side_addr, sgd->next_desc_ptr); | ||
937 | } | ||
938 | } | ||
939 | |||
940 | return 0; | ||
941 | } | ||
942 | |||
943 | static void skd_postop_sg_list(struct skd_device *skdev, | ||
944 | struct skd_request_context *skreq) | ||
945 | { | ||
946 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | ||
947 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | ||
948 | |||
949 | /* | ||
950 | * restore the next ptr for next IO request so we | ||
951 | * don't have to set it every time. | ||
952 | */ | ||
953 | skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = | ||
954 | skreq->sksg_dma_address + | ||
955 | ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); | ||
956 | pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir); | ||
957 | } | ||
958 | |||
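skd_postop_sg_list() above restores the chain pointer of the last descriptor used, which only works if the per-request sg list is pre-linked once at allocation time: descriptor i permanently points at the bus address of descriptor i+1, and each I/O only rewrites the tail. A hypothetical initializer illustrating that invariant (sketch, not the driver's actual constructor):

    /* Pre-link a FIT SG descriptor chain so per-I/O setup only has to mark
     * the last entry in use (FIT_SGD_CONTROL_LAST, NULL next pointer) and
     * completion can simply restore it, as skd_postop_sg_list() does above.
     */
    static void example_prelink_sg_list(struct fit_sg_descriptor *list,
                                        dma_addr_t list_dma, u32 n_desc)
    {
            u32 i;

            for (i = 0; i < n_desc; i++) {
                    list[i].control = FIT_SGD_CONTROL_NOT_LAST;
                    list[i].next_desc_ptr = list_dma +
                            (i + 1) * sizeof(struct fit_sg_descriptor);
            }
    }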
959 | static void skd_request_fn_not_online(struct request_queue *q) | ||
960 | { | ||
961 | struct skd_device *skdev = q->queuedata; | ||
962 | int error; | ||
963 | |||
964 | SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); | ||
965 | |||
966 | skd_log_skdev(skdev, "req_not_online"); | ||
967 | switch (skdev->state) { | ||
968 | case SKD_DRVR_STATE_PAUSING: | ||
969 | case SKD_DRVR_STATE_PAUSED: | ||
970 | case SKD_DRVR_STATE_STARTING: | ||
971 | case SKD_DRVR_STATE_RESTARTING: | ||
972 | case SKD_DRVR_STATE_WAIT_BOOT: | ||
973 | /* In case of starting, we haven't started the queue, | ||
974 | * so we can't get here... but requests are | ||
975 | * possibly hanging out waiting for us because we | ||
976 | * reported the dev/skd0 already. They'll wait | ||
977 | * forever if connect doesn't complete. | ||
978 | * What to do??? delay dev/skd0 ?? | ||
979 | */ | ||
980 | case SKD_DRVR_STATE_BUSY: | ||
981 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
982 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
983 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | ||
984 | return; | ||
985 | |||
986 | case SKD_DRVR_STATE_BUSY_SANITIZE: | ||
987 | case SKD_DRVR_STATE_STOPPING: | ||
988 | case SKD_DRVR_STATE_SYNCING: | ||
989 | case SKD_DRVR_STATE_FAULT: | ||
990 | case SKD_DRVR_STATE_DISAPPEARED: | ||
991 | default: | ||
992 | error = -EIO; | ||
993 | break; | ||
994 | } | ||
995 | |||
996 | /* If we get here, terminate all pending block requests | ||
997 | * with EIO and any scsi pass thru with appropriate sense | ||
998 | */ | ||
999 | |||
1000 | skd_fail_all_pending(skdev); | ||
1001 | } | ||
1002 | |||
1003 | /* | ||
1004 | ***************************************************************************** | ||
1005 | * TIMER | ||
1006 | ***************************************************************************** | ||
1007 | */ | ||
1008 | |||
1009 | static void skd_timer_tick_not_online(struct skd_device *skdev); | ||
1010 | |||
1011 | static void skd_timer_tick(ulong arg) | ||
1012 | { | ||
1013 | struct skd_device *skdev = (struct skd_device *)arg; | ||
1014 | |||
1015 | u32 timo_slot; | ||
1016 | u32 overdue_timestamp; | ||
1017 | unsigned long reqflags; | ||
1018 | u32 state; | ||
1019 | |||
1020 | if (skdev->state == SKD_DRVR_STATE_FAULT) | ||
1021 | /* The driver has declared fault, and we want it to | ||
1022 | * stay that way until driver is reloaded. | ||
1023 | */ | ||
1024 | return; | ||
1025 | |||
1026 | spin_lock_irqsave(&skdev->lock, reqflags); | ||
1027 | |||
1028 | state = SKD_READL(skdev, FIT_STATUS); | ||
1029 | state &= FIT_SR_DRIVE_STATE_MASK; | ||
1030 | if (state != skdev->drive_state) | ||
1031 | skd_isr_fwstate(skdev); | ||
1032 | |||
1033 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | ||
1034 | skd_timer_tick_not_online(skdev); | ||
1035 | goto timer_func_out; | ||
1036 | } | ||
1037 | skdev->timeout_stamp++; | ||
1038 | timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | ||
1039 | |||
1040 | /* | ||
1041 | * All requests that happened during the previous use of | ||
1042 | * this slot should be done by now. The previous use was | ||
1043 | * over 7 seconds ago. | ||
1044 | */ | ||
1045 | if (skdev->timeout_slot[timo_slot] == 0) | ||
1046 | goto timer_func_out; | ||
1047 | |||
1048 | /* Something is overdue */ | ||
1049 | overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT; | ||
1050 | |||
1051 | pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n", | ||
1052 | skdev->name, __func__, __LINE__, | ||
1053 | skdev->timeout_slot[timo_slot], skdev->in_flight); | ||
1054 | pr_err("(%s): Overdue IOs (%d), busy %d\n", | ||
1055 | skd_name(skdev), skdev->timeout_slot[timo_slot], | ||
1056 | skdev->in_flight); | ||
1057 | |||
1058 | skdev->timer_countdown = SKD_DRAINING_TIMO; | ||
1059 | skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; | ||
1060 | skdev->timo_slot = timo_slot; | ||
1061 | blk_stop_queue(skdev->queue); | ||
1062 | |||
1063 | timer_func_out: | ||
1064 | mod_timer(&skdev->timer, (jiffies + HZ)); | ||
1065 | |||
1066 | spin_unlock_irqrestore(&skdev->lock, reqflags); | ||
1067 | } | ||
1068 | |||
1069 | static void skd_timer_tick_not_online(struct skd_device *skdev) | ||
1070 | { | ||
1071 | switch (skdev->state) { | ||
1072 | case SKD_DRVR_STATE_IDLE: | ||
1073 | case SKD_DRVR_STATE_LOAD: | ||
1074 | break; | ||
1075 | case SKD_DRVR_STATE_BUSY_SANITIZE: | ||
1076 | pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n", | ||
1077 | skdev->name, __func__, __LINE__, | ||
1078 | skdev->drive_state, skdev->state); | ||
1079 | /* If we've been in sanitize for 3 seconds, we figure we're not | ||
1080 | * going to get anymore completions, so recover requests now | ||
1081 | */ | ||
1082 | if (skdev->timer_countdown > 0) { | ||
1083 | skdev->timer_countdown--; | ||
1084 | return; | ||
1085 | } | ||
1086 | skd_recover_requests(skdev, 0); | ||
1087 | break; | ||
1088 | |||
1089 | case SKD_DRVR_STATE_BUSY: | ||
1090 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
1091 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
1092 | pr_debug("%s:%s:%d busy[%x], countdown=%d\n", | ||
1093 | skdev->name, __func__, __LINE__, | ||
1094 | skdev->state, skdev->timer_countdown); | ||
1095 | if (skdev->timer_countdown > 0) { | ||
1096 | skdev->timer_countdown--; | ||
1097 | return; | ||
1098 | } | ||
1099 | pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.", | ||
1100 | skdev->name, __func__, __LINE__, | ||
1101 | skdev->state, skdev->timer_countdown); | ||
1102 | skd_restart_device(skdev); | ||
1103 | break; | ||
1104 | |||
1105 | case SKD_DRVR_STATE_WAIT_BOOT: | ||
1106 | case SKD_DRVR_STATE_STARTING: | ||
1107 | if (skdev->timer_countdown > 0) { | ||
1108 | skdev->timer_countdown--; | ||
1109 | return; | ||
1110 | } | ||
1111 | /* For now, we fault the drive. Could attempt resets to | ||
1112 | * recover at some point. */ | ||
1113 | skdev->state = SKD_DRVR_STATE_FAULT; | ||
1114 | |||
1115 | pr_err("(%s): DriveFault Connect Timeout (%x)\n", | ||
1116 | skd_name(skdev), skdev->drive_state); | ||
1117 | |||
1118 | /*start the queue so we can respond with error to requests */ | ||
1119 | /* wakeup anyone waiting for startup complete */ | ||
1120 | blk_start_queue(skdev->queue); | ||
1121 | skdev->gendisk_on = -1; | ||
1122 | wake_up_interruptible(&skdev->waitq); | ||
1123 | break; | ||
1124 | |||
1125 | case SKD_DRVR_STATE_ONLINE: | ||
1126 | /* shouldn't get here. */ | ||
1127 | break; | ||
1128 | |||
1129 | case SKD_DRVR_STATE_PAUSING: | ||
1130 | case SKD_DRVR_STATE_PAUSED: | ||
1131 | break; | ||
1132 | |||
1133 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | ||
1134 | pr_debug("%s:%s:%d " | ||
1135 | "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", | ||
1136 | skdev->name, __func__, __LINE__, | ||
1137 | skdev->timo_slot, | ||
1138 | skdev->timer_countdown, | ||
1139 | skdev->in_flight, | ||
1140 | skdev->timeout_slot[skdev->timo_slot]); | ||
1141 | /* if the slot has cleared we can let the I/O continue */ | ||
1142 | if (skdev->timeout_slot[skdev->timo_slot] == 0) { | ||
1143 | pr_debug("%s:%s:%d Slot drained, starting queue.\n", | ||
1144 | skdev->name, __func__, __LINE__); | ||
1145 | skdev->state = SKD_DRVR_STATE_ONLINE; | ||
1146 | blk_start_queue(skdev->queue); | ||
1147 | return; | ||
1148 | } | ||
1149 | if (skdev->timer_countdown > 0) { | ||
1150 | skdev->timer_countdown--; | ||
1151 | return; | ||
1152 | } | ||
1153 | skd_restart_device(skdev); | ||
1154 | break; | ||
1155 | |||
1156 | case SKD_DRVR_STATE_RESTARTING: | ||
1157 | if (skdev->timer_countdown > 0) { | ||
1158 | skdev->timer_countdown--; | ||
1159 | return; | ||
1160 | } | ||
1161 | /* For now, we fault the drive. Could attempt resets to | ||
1162 | * recover at some point. */ | ||
1163 | skdev->state = SKD_DRVR_STATE_FAULT; | ||
1164 | pr_err("(%s): DriveFault Reconnect Timeout (%x)\n", | ||
1165 | skd_name(skdev), skdev->drive_state); | ||
1166 | |||
1167 | /* | ||
1168 | * Recovering does two things: | ||
1169 | * 1. completes IO with error | ||
1170 | * 2. reclaims dma resources | ||
1171 | * When is it safe to recover requests? | ||
1172 | * - if the drive state is faulted | ||
1173 | * - if the state is still soft reset after our timeout | ||
1174 | * - if the drive registers are dead (state = FF) | ||
1175 | * If it is "unsafe", we still need to recover, so we will | ||
1176 | * disable pci bus mastering and disable our interrupts. | ||
1177 | */ | ||
1178 | |||
1179 | if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || | ||
1180 | (skdev->drive_state == FIT_SR_DRIVE_FAULT) || | ||
1181 | (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) | ||
1182 | /* It never came out of soft reset. Try to | ||
1183 | * recover the requests and then let them | ||
1184 | * fail. This is to mitigate hung processes. */ | ||
1185 | skd_recover_requests(skdev, 0); | ||
1186 | else { | ||
1187 | pr_err("(%s): Disable BusMaster (%x)\n", | ||
1188 | skd_name(skdev), skdev->drive_state); | ||
1189 | pci_disable_device(skdev->pdev); | ||
1190 | skd_disable_interrupts(skdev); | ||
1191 | skd_recover_requests(skdev, 0); | ||
1192 | } | ||
1193 | |||
1194 | /* start the queue so we can respond with error to requests */ | ||
1195 | /* wake up anyone waiting for startup to complete */ | ||
1196 | blk_start_queue(skdev->queue); | ||
1197 | skdev->gendisk_on = -1; | ||
1198 | wake_up_interruptible(&skdev->waitq); | ||
1199 | break; | ||
1200 | |||
1201 | case SKD_DRVR_STATE_RESUMING: | ||
1202 | case SKD_DRVR_STATE_STOPPING: | ||
1203 | case SKD_DRVR_STATE_SYNCING: | ||
1204 | case SKD_DRVR_STATE_FAULT: | ||
1205 | case SKD_DRVR_STATE_DISAPPEARED: | ||
1206 | default: | ||
1207 | break; | ||
1208 | } | ||
1209 | } | ||
1210 | |||
1211 | static int skd_start_timer(struct skd_device *skdev) | ||
1212 | { | ||
1213 | int rc; | ||
1214 | |||
1215 | init_timer(&skdev->timer); | ||
1216 | setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev); | ||
1217 | |||
1218 | rc = mod_timer(&skdev->timer, (jiffies + HZ)); | ||
1219 | if (rc) | ||
1220 | pr_err("%s: failed to start timer %d\n", | ||
1221 | __func__, rc); | ||
1222 | return rc; | ||
1223 | } | ||
1224 | |||
1225 | static void skd_kill_timer(struct skd_device *skdev) | ||
1226 | { | ||
1227 | del_timer_sync(&skdev->timer); | ||
1228 | } | ||
1229 | |||
1230 | /* | ||
1231 | ***************************************************************************** | ||
1232 | * IOCTL | ||
1233 | ***************************************************************************** | ||
1234 | */ | ||
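| /* | ||
| * SG_IO handling proceeds in stages: validate the user's sg_io_hdr and | ||
| * iovecs, obtain a free skd_special_context, allocate page-sized bounce | ||
| * buffers, copy data in, send the FIT message, wait for completion, | ||
| * copy data out, and write the status back to user space. | ||
| */ | ||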
1235 | static int skd_ioctl_sg_io(struct skd_device *skdev, | ||
1236 | fmode_t mode, void __user *argp); | ||
1237 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | ||
1238 | struct skd_sg_io *sksgio); | ||
1239 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | ||
1240 | struct skd_sg_io *sksgio); | ||
1241 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | ||
1242 | struct skd_sg_io *sksgio); | ||
1243 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | ||
1244 | struct skd_sg_io *sksgio, int dxfer_dir); | ||
1245 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | ||
1246 | struct skd_sg_io *sksgio); | ||
1247 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio); | ||
1248 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | ||
1249 | struct skd_sg_io *sksgio); | ||
1250 | static int skd_sg_io_put_status(struct skd_device *skdev, | ||
1251 | struct skd_sg_io *sksgio); | ||
1252 | |||
1253 | static void skd_complete_special(struct skd_device *skdev, | ||
1254 | volatile struct fit_completion_entry_v1 | ||
1255 | *skcomp, | ||
1256 | volatile struct fit_comp_error_info *skerr, | ||
1257 | struct skd_special_context *skspcl); | ||
1258 | |||
1259 | static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, | ||
1260 | uint cmd_in, ulong arg) | ||
1261 | { | ||
1262 | int rc = 0; | ||
1263 | struct gendisk *disk = bdev->bd_disk; | ||
1264 | struct skd_device *skdev = disk->private_data; | ||
1265 | void __user *p = (void __user *)arg; | ||
1266 | |||
1267 | pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", | ||
1268 | skdev->name, __func__, __LINE__, | ||
1269 | disk->disk_name, current->comm, mode, cmd_in, arg); | ||
1270 | |||
1271 | if (!capable(CAP_SYS_ADMIN)) | ||
1272 | return -EPERM; | ||
1273 | |||
1274 | switch (cmd_in) { | ||
1275 | case SG_SET_TIMEOUT: | ||
1276 | case SG_GET_TIMEOUT: | ||
1277 | case SG_GET_VERSION_NUM: | ||
1278 | rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p); | ||
1279 | break; | ||
1280 | case SG_IO: | ||
1281 | rc = skd_ioctl_sg_io(skdev, mode, p); | ||
1282 | break; | ||
1283 | |||
1284 | default: | ||
1285 | rc = -ENOTTY; | ||
1286 | break; | ||
1287 | } | ||
1288 | |||
1289 | pr_debug("%s:%s:%d %s: completion rc %d\n", | ||
1290 | skdev->name, __func__, __LINE__, disk->disk_name, rc); | ||
1291 | return rc; | ||
1292 | } | ||
1293 | |||
1294 | static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, | ||
1295 | void __user *argp) | ||
1296 | { | ||
1297 | int rc; | ||
1298 | struct skd_sg_io sksgio; | ||
1299 | |||
1300 | memset(&sksgio, 0, sizeof(sksgio)); | ||
1301 | sksgio.mode = mode; | ||
1302 | sksgio.argp = argp; | ||
1303 | sksgio.iov = &sksgio.no_iov_iov; | ||
1304 | |||
1305 | switch (skdev->state) { | ||
1306 | case SKD_DRVR_STATE_ONLINE: | ||
1307 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
1308 | break; | ||
1309 | |||
1310 | default: | ||
1311 | pr_debug("%s:%s:%d drive not online\n", | ||
1312 | skdev->name, __func__, __LINE__); | ||
1313 | rc = -ENXIO; | ||
1314 | goto out; | ||
1315 | } | ||
1316 | |||
1317 | rc = skd_sg_io_get_and_check_args(skdev, &sksgio); | ||
1318 | if (rc) | ||
1319 | goto out; | ||
1320 | |||
1321 | rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); | ||
1322 | if (rc) | ||
1323 | goto out; | ||
1324 | |||
1325 | rc = skd_sg_io_prep_buffering(skdev, &sksgio); | ||
1326 | if (rc) | ||
1327 | goto out; | ||
1328 | |||
1329 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); | ||
1330 | if (rc) | ||
1331 | goto out; | ||
1332 | |||
1333 | rc = skd_sg_io_send_fitmsg(skdev, &sksgio); | ||
1334 | if (rc) | ||
1335 | goto out; | ||
1336 | |||
1337 | rc = skd_sg_io_await(skdev, &sksgio); | ||
1338 | if (rc) | ||
1339 | goto out; | ||
1340 | |||
1341 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); | ||
1342 | if (rc) | ||
1343 | goto out; | ||
1344 | |||
1345 | rc = skd_sg_io_put_status(skdev, &sksgio); | ||
1346 | if (rc) | ||
1347 | goto out; | ||
1348 | |||
1349 | rc = 0; | ||
1350 | |||
1351 | out: | ||
1352 | skd_sg_io_release_skspcl(skdev, &sksgio); | ||
1353 | |||
1354 | if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) | ||
1355 | kfree(sksgio.iov); | ||
1356 | return rc; | ||
1357 | } | ||
1358 | |||
1359 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | ||
1360 | struct skd_sg_io *sksgio) | ||
1361 | { | ||
1362 | struct sg_io_hdr *sgp = &sksgio->sg; | ||
1363 | int i, acc; | ||
1364 | |||
1365 | if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { | ||
1366 | pr_debug("%s:%s:%d access sg failed %p\n", | ||
1367 | skdev->name, __func__, __LINE__, sksgio->argp); | ||
1368 | return -EFAULT; | ||
1369 | } | ||
1370 | |||
1371 | if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { | ||
1372 | pr_debug("%s:%s:%d copy_from_user sg failed %p\n", | ||
1373 | skdev->name, __func__, __LINE__, sksgio->argp); | ||
1374 | return -EFAULT; | ||
1375 | } | ||
1376 | |||
1377 | if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { | ||
1378 | pr_debug("%s:%s:%d interface_id invalid 0x%x\n", | ||
1379 | skdev->name, __func__, __LINE__, sgp->interface_id); | ||
1380 | return -EINVAL; | ||
1381 | } | ||
1382 | |||
1383 | if (sgp->cmd_len > sizeof(sksgio->cdb)) { | ||
1384 | pr_debug("%s:%s:%d cmd_len invalid %d\n", | ||
1385 | skdev->name, __func__, __LINE__, sgp->cmd_len); | ||
1386 | return -EINVAL; | ||
1387 | } | ||
1388 | |||
1389 | if (sgp->iovec_count > 256) { | ||
1390 | pr_debug("%s:%s:%d iovec_count invalid %d\n", | ||
1391 | skdev->name, __func__, __LINE__, sgp->iovec_count); | ||
1392 | return -EINVAL; | ||
1393 | } | ||
1394 | |||
1395 | if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { | ||
1396 | pr_debug("%s:%s:%d dxfer_len invalid %d\n", | ||
1397 | skdev->name, __func__, __LINE__, sgp->dxfer_len); | ||
1398 | return -EINVAL; | ||
1399 | } | ||
1400 | |||
1401 | switch (sgp->dxfer_direction) { | ||
1402 | case SG_DXFER_NONE: | ||
1403 | acc = -1; | ||
1404 | break; | ||
1405 | |||
1406 | case SG_DXFER_TO_DEV: | ||
1407 | acc = VERIFY_READ; | ||
1408 | break; | ||
1409 | |||
1410 | case SG_DXFER_FROM_DEV: | ||
1411 | case SG_DXFER_TO_FROM_DEV: | ||
1412 | acc = VERIFY_WRITE; | ||
1413 | break; | ||
1414 | |||
1415 | default: | ||
1416 | pr_debug("%s:%s:%d dxfer_dir invalid %d\n", | ||
1417 | skdev->name, __func__, __LINE__, sgp->dxfer_direction); | ||
1418 | return -EINVAL; | ||
1419 | } | ||
1420 | |||
1421 | if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { | ||
1422 | pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n", | ||
1423 | skdev->name, __func__, __LINE__, sgp->cmdp); | ||
1424 | return -EFAULT; | ||
1425 | } | ||
1426 | |||
1427 | if (sgp->mx_sb_len != 0) { | ||
1428 | if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) { | ||
1429 | pr_debug("%s:%s:%d access sbp failed %p\n", | ||
1430 | skdev->name, __func__, __LINE__, sgp->sbp); | ||
1431 | return -EFAULT; | ||
1432 | } | ||
1433 | } | ||
1434 | |||
1435 | if (sgp->iovec_count == 0) { | ||
1436 | sksgio->iov[0].iov_base = sgp->dxferp; | ||
1437 | sksgio->iov[0].iov_len = sgp->dxfer_len; | ||
1438 | sksgio->iovcnt = 1; | ||
1439 | sksgio->dxfer_len = sgp->dxfer_len; | ||
1440 | } else { | ||
1441 | struct sg_iovec *iov; | ||
1442 | uint nbytes = sizeof(*iov) * sgp->iovec_count; | ||
1443 | size_t iov_data_len; | ||
1444 | |||
1445 | iov = kmalloc(nbytes, GFP_KERNEL); | ||
1446 | if (iov == NULL) { | ||
1447 | pr_debug("%s:%s:%d alloc iovec failed %d\n", | ||
1448 | skdev->name, __func__, __LINE__, | ||
1449 | sgp->iovec_count); | ||
1450 | return -ENOMEM; | ||
1451 | } | ||
1452 | sksgio->iov = iov; | ||
1453 | sksgio->iovcnt = sgp->iovec_count; | ||
1454 | |||
1455 | if (copy_from_user(iov, sgp->dxferp, nbytes)) { | ||
1456 | pr_debug("%s:%s:%d copy_from_user iovec failed %p\n", | ||
1457 | skdev->name, __func__, __LINE__, sgp->dxferp); | ||
1458 | return -EFAULT; | ||
1459 | } | ||
1460 | |||
1461 | /* | ||
1462 | * Sum up the vecs, making sure they don't overflow | ||
1463 | */ | ||
1464 | iov_data_len = 0; | ||
1465 | for (i = 0; i < sgp->iovec_count; i++) { | ||
1466 | if (iov_data_len + iov[i].iov_len < iov_data_len) | ||
1467 | return -EINVAL; | ||
1468 | iov_data_len += iov[i].iov_len; | ||
1469 | } | ||
1470 | |||
1471 | /* SG_IO howto says that the shorter of the two wins */ | ||
1472 | if (sgp->dxfer_len < iov_data_len) { | ||
1473 | sksgio->iovcnt = iov_shorten((struct iovec *)iov, | ||
1474 | sgp->iovec_count, | ||
1475 | sgp->dxfer_len); | ||
1476 | sksgio->dxfer_len = sgp->dxfer_len; | ||
1477 | } else | ||
1478 | sksgio->dxfer_len = iov_data_len; | ||
1479 | } | ||
1480 | |||
1481 | if (sgp->dxfer_direction != SG_DXFER_NONE) { | ||
1482 | struct sg_iovec *iov = sksgio->iov; | ||
1483 | for (i = 0; i < sksgio->iovcnt; i++, iov++) { | ||
1484 | if (!access_ok(acc, iov->iov_base, iov->iov_len)) { | ||
1485 | pr_debug("%s:%s:%d access data failed %p/%d\n", | ||
1486 | skdev->name, __func__, __LINE__, | ||
1487 | iov->iov_base, (int)iov->iov_len); | ||
1488 | return -EFAULT; | ||
1489 | } | ||
1490 | } | ||
1491 | } | ||
1492 | |||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
1496 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | ||
1497 | struct skd_sg_io *sksgio) | ||
1498 | { | ||
1499 | struct skd_special_context *skspcl = NULL; | ||
1500 | int rc; | ||
1501 | |||
1502 | for (;;) { | ||
1503 | ulong flags; | ||
1504 | |||
1505 | spin_lock_irqsave(&skdev->lock, flags); | ||
1506 | skspcl = skdev->skspcl_free_list; | ||
1507 | if (skspcl != NULL) { | ||
1508 | skdev->skspcl_free_list = | ||
1509 | (struct skd_special_context *)skspcl->req.next; | ||
1510 | skspcl->req.id += SKD_ID_INCR; | ||
1511 | skspcl->req.state = SKD_REQ_STATE_SETUP; | ||
1512 | skspcl->orphaned = 0; | ||
1513 | skspcl->req.n_sg = 0; | ||
1514 | } | ||
1515 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
1516 | |||
1517 | if (skspcl != NULL) { | ||
1518 | rc = 0; | ||
1519 | break; | ||
1520 | } | ||
1521 | |||
1522 | pr_debug("%s:%s:%d blocking\n", | ||
1523 | skdev->name, __func__, __LINE__); | ||
1524 | |||
1525 | rc = wait_event_interruptible_timeout( | ||
1526 | skdev->waitq, | ||
1527 | (skdev->skspcl_free_list != NULL), | ||
1528 | msecs_to_jiffies(sksgio->sg.timeout)); | ||
1529 | |||
1530 | pr_debug("%s:%s:%d unblocking, rc=%d\n", | ||
1531 | skdev->name, __func__, __LINE__, rc); | ||
1532 | |||
1533 | if (rc <= 0) { | ||
1534 | if (rc == 0) | ||
1535 | rc = -ETIMEDOUT; | ||
1536 | else | ||
1537 | rc = -EINTR; | ||
1538 | break; | ||
1539 | } | ||
1540 | /* | ||
1541 | * If we get here, rc > 0, meaning wait_event_interruptible_timeout() | ||
1542 | * returned with time to spare, hence the sought event -- a non-empty | ||
1543 | * free list -- happened. | ||
1544 | * Retry the allocation. | ||
1545 | */ | ||
1546 | } | ||
1547 | sksgio->skspcl = skspcl; | ||
1548 | |||
1549 | return rc; | ||
1550 | } | ||
1551 | |||
1552 | static int skd_skreq_prep_buffering(struct skd_device *skdev, | ||
1553 | struct skd_request_context *skreq, | ||
1554 | u32 dxfer_len) | ||
1555 | { | ||
1556 | u32 resid = dxfer_len; | ||
1557 | |||
1558 | /* | ||
1559 | * The DMA engine must have aligned addresses and byte counts. | ||
1560 | */ | ||
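| /* (-resid) & 3 adds just enough to round resid up to a multiple of 4 */ | ||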
1561 | resid += (-resid) & 3; | ||
1562 | skreq->sg_byte_count = resid; | ||
1563 | |||
1564 | skreq->n_sg = 0; | ||
1565 | |||
1566 | while (resid > 0) { | ||
1567 | u32 nbytes = PAGE_SIZE; | ||
1568 | u32 ix = skreq->n_sg; | ||
1569 | struct scatterlist *sg = &skreq->sg[ix]; | ||
1570 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | ||
1571 | struct page *page; | ||
1572 | |||
1573 | if (nbytes > resid) | ||
1574 | nbytes = resid; | ||
1575 | |||
1576 | page = alloc_page(GFP_KERNEL); | ||
1577 | if (page == NULL) | ||
1578 | return -ENOMEM; | ||
1579 | |||
1580 | sg_set_page(sg, page, nbytes, 0); | ||
1581 | |||
1582 | /* TODO: This should be going through a pci_???() | ||
1583 | * routine to do proper mapping. */ | ||
1584 | sksg->control = FIT_SGD_CONTROL_NOT_LAST; | ||
1585 | sksg->byte_count = nbytes; | ||
1586 | |||
1587 | sksg->host_side_addr = sg_phys(sg); | ||
1588 | |||
1589 | sksg->dev_side_addr = 0; | ||
1590 | sksg->next_desc_ptr = skreq->sksg_dma_address + | ||
1591 | (ix + 1) * sizeof(*sksg); | ||
1592 | |||
1593 | skreq->n_sg++; | ||
1594 | resid -= nbytes; | ||
1595 | } | ||
1596 | |||
1597 | if (skreq->n_sg > 0) { | ||
1598 | u32 ix = skreq->n_sg - 1; | ||
1599 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | ||
1600 | |||
1601 | sksg->control = FIT_SGD_CONTROL_LAST; | ||
1602 | sksg->next_desc_ptr = 0; | ||
1603 | } | ||
1604 | |||
1605 | if (unlikely(skdev->dbg_level > 1)) { | ||
1606 | u32 i; | ||
1607 | |||
1608 | pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", | ||
1609 | skdev->name, __func__, __LINE__, | ||
1610 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | ||
1611 | for (i = 0; i < skreq->n_sg; i++) { | ||
1612 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | ||
1613 | |||
1614 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " | ||
1615 | "addr=0x%llx next=0x%llx\n", | ||
1616 | skdev->name, __func__, __LINE__, | ||
1617 | i, sgd->byte_count, sgd->control, | ||
1618 | sgd->host_side_addr, sgd->next_desc_ptr); | ||
1619 | } | ||
1620 | } | ||
1621 | |||
1622 | return 0; | ||
1623 | } | ||
1624 | |||
1625 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | ||
1626 | struct skd_sg_io *sksgio) | ||
1627 | { | ||
1628 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1629 | struct skd_request_context *skreq = &skspcl->req; | ||
1630 | u32 dxfer_len = sksgio->dxfer_len; | ||
1631 | int rc; | ||
1632 | |||
1633 | rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len); | ||
1634 | /* | ||
1635 | * Whether or not errors occur, skd_release_special() is eventually | ||
1636 | * called to recover allocations, including partial allocations. | ||
1637 | */ | ||
1638 | return rc; | ||
1639 | } | ||
1640 | |||
1641 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | ||
1642 | struct skd_sg_io *sksgio, int dxfer_dir) | ||
1643 | { | ||
1644 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1645 | u32 iov_ix = 0; | ||
1646 | struct sg_iovec curiov; | ||
1647 | u32 sksg_ix = 0; | ||
1648 | u8 *bufp = NULL; | ||
1649 | u32 buf_len = 0; | ||
1650 | u32 resid = sksgio->dxfer_len; | ||
1651 | int rc; | ||
1652 | |||
1653 | curiov.iov_len = 0; | ||
1654 | curiov.iov_base = NULL; | ||
1655 | |||
1656 | if (dxfer_dir != sksgio->sg.dxfer_direction) { | ||
1657 | if (dxfer_dir != SG_DXFER_TO_DEV || | ||
1658 | sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV) | ||
1659 | return 0; | ||
1660 | } | ||
1661 | |||
1662 | while (resid > 0) { | ||
1663 | u32 nbytes = PAGE_SIZE; | ||
1664 | |||
1665 | if (curiov.iov_len == 0) { | ||
1666 | curiov = sksgio->iov[iov_ix++]; | ||
1667 | continue; | ||
1668 | } | ||
1669 | |||
1670 | if (buf_len == 0) { | ||
1671 | struct page *page; | ||
1672 | page = sg_page(&skspcl->req.sg[sksg_ix++]); | ||
1673 | bufp = page_address(page); | ||
1674 | buf_len = PAGE_SIZE; | ||
1675 | } | ||
1676 | |||
1677 | nbytes = min_t(u32, nbytes, resid); | ||
1678 | nbytes = min_t(u32, nbytes, curiov.iov_len); | ||
1679 | nbytes = min_t(u32, nbytes, buf_len); | ||
1680 | |||
1681 | if (dxfer_dir == SG_DXFER_TO_DEV) | ||
1682 | rc = __copy_from_user(bufp, curiov.iov_base, nbytes); | ||
1683 | else | ||
1684 | rc = __copy_to_user(curiov.iov_base, bufp, nbytes); | ||
1685 | |||
1686 | if (rc) | ||
1687 | return -EFAULT; | ||
1688 | |||
1689 | resid -= nbytes; | ||
1690 | curiov.iov_len -= nbytes; | ||
1691 | curiov.iov_base += nbytes; | ||
1692 | buf_len -= nbytes; | ||
1693 | } | ||
1694 | |||
1695 | return 0; | ||
1696 | } | ||
1697 | |||
1698 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | ||
1699 | struct skd_sg_io *sksgio) | ||
1700 | { | ||
1701 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1702 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | ||
1703 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | ||
1704 | |||
1705 | memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES); | ||
1706 | |||
1707 | /* Initialize the FIT msg header */ | ||
1708 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | ||
1709 | fmh->num_protocol_cmds_coalesced = 1; | ||
1710 | |||
1711 | /* Initialize the SCSI request */ | ||
1712 | if (sksgio->sg.dxfer_direction != SG_DXFER_NONE) | ||
1713 | scsi_req->hdr.sg_list_dma_address = | ||
1714 | cpu_to_be64(skspcl->req.sksg_dma_address); | ||
1715 | scsi_req->hdr.tag = skspcl->req.id; | ||
1716 | scsi_req->hdr.sg_list_len_bytes = | ||
1717 | cpu_to_be32(skspcl->req.sg_byte_count); | ||
1718 | memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb)); | ||
1719 | |||
1720 | skspcl->req.state = SKD_REQ_STATE_BUSY; | ||
1721 | skd_send_special_fitmsg(skdev, skspcl); | ||
1722 | |||
1723 | return 0; | ||
1724 | } | ||
1725 | |||
1726 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio) | ||
1727 | { | ||
1728 | unsigned long flags; | ||
1729 | int rc; | ||
1730 | |||
1731 | rc = wait_event_interruptible_timeout(skdev->waitq, | ||
1732 | (sksgio->skspcl->req.state != | ||
1733 | SKD_REQ_STATE_BUSY), | ||
1734 | msecs_to_jiffies(sksgio->sg. | ||
1735 | timeout)); | ||
1736 | |||
1737 | spin_lock_irqsave(&skdev->lock, flags); | ||
1738 | |||
1739 | if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) { | ||
1740 | pr_debug("%s:%s:%d skspcl %p aborted\n", | ||
1741 | skdev->name, __func__, __LINE__, sksgio->skspcl); | ||
1742 | |||
1743 | /* Build check cond, sense and let command finish. */ | ||
1744 | /* For a timeout, we must fabricate completion and sense | ||
1745 | * data to complete the command */ | ||
1746 | sksgio->skspcl->req.completion.status = | ||
1747 | SAM_STAT_CHECK_CONDITION; | ||
1748 | |||
1749 | memset(&sksgio->skspcl->req.err_info, 0, | ||
1750 | sizeof(sksgio->skspcl->req.err_info)); | ||
1751 | sksgio->skspcl->req.err_info.type = 0x70; | ||
1752 | sksgio->skspcl->req.err_info.key = ABORTED_COMMAND; | ||
1753 | sksgio->skspcl->req.err_info.code = 0x44; | ||
1754 | sksgio->skspcl->req.err_info.qual = 0; | ||
1755 | rc = 0; | ||
1756 | } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY) | ||
1757 | /* No longer on the adapter. We finish. */ | ||
1758 | rc = 0; | ||
1759 | else { | ||
1760 | /* Something's gone wrong. Still busy. Timeout or | ||
1761 | * user interrupted (control-C). Mark as an orphan | ||
1762 | * so it will be disposed of when it completes. */ | ||
1763 | sksgio->skspcl->orphaned = 1; | ||
1764 | sksgio->skspcl = NULL; | ||
1765 | if (rc == 0) { | ||
1766 | pr_debug("%s:%s:%d timed out %p (%u ms)\n", | ||
1767 | skdev->name, __func__, __LINE__, | ||
1768 | sksgio, sksgio->sg.timeout); | ||
1769 | rc = -ETIMEDOUT; | ||
1770 | } else { | ||
1771 | pr_debug("%s:%s:%d cntlc %p\n", | ||
1772 | skdev->name, __func__, __LINE__, sksgio); | ||
1773 | rc = -EINTR; | ||
1774 | } | ||
1775 | } | ||
1776 | |||
1777 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
1778 | |||
1779 | return rc; | ||
1780 | } | ||
1781 | |||
1782 | static int skd_sg_io_put_status(struct skd_device *skdev, | ||
1783 | struct skd_sg_io *sksgio) | ||
1784 | { | ||
1785 | struct sg_io_hdr *sgp = &sksgio->sg; | ||
1786 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1787 | int resid = 0; | ||
1788 | |||
1789 | u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes); | ||
1790 | |||
1791 | sgp->status = skspcl->req.completion.status; | ||
1792 | resid = sksgio->dxfer_len - nb; | ||
1793 | |||
1794 | sgp->masked_status = sgp->status & STATUS_MASK; | ||
1795 | sgp->msg_status = 0; | ||
1796 | sgp->host_status = 0; | ||
1797 | sgp->driver_status = 0; | ||
1798 | sgp->resid = resid; | ||
1799 | if (sgp->masked_status || sgp->host_status || sgp->driver_status) | ||
1800 | sgp->info |= SG_INFO_CHECK; | ||
1801 | |||
1802 | pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n", | ||
1803 | skdev->name, __func__, __LINE__, | ||
1804 | sgp->status, sgp->masked_status, sgp->resid); | ||
1805 | |||
1806 | if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) { | ||
1807 | if (sgp->mx_sb_len > 0) { | ||
1808 | struct fit_comp_error_info *ei = &skspcl->req.err_info; | ||
1809 | u32 nbytes = sizeof(*ei); | ||
1810 | |||
1811 | nbytes = min_t(u32, nbytes, sgp->mx_sb_len); | ||
1812 | |||
1813 | sgp->sb_len_wr = nbytes; | ||
1814 | |||
1815 | if (__copy_to_user(sgp->sbp, ei, nbytes)) { | ||
1816 | pr_debug("%s:%s:%d copy_to_user sense failed %p\n", | ||
1817 | skdev->name, __func__, __LINE__, | ||
1818 | sgp->sbp); | ||
1819 | return -EFAULT; | ||
1820 | } | ||
1821 | } | ||
1822 | } | ||
1823 | |||
1824 | if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) { | ||
1825 | pr_debug("%s:%s:%d copy_to_user sg failed %p\n", | ||
1826 | skdev->name, __func__, __LINE__, sksgio->argp); | ||
1827 | return -EFAULT; | ||
1828 | } | ||
1829 | |||
1830 | return 0; | ||
1831 | } | ||
1832 | |||
1833 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | ||
1834 | struct skd_sg_io *sksgio) | ||
1835 | { | ||
1836 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1837 | |||
1838 | if (skspcl != NULL) { | ||
1839 | ulong flags; | ||
1840 | |||
1841 | sksgio->skspcl = NULL; | ||
1842 | |||
1843 | spin_lock_irqsave(&skdev->lock, flags); | ||
1844 | skd_release_special(skdev, skspcl); | ||
1845 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
1846 | } | ||
1847 | |||
1848 | return 0; | ||
1849 | } | ||
1850 | |||
1851 | /* | ||
1852 | ***************************************************************************** | ||
1853 | * INTERNAL REQUESTS -- generated by driver itself | ||
1854 | ***************************************************************************** | ||
1855 | */ | ||
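| /* | ||
| * The internal request sequence, driven by skd_complete_internal(), walks | ||
| * TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER -> READ_CAPACITY -> INQUIRY | ||
| * to exercise the data path and gather device data before the drive is | ||
| * brought online. | ||
| */ | ||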
1856 | |||
1857 | static int skd_format_internal_skspcl(struct skd_device *skdev) | ||
1858 | { | ||
1859 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | ||
1860 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | ||
1861 | struct fit_msg_hdr *fmh; | ||
1862 | uint64_t dma_address; | ||
1863 | struct skd_scsi_request *scsi; | ||
1864 | |||
1865 | fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0]; | ||
1866 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | ||
1867 | fmh->num_protocol_cmds_coalesced = 1; | ||
1868 | |||
1869 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | ||
1870 | memset(scsi, 0, sizeof(*scsi)); | ||
1871 | dma_address = skspcl->req.sksg_dma_address; | ||
1872 | scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); | ||
1873 | sgd->control = FIT_SGD_CONTROL_LAST; | ||
1874 | sgd->byte_count = 0; | ||
1875 | sgd->host_side_addr = skspcl->db_dma_address; | ||
1876 | sgd->dev_side_addr = 0; | ||
1877 | sgd->next_desc_ptr = 0LL; | ||
1878 | |||
1879 | return 1; | ||
1880 | } | ||
1881 | |||
1882 | #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES | ||
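| /* WRITE_BUFFER fills the internal buffer with an incrementing byte pattern; | ||
| * READ_BUFFER reads it back and skd_chk_read_buf() verifies the data path. | ||
| */ | ||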
1883 | |||
1884 | static void skd_send_internal_skspcl(struct skd_device *skdev, | ||
1885 | struct skd_special_context *skspcl, | ||
1886 | u8 opcode) | ||
1887 | { | ||
1888 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | ||
1889 | struct skd_scsi_request *scsi; | ||
1890 | unsigned char *buf = skspcl->data_buf; | ||
1891 | int i; | ||
1892 | |||
1893 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) | ||
1894 | /* | ||
1895 | * A refresh is already in progress. | ||
1896 | * Just wait for it to finish. | ||
1897 | */ | ||
1898 | return; | ||
1899 | |||
1900 | SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0); | ||
1901 | skspcl->req.state = SKD_REQ_STATE_BUSY; | ||
1902 | skspcl->req.id += SKD_ID_INCR; | ||
1903 | |||
1904 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | ||
1905 | scsi->hdr.tag = skspcl->req.id; | ||
1906 | |||
1907 | memset(scsi->cdb, 0, sizeof(scsi->cdb)); | ||
1908 | |||
1909 | switch (opcode) { | ||
1910 | case TEST_UNIT_READY: | ||
1911 | scsi->cdb[0] = TEST_UNIT_READY; | ||
1912 | sgd->byte_count = 0; | ||
1913 | scsi->hdr.sg_list_len_bytes = 0; | ||
1914 | break; | ||
1915 | |||
1916 | case READ_CAPACITY: | ||
1917 | scsi->cdb[0] = READ_CAPACITY; | ||
1918 | sgd->byte_count = SKD_N_READ_CAP_BYTES; | ||
1919 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1920 | break; | ||
1921 | |||
1922 | case INQUIRY: | ||
1923 | scsi->cdb[0] = INQUIRY; | ||
1924 | scsi->cdb[1] = 0x01; /* evpd */ | ||
1925 | scsi->cdb[2] = 0x80; /* serial number page */ | ||
1926 | scsi->cdb[4] = 0x10; | ||
1927 | sgd->byte_count = 16; | ||
1928 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1929 | break; | ||
1930 | |||
1931 | case SYNCHRONIZE_CACHE: | ||
1932 | scsi->cdb[0] = SYNCHRONIZE_CACHE; | ||
1933 | sgd->byte_count = 0; | ||
1934 | scsi->hdr.sg_list_len_bytes = 0; | ||
1935 | break; | ||
1936 | |||
1937 | case WRITE_BUFFER: | ||
1938 | scsi->cdb[0] = WRITE_BUFFER; | ||
1939 | scsi->cdb[1] = 0x02; | ||
1940 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | ||
1941 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | ||
1942 | sgd->byte_count = WR_BUF_SIZE; | ||
1943 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1944 | /* fill incrementing byte pattern */ | ||
1945 | for (i = 0; i < sgd->byte_count; i++) | ||
1946 | buf[i] = i & 0xFF; | ||
1947 | break; | ||
1948 | |||
1949 | case READ_BUFFER: | ||
1950 | scsi->cdb[0] = READ_BUFFER; | ||
1951 | scsi->cdb[1] = 0x02; | ||
1952 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | ||
1953 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | ||
1954 | sgd->byte_count = WR_BUF_SIZE; | ||
1955 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1956 | memset(skspcl->data_buf, 0, sgd->byte_count); | ||
1957 | break; | ||
1958 | |||
1959 | default: | ||
1960 | SKD_ASSERT("Don't know what to send"); | ||
1961 | return; | ||
1962 | |||
1963 | } | ||
1964 | skd_send_special_fitmsg(skdev, skspcl); | ||
1965 | } | ||
1966 | |||
1967 | static void skd_refresh_device_data(struct skd_device *skdev) | ||
1968 | { | ||
1969 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | ||
1970 | |||
1971 | skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); | ||
1972 | } | ||
1973 | |||
1974 | static int skd_chk_read_buf(struct skd_device *skdev, | ||
1975 | struct skd_special_context *skspcl) | ||
1976 | { | ||
1977 | unsigned char *buf = skspcl->data_buf; | ||
1978 | int i; | ||
1979 | |||
1980 | /* check for incrementing byte pattern */ | ||
1981 | for (i = 0; i < WR_BUF_SIZE; i++) | ||
1982 | if (buf[i] != (i & 0xFF)) | ||
1983 | return 1; | ||
1984 | |||
1985 | return 0; | ||
1986 | } | ||
1987 | |||
1988 | static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, | ||
1989 | u8 code, u8 qual, u8 fruc) | ||
1990 | { | ||
1991 | /* If the check condition is of special interest, log a message */ | ||
1992 | if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) | ||
1993 | && (code == 0x04) && (qual == 0x06)) { | ||
1994 | pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/" | ||
1995 | "ascq/fruc %02x/%02x/%02x/%02x\n", | ||
1996 | skd_name(skdev), key, code, qual, fruc); | ||
1997 | } | ||
1998 | } | ||
1999 | |||
2000 | static void skd_complete_internal(struct skd_device *skdev, | ||
2001 | volatile struct fit_completion_entry_v1 | ||
2002 | *skcomp, | ||
2003 | volatile struct fit_comp_error_info *skerr, | ||
2004 | struct skd_special_context *skspcl) | ||
2005 | { | ||
2006 | u8 *buf = skspcl->data_buf; | ||
2007 | u8 status; | ||
2008 | int i; | ||
2009 | struct skd_scsi_request *scsi = | ||
2010 | (struct skd_scsi_request *)&skspcl->msg_buf[64]; | ||
2011 | |||
2012 | SKD_ASSERT(skspcl == &skdev->internal_skspcl); | ||
2013 | |||
2014 | pr_debug("%s:%s:%d complete internal %x\n", | ||
2015 | skdev->name, __func__, __LINE__, scsi->cdb[0]); | ||
2016 | |||
2017 | skspcl->req.completion = *skcomp; | ||
2018 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
2019 | skspcl->req.id += SKD_ID_INCR; | ||
2020 | |||
2021 | status = skspcl->req.completion.status; | ||
2022 | |||
2023 | skd_log_check_status(skdev, status, skerr->key, skerr->code, | ||
2024 | skerr->qual, skerr->fruc); | ||
2025 | |||
2026 | switch (scsi->cdb[0]) { | ||
2027 | case TEST_UNIT_READY: | ||
2028 | if (status == SAM_STAT_GOOD) | ||
2029 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | ||
2030 | else if ((status == SAM_STAT_CHECK_CONDITION) && | ||
2031 | (skerr->key == MEDIUM_ERROR)) | ||
2032 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | ||
2033 | else { | ||
2034 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | ||
2035 | pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n", | ||
2036 | skdev->name, __func__, __LINE__, | ||
2037 | skdev->state); | ||
2038 | return; | ||
2039 | } | ||
2040 | pr_debug("%s:%s:%d **** TUR failed, retry skerr\n", | ||
2041 | skdev->name, __func__, __LINE__); | ||
2042 | skd_send_internal_skspcl(skdev, skspcl, 0x00); | ||
2043 | } | ||
2044 | break; | ||
2045 | |||
2046 | case WRITE_BUFFER: | ||
2047 | if (status == SAM_STAT_GOOD) | ||
2048 | skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); | ||
2049 | else { | ||
2050 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | ||
2051 | pr_debug("%s:%s:%d write buffer failed, don't send any more, state 0x%x\n", | ||
2052 | skdev->name, __func__, __LINE__, | ||
2053 | skdev->state); | ||
2054 | return; | ||
2055 | } | ||
2056 | pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n", | ||
2057 | skdev->name, __func__, __LINE__); | ||
2058 | skd_send_internal_skspcl(skdev, skspcl, 0x00); | ||
2059 | } | ||
2060 | break; | ||
2061 | |||
2062 | case READ_BUFFER: | ||
2063 | if (status == SAM_STAT_GOOD) { | ||
2064 | if (skd_chk_read_buf(skdev, skspcl) == 0) | ||
2065 | skd_send_internal_skspcl(skdev, skspcl, | ||
2066 | READ_CAPACITY); | ||
2067 | else { | ||
2068 | pr_err( | ||
2069 | "(%s):*** W/R Buffer mismatch %d ***\n", | ||
2070 | skd_name(skdev), skdev->connect_retries); | ||
2071 | if (skdev->connect_retries < | ||
2072 | SKD_MAX_CONNECT_RETRIES) { | ||
2073 | skdev->connect_retries++; | ||
2074 | skd_soft_reset(skdev); | ||
2075 | } else { | ||
2076 | pr_err( | ||
2077 | "(%s): W/R Buffer Connect Error\n", | ||
2078 | skd_name(skdev)); | ||
2079 | return; | ||
2080 | } | ||
2081 | } | ||
2082 | |||
2083 | } else { | ||
2084 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | ||
2085 | pr_debug("%s:%s:%d " | ||
2086 | "read buffer failed, don't send anymore state 0x%x\n", | ||
2087 | skdev->name, __func__, __LINE__, | ||
2088 | skdev->state); | ||
2089 | return; | ||
2090 | } | ||
2091 | pr_debug("%s:%s:%d " | ||
2092 | "**** read buffer failed, retry skerr\n", | ||
2093 | skdev->name, __func__, __LINE__); | ||
2094 | skd_send_internal_skspcl(skdev, skspcl, 0x00); | ||
2095 | } | ||
2096 | break; | ||
2097 | |||
2098 | case READ_CAPACITY: | ||
2099 | skdev->read_cap_is_valid = 0; | ||
2100 | if (status == SAM_STAT_GOOD) { | ||
2101 | skdev->read_cap_last_lba = | ||
2102 | (buf[0] << 24) | (buf[1] << 16) | | ||
2103 | (buf[2] << 8) | buf[3]; | ||
2104 | skdev->read_cap_blocksize = | ||
2105 | (buf[4] << 24) | (buf[5] << 16) | | ||
2106 | (buf[6] << 8) | buf[7]; | ||
2107 | |||
2108 | pr_debug("%s:%s:%d last lba %d, bs %d\n", | ||
2109 | skdev->name, __func__, __LINE__, | ||
2110 | skdev->read_cap_last_lba, | ||
2111 | skdev->read_cap_blocksize); | ||
2112 | |||
2113 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | ||
2114 | |||
2115 | skdev->read_cap_is_valid = 1; | ||
2116 | |||
2117 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); | ||
2118 | } else if ((status == SAM_STAT_CHECK_CONDITION) && | ||
2119 | (skerr->key == MEDIUM_ERROR)) { | ||
2120 | skdev->read_cap_last_lba = ~0; | ||
2121 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | ||
2122 | pr_debug("%s:%s:%d " | ||
2123 | "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n", | ||
2124 | skdev->name, __func__, __LINE__); | ||
2125 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); | ||
2126 | } else { | ||
2127 | pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n", | ||
2128 | skdev->name, __func__, __LINE__); | ||
2129 | skd_send_internal_skspcl(skdev, skspcl, | ||
2130 | TEST_UNIT_READY); | ||
2131 | } | ||
2132 | break; | ||
2133 | |||
2134 | case INQUIRY: | ||
2135 | skdev->inquiry_is_valid = 0; | ||
2136 | if (status == SAM_STAT_GOOD) { | ||
2137 | skdev->inquiry_is_valid = 1; | ||
2138 | |||
2139 | for (i = 0; i < 12; i++) | ||
2140 | skdev->inq_serial_num[i] = buf[i + 4]; | ||
2141 | skdev->inq_serial_num[12] = 0; | ||
2142 | } | ||
2143 | |||
2144 | if (skd_unquiesce_dev(skdev) < 0) | ||
2145 | pr_debug("%s:%s:%d **** failed to ONLINE device\n", | ||
2146 | skdev->name, __func__, __LINE__); | ||
2147 | /* connection is complete */ | ||
2148 | skdev->connect_retries = 0; | ||
2149 | break; | ||
2150 | |||
2151 | case SYNCHRONIZE_CACHE: | ||
2152 | if (status == SAM_STAT_GOOD) | ||
2153 | skdev->sync_done = 1; | ||
2154 | else | ||
2155 | skdev->sync_done = -1; | ||
2156 | wake_up_interruptible(&skdev->waitq); | ||
2157 | break; | ||
2158 | |||
2159 | default: | ||
2160 | SKD_ASSERT("we didn't send this"); | ||
2161 | } | ||
2162 | } | ||
2163 | |||
2164 | /* | ||
2165 | ***************************************************************************** | ||
2166 | * FIT MESSAGES | ||
2167 | ***************************************************************************** | ||
2168 | */ | ||
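| /* | ||
| * A FIT message is issued by writing its DMA address, OR'd with a queue ID | ||
| * and an encoded message size, to the FIT_Q_COMMAND register. | ||
| */ | ||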
2169 | |||
2170 | static void skd_send_fitmsg(struct skd_device *skdev, | ||
2171 | struct skd_fitmsg_context *skmsg) | ||
2172 | { | ||
2173 | u64 qcmd; | ||
2174 | struct fit_msg_hdr *fmh; | ||
2175 | |||
2176 | pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n", | ||
2177 | skdev->name, __func__, __LINE__, | ||
2178 | skmsg->mb_dma_address, skdev->in_flight); | ||
2179 | pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n", | ||
2180 | skdev->name, __func__, __LINE__, | ||
2181 | skmsg->msg_buf, skmsg->offset); | ||
2182 | |||
2183 | qcmd = skmsg->mb_dma_address; | ||
2184 | qcmd |= FIT_QCMD_QID_NORMAL; | ||
2185 | |||
2186 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | ||
2187 | skmsg->outstanding = fmh->num_protocol_cmds_coalesced; | ||
2188 | |||
2189 | if (unlikely(skdev->dbg_level > 1)) { | ||
2190 | u8 *bp = (u8 *)skmsg->msg_buf; | ||
2191 | int i; | ||
2192 | for (i = 0; i < skmsg->length; i += 8) { | ||
2193 | pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x " | ||
2194 | "%02x %02x %02x %02x\n", | ||
2195 | skdev->name, __func__, __LINE__, | ||
2196 | i, bp[i + 0], bp[i + 1], bp[i + 2], | ||
2197 | bp[i + 3], bp[i + 4], bp[i + 5], | ||
2198 | bp[i + 6], bp[i + 7]); | ||
2199 | if (i == 0) | ||
2200 | i = 64 - 8; | ||
2201 | } | ||
2202 | } | ||
2203 | |||
2204 | if (skmsg->length > 256) | ||
2205 | qcmd |= FIT_QCMD_MSGSIZE_512; | ||
2206 | else if (skmsg->length > 128) | ||
2207 | qcmd |= FIT_QCMD_MSGSIZE_256; | ||
2208 | else if (skmsg->length > 64) | ||
2209 | qcmd |= FIT_QCMD_MSGSIZE_128; | ||
2210 | else | ||
2211 | /* | ||
2212 | * This makes no sense because the FIT msg header is | ||
2213 | * 64 bytes. If the msg is only 64 bytes long it has | ||
2214 | * no payload. | ||
2215 | */ | ||
2216 | qcmd |= FIT_QCMD_MSGSIZE_64; | ||
2217 | |||
2218 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | ||
2219 | |||
2220 | } | ||
2221 | |||
2222 | static void skd_send_special_fitmsg(struct skd_device *skdev, | ||
2223 | struct skd_special_context *skspcl) | ||
2224 | { | ||
2225 | u64 qcmd; | ||
2226 | |||
2227 | if (unlikely(skdev->dbg_level > 1)) { | ||
2228 | u8 *bp = (u8 *)skspcl->msg_buf; | ||
2229 | int i; | ||
2230 | |||
2231 | for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { | ||
2232 | pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x " | ||
2233 | "%02x %02x %02x %02x\n", | ||
2234 | skdev->name, __func__, __LINE__, i, | ||
2235 | bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3], | ||
2236 | bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]); | ||
2237 | if (i == 0) | ||
2238 | i = 64 - 8; | ||
2239 | } | ||
2240 | |||
2241 | pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", | ||
2242 | skdev->name, __func__, __LINE__, | ||
2243 | skspcl, skspcl->req.id, skspcl->req.sksg_list, | ||
2244 | skspcl->req.sksg_dma_address); | ||
2245 | for (i = 0; i < skspcl->req.n_sg; i++) { | ||
2246 | struct fit_sg_descriptor *sgd = | ||
2247 | &skspcl->req.sksg_list[i]; | ||
2248 | |||
2249 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " | ||
2250 | "addr=0x%llx next=0x%llx\n", | ||
2251 | skdev->name, __func__, __LINE__, | ||
2252 | i, sgd->byte_count, sgd->control, | ||
2253 | sgd->host_side_addr, sgd->next_desc_ptr); | ||
2254 | } | ||
2255 | } | ||
2256 | |||
2257 | /* | ||
2258 | * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr | ||
2259 | * and one 64-byte SSDI command. | ||
2260 | */ | ||
2261 | qcmd = skspcl->mb_dma_address; | ||
2262 | qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; | ||
2263 | |||
2264 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | ||
2265 | } | ||
2266 | |||
2267 | /* | ||
2268 | ***************************************************************************** | ||
2269 | * COMPLETION QUEUE | ||
2270 | ***************************************************************************** | ||
2271 | */ | ||
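| /* | ||
| * Completion entries are consumed while their cycle value matches | ||
| * skdev->skcomp_cycle. The low bits of a completion's tag select a request | ||
| * slot; slots at or beyond num_req_context are routed to skd_complete_other(). | ||
| * A tag that no longer matches the slot's request id is treated as stale | ||
| * and skipped. | ||
| */ | ||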
2272 | |||
2273 | static void skd_complete_other(struct skd_device *skdev, | ||
2274 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2275 | volatile struct fit_comp_error_info *skerr); | ||
2276 | |||
2277 | struct sns_info { | ||
2278 | u8 type; | ||
2279 | u8 stat; | ||
2280 | u8 key; | ||
2281 | u8 asc; | ||
2282 | u8 ascq; | ||
2283 | u8 mask; | ||
2284 | enum skd_check_status_action action; | ||
2285 | }; | ||
2286 | |||
2287 | static struct sns_info skd_chkstat_table[] = { | ||
2288 | /* Good */ | ||
2289 | { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, | ||
2290 | SKD_CHECK_STATUS_REPORT_GOOD }, | ||
2291 | |||
2292 | /* Smart alerts */ | ||
2293 | { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ | ||
2294 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | ||
2295 | { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ | ||
2296 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | ||
2297 | { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ | ||
2298 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | ||
2299 | |||
2300 | /* Retry (with limits) */ | ||
2301 | { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ | ||
2302 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2303 | { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ | ||
2304 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2305 | { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ | ||
2306 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2307 | { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ | ||
2308 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2309 | |||
2310 | /* Busy (or about to be) */ | ||
2311 | { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ | ||
2312 | SKD_CHECK_STATUS_BUSY_IMMINENT }, | ||
2313 | }; | ||
2314 | |||
2315 | /* | ||
2316 | * Look up status and sense data to decide how to handle the error | ||
2317 | * from the device. | ||
2318 | * mask says which fields must match e.g., mask=0x18 means check | ||
2319 | * type and stat, ignore key, asc, ascq. | ||
2320 | */ | ||
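| /* Mask bits: 0x10 = type, 0x08 = stat, 0x04 = key, 0x02 = asc, 0x01 = ascq. */ | ||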
2321 | |||
2322 | static enum skd_check_status_action | ||
2323 | skd_check_status(struct skd_device *skdev, | ||
2324 | u8 cmp_status, volatile struct fit_comp_error_info *skerr) | ||
2325 | { | ||
2326 | int i, n; | ||
2327 | |||
2328 | pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", | ||
2329 | skd_name(skdev), skerr->key, skerr->code, skerr->qual, | ||
2330 | skerr->fruc); | ||
2331 | |||
2332 | pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", | ||
2333 | skdev->name, __func__, __LINE__, skerr->type, cmp_status, | ||
2334 | skerr->key, skerr->code, skerr->qual, skerr->fruc); | ||
2335 | |||
2336 | /* Does the info match an entry in the good category? */ | ||
2337 | n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]); | ||
2338 | for (i = 0; i < n; i++) { | ||
2339 | struct sns_info *sns = &skd_chkstat_table[i]; | ||
2340 | |||
2341 | if (sns->mask & 0x10) | ||
2342 | if (skerr->type != sns->type) | ||
2343 | continue; | ||
2344 | |||
2345 | if (sns->mask & 0x08) | ||
2346 | if (cmp_status != sns->stat) | ||
2347 | continue; | ||
2348 | |||
2349 | if (sns->mask & 0x04) | ||
2350 | if (skerr->key != sns->key) | ||
2351 | continue; | ||
2352 | |||
2353 | if (sns->mask & 0x02) | ||
2354 | if (skerr->code != sns->asc) | ||
2355 | continue; | ||
2356 | |||
2357 | if (sns->mask & 0x01) | ||
2358 | if (skerr->qual != sns->ascq) | ||
2359 | continue; | ||
2360 | |||
2361 | if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { | ||
2362 | pr_err("(%s): SMART Alert: sense key/asc/ascq " | ||
2363 | "%02x/%02x/%02x\n", | ||
2364 | skd_name(skdev), skerr->key, | ||
2365 | skerr->code, skerr->qual); | ||
2366 | } | ||
2367 | return sns->action; | ||
2368 | } | ||
2369 | |||
2370 | /* No other match, so nonzero status means error, | ||
2371 | * zero status means good | ||
2372 | */ | ||
2373 | if (cmp_status) { | ||
2374 | pr_debug("%s:%s:%d status check: error\n", | ||
2375 | skdev->name, __func__, __LINE__); | ||
2376 | return SKD_CHECK_STATUS_REPORT_ERROR; | ||
2377 | } | ||
2378 | |||
2379 | pr_debug("%s:%s:%d status check good default\n", | ||
2380 | skdev->name, __func__, __LINE__); | ||
2381 | return SKD_CHECK_STATUS_REPORT_GOOD; | ||
2382 | } | ||
2383 | |||
2384 | static void skd_resolve_req_exception(struct skd_device *skdev, | ||
2385 | struct skd_request_context *skreq) | ||
2386 | { | ||
2387 | u8 cmp_status = skreq->completion.status; | ||
2388 | |||
2389 | switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { | ||
2390 | case SKD_CHECK_STATUS_REPORT_GOOD: | ||
2391 | case SKD_CHECK_STATUS_REPORT_SMART_ALERT: | ||
2392 | skd_end_request(skdev, skreq, 0); | ||
2393 | break; | ||
2394 | |||
2395 | case SKD_CHECK_STATUS_BUSY_IMMINENT: | ||
2396 | skd_log_skreq(skdev, skreq, "retry(busy)"); | ||
2397 | blk_requeue_request(skdev->queue, skreq->req); | ||
2398 | pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); | ||
2399 | skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; | ||
2400 | skdev->timer_countdown = SKD_TIMER_MINUTES(20); | ||
2401 | skd_quiesce_dev(skdev); | ||
2402 | break; | ||
2403 | |||
2404 | case SKD_CHECK_STATUS_REQUEUE_REQUEST: | ||
2405 | if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) { | ||
2406 | skd_log_skreq(skdev, skreq, "retry"); | ||
2407 | blk_requeue_request(skdev->queue, skreq->req); | ||
2408 | break; | ||
2409 | } | ||
2410 | /* fall through to report error */ | ||
2411 | |||
2412 | case SKD_CHECK_STATUS_REPORT_ERROR: | ||
2413 | default: | ||
2414 | skd_end_request(skdev, skreq, -EIO); | ||
2415 | break; | ||
2416 | } | ||
2417 | } | ||
2418 | |||
2419 | /* assume spinlock is already held */ | ||
2420 | static void skd_release_skreq(struct skd_device *skdev, | ||
2421 | struct skd_request_context *skreq) | ||
2422 | { | ||
2423 | u32 msg_slot; | ||
2424 | struct skd_fitmsg_context *skmsg; | ||
2425 | |||
2426 | u32 timo_slot; | ||
2427 | |||
2428 | /* | ||
2429 | * Reclaim the FIT msg buffer if this is | ||
2430 | * the first of the requests it carried to | ||
2431 | * be completed. The FIT msg buffer used to | ||
2432 | * send this request cannot be reused until | ||
2433 | * we are sure the s1120 card has copied | ||
2434 | * it to its memory. The FIT msg might have | ||
2435 | * contained several requests. As soon as | ||
2436 | * any of them are completed we know that | ||
2437 | * the entire FIT msg was transferred. | ||
2438 | * Only the first completed request will | ||
2439 | * match the FIT msg buffer id. The FIT | ||
2440 | * msg buffer id is immediately updated. | ||
2441 | * When subsequent requests complete the FIT | ||
2442 | * msg buffer id won't match, so we know | ||
2443 | * quite cheaply that it is already done. | ||
2444 | */ | ||
2445 | msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; | ||
2446 | SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); | ||
2447 | |||
2448 | skmsg = &skdev->skmsg_table[msg_slot]; | ||
2449 | if (skmsg->id == skreq->fitmsg_id) { | ||
2450 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); | ||
2451 | SKD_ASSERT(skmsg->outstanding > 0); | ||
2452 | skmsg->outstanding--; | ||
2453 | if (skmsg->outstanding == 0) { | ||
2454 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
2455 | skmsg->id += SKD_ID_INCR; | ||
2456 | skmsg->next = skdev->skmsg_free_list; | ||
2457 | skdev->skmsg_free_list = skmsg; | ||
2458 | } | ||
2459 | } | ||
2460 | |||
2461 | /* | ||
2462 | * Decrease the number of active requests. | ||
2463 | * Also decrements the count in the timeout slot. | ||
2464 | */ | ||
2465 | SKD_ASSERT(skdev->in_flight > 0); | ||
2466 | skdev->in_flight -= 1; | ||
2467 | |||
2468 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | ||
2469 | SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); | ||
2470 | skdev->timeout_slot[timo_slot] -= 1; | ||
2471 | |||
2472 | /* | ||
2473 | * Reset backpointer | ||
2474 | */ | ||
2475 | skreq->req = NULL; | ||
2476 | |||
2477 | /* | ||
2478 | * Reclaim the skd_request_context | ||
2479 | */ | ||
2480 | skreq->state = SKD_REQ_STATE_IDLE; | ||
2481 | skreq->id += SKD_ID_INCR; | ||
2482 | skreq->next = skdev->skreq_free_list; | ||
2483 | skdev->skreq_free_list = skreq; | ||
2484 | } | ||
2485 | |||
2486 | #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA | ||
2487 | |||
2488 | static void skd_do_inq_page_00(struct skd_device *skdev, | ||
2489 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2490 | volatile struct fit_comp_error_info *skerr, | ||
2491 | uint8_t *cdb, uint8_t *buf) | ||
2492 | { | ||
2493 | uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; | ||
2494 | |||
2495 | /* Caller requested "supported pages". The driver needs to insert | ||
2496 | * its page. | ||
2497 | */ | ||
2498 | pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n", | ||
2499 | skdev->name, __func__, __LINE__); | ||
2500 | |||
2501 | /* If the device rejected the request because the CDB was | ||
2502 | * improperly formed, then just leave. | ||
2503 | */ | ||
2504 | if (skcomp->status == SAM_STAT_CHECK_CONDITION && | ||
2505 | skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) | ||
2506 | return; | ||
2507 | |||
2508 | /* Get the amount of space the caller allocated */ | ||
2509 | max_bytes = (cdb[3] << 8) | cdb[4]; | ||
2510 | |||
2511 | /* Get the number of pages actually returned by the device */ | ||
2512 | drive_pages = (buf[2] << 8) | buf[3]; | ||
2513 | drive_bytes = drive_pages + 4; | ||
2514 | new_size = drive_pages + 1; | ||
2515 | |||
2516 | /* Supported pages must be in numerical order, so find where | ||
2517 | * the driver page needs to be inserted into the list of | ||
2518 | * pages returned by the device. | ||
2519 | */ | ||
2520 | for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { | ||
2521 | if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) | ||
2522 | return; /* Device already uses this page code; abort */ | ||
2523 | else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) | ||
2524 | break; | ||
2525 | } | ||
2526 | |||
2527 | if (insert_pt < max_bytes) { | ||
2528 | uint16_t u; | ||
2529 | |||
2530 | /* Shift everything up one byte to make room. */ | ||
2531 | for (u = new_size + 3; u > insert_pt; u--) | ||
2532 | buf[u] = buf[u - 1]; | ||
2533 | buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; | ||
2534 | |||
2535 | /* Increment num_returned_bytes by one, keeping SCSI (big-endian) byte order */ | ||
2536 | skcomp->num_returned_bytes = | ||
2537 | be32_to_cpu(skcomp->num_returned_bytes) + 1; | ||
2538 | skcomp->num_returned_bytes = | ||
2539 | be32_to_cpu(skcomp->num_returned_bytes); | ||
2540 | } | ||
2541 | |||
2542 | /* update page length field to reflect the driver's page too */ | ||
2543 | buf[2] = (uint8_t)((new_size >> 8) & 0xFF); | ||
2544 | buf[3] = (uint8_t)((new_size >> 0) & 0xFF); | ||
2545 | } | ||
2546 | |||
2547 | static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) | ||
2548 | { | ||
2549 | int pcie_reg; | ||
2550 | u16 pci_bus_speed; | ||
2551 | u8 pci_lanes; | ||
2552 | |||
2553 | pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
2554 | if (pcie_reg) { | ||
2555 | u16 linksta; | ||
2556 | pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); | ||
2557 | |||
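| /* PCI_EXP_LNKSTA: bits [3:0] = current link speed, bits [9:4] = link width */ | ||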
2558 | pci_bus_speed = linksta & 0xF; | ||
2559 | pci_lanes = (linksta & 0x3F0) >> 4; | ||
2560 | } else { | ||
2561 | *speed = STEC_LINK_UNKNOWN; | ||
2562 | *width = 0xFF; | ||
2563 | return; | ||
2564 | } | ||
2565 | |||
2566 | switch (pci_bus_speed) { | ||
2567 | case 1: | ||
2568 | *speed = STEC_LINK_2_5GTS; | ||
2569 | break; | ||
2570 | case 2: | ||
2571 | *speed = STEC_LINK_5GTS; | ||
2572 | break; | ||
2573 | case 3: | ||
2574 | *speed = STEC_LINK_8GTS; | ||
2575 | break; | ||
2576 | default: | ||
2577 | *speed = STEC_LINK_UNKNOWN; | ||
2578 | break; | ||
2579 | } | ||
2580 | |||
2581 | if (pci_lanes <= 0x20) | ||
2582 | *width = pci_lanes; | ||
2583 | else | ||
2584 | *width = 0xFF; | ||
2585 | } | ||
2586 | |||
2587 | static void skd_do_inq_page_da(struct skd_device *skdev, | ||
2588 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2589 | volatile struct fit_comp_error_info *skerr, | ||
2590 | uint8_t *cdb, uint8_t *buf) | ||
2591 | { | ||
2592 | struct pci_dev *pdev = skdev->pdev; | ||
2593 | unsigned max_bytes; | ||
2594 | struct driver_inquiry_data inq; | ||
2595 | u16 val; | ||
2596 | |||
2597 | pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n", | ||
2598 | skdev->name, __func__, __LINE__); | ||
2599 | |||
2600 | memset(&inq, 0, sizeof(inq)); | ||
2601 | |||
2602 | inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; | ||
2603 | |||
2604 | skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes); | ||
2605 | inq.pcie_bus_number = cpu_to_be16(pdev->bus->number); | ||
2606 | inq.pcie_device_number = PCI_SLOT(pdev->devfn); | ||
2607 | inq.pcie_function_number = PCI_FUNC(pdev->devfn); | ||
2608 | |||
2609 | pci_read_config_word(pdev, PCI_VENDOR_ID, &val); | ||
2610 | inq.pcie_vendor_id = cpu_to_be16(val); | ||
2611 | |||
2612 | pci_read_config_word(pdev, PCI_DEVICE_ID, &val); | ||
2613 | inq.pcie_device_id = cpu_to_be16(val); | ||
2614 | |||
2615 | pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val); | ||
2616 | inq.pcie_subsystem_vendor_id = cpu_to_be16(val); | ||
2617 | |||
2618 | pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val); | ||
2619 | inq.pcie_subsystem_device_id = cpu_to_be16(val); | ||
2620 | |||
2621 | /* Driver version, fixed length, padded with spaces on the right */ | ||
2622 | inq.driver_version_length = sizeof(inq.driver_version); | ||
2623 | memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); | ||
2624 | memcpy(inq.driver_version, DRV_VER_COMPL, | ||
2625 | min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); | ||
2626 | |||
2627 | inq.page_length = cpu_to_be16((sizeof(inq) - 4)); | ||
2628 | |||
2629 | /* Clear the error set by the device */ | ||
2630 | skcomp->status = SAM_STAT_GOOD; | ||
2631 | memset((void *)skerr, 0, sizeof(*skerr)); | ||
2632 | |||
2633 | /* copy response into output buffer */ | ||
2634 | max_bytes = (cdb[3] << 8) | cdb[4]; | ||
2635 | memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); | ||
2636 | |||
2637 | skcomp->num_returned_bytes = | ||
2638 | be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq))); | ||
2639 | } | ||
2640 | |||
2641 | static void skd_do_driver_inq(struct skd_device *skdev, | ||
2642 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2643 | volatile struct fit_comp_error_info *skerr, | ||
2644 | uint8_t *cdb, uint8_t *buf) | ||
2645 | { | ||
2646 | if (!buf) | ||
2647 | return; | ||
2648 | else if (cdb[0] != INQUIRY) | ||
2649 | return; /* Not an INQUIRY */ | ||
2650 | else if ((cdb[1] & 1) == 0) | ||
2651 | return; /* EVPD not set */ | ||
2652 | else if (cdb[2] == 0) | ||
2653 | /* Need to add driver's page to supported pages list */ | ||
2654 | skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); | ||
2655 | else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE) | ||
2656 | /* Caller requested driver's page */ | ||
2657 | skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); | ||
2658 | } | ||
2659 | |||
2660 | static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) | ||
2661 | { | ||
2662 | if (!sg) | ||
2663 | return NULL; | ||
2664 | if (!sg_page(sg)) | ||
2665 | return NULL; | ||
2666 | return sg_virt(sg); | ||
2667 | } | ||
2668 | |||
2669 | static void skd_process_scsi_inq(struct skd_device *skdev, | ||
2670 | volatile struct fit_completion_entry_v1 | ||
2671 | *skcomp, | ||
2672 | volatile struct fit_comp_error_info *skerr, | ||
2673 | struct skd_special_context *skspcl) | ||
2674 | { | ||
2675 | uint8_t *buf; | ||
2676 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | ||
2677 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | ||
2678 | |||
2679 | dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, | ||
2680 | skspcl->req.sg_data_dir); | ||
2681 | buf = skd_sg_1st_page_ptr(skspcl->req.sg); | ||
2682 | |||
2683 | if (buf) | ||
2684 | skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); | ||
2685 | } | ||
2686 | |||
2687 | |||
2688 | static int skd_isr_completion_posted(struct skd_device *skdev, | ||
2689 | int limit, int *enqueued) | ||
2690 | { | ||
2691 | volatile struct fit_completion_entry_v1 *skcmp = NULL; | ||
2692 | volatile struct fit_comp_error_info *skerr; | ||
2693 | u16 req_id; | ||
2694 | u32 req_slot; | ||
2695 | struct skd_request_context *skreq; | ||
2696 | u16 cmp_cntxt = 0; | ||
2697 | u8 cmp_status = 0; | ||
2698 | u8 cmp_cycle = 0; | ||
2699 | u32 cmp_bytes = 0; | ||
2700 | int rc = 0; | ||
2701 | int processed = 0; | ||
2702 | |||
2703 | for (;;) { | ||
2704 | SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); | ||
2705 | |||
2706 | skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; | ||
2707 | cmp_cycle = skcmp->cycle; | ||
2708 | cmp_cntxt = skcmp->tag; | ||
2709 | cmp_status = skcmp->status; | ||
2710 | cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); | ||
2711 | |||
2712 | skerr = &skdev->skerr_table[skdev->skcomp_ix]; | ||
2713 | |||
2714 | pr_debug("%s:%s:%d " | ||
2715 | "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " | ||
2716 | "busy=%d rbytes=0x%x proto=%d\n", | ||
2717 | skdev->name, __func__, __LINE__, skdev->skcomp_cycle, | ||
2718 | skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, | ||
2719 | skdev->in_flight, cmp_bytes, skdev->proto_ver); | ||
2720 | |||
2721 | if (cmp_cycle != skdev->skcomp_cycle) { | ||
2722 | pr_debug("%s:%s:%d end of completions\n", | ||
2723 | skdev->name, __func__, __LINE__); | ||
2724 | break; | ||
2725 | } | ||
2726 | /* | ||
2727 | * Update the completion queue head index and possibly | ||
2728 | * the completion cycle count. 8-bit wrap-around. | ||
2729 | */ | ||
2730 | skdev->skcomp_ix++; | ||
2731 | if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { | ||
2732 | skdev->skcomp_ix = 0; | ||
2733 | skdev->skcomp_cycle++; | ||
2734 | } | ||
2735 | |||
2736 | /* | ||
2737 | * The command context is a unique 32-bit ID. The low order | ||
2738 | * bits help locate the request. The request is usually a | ||
2739 | * r/w request (see skd_start() above) or a special request. | ||
2740 | */ | ||
2741 | req_id = cmp_cntxt; | ||
2742 | req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; | ||
2743 | |||
2744 | /* Is this other than a r/w request? */ | ||
2745 | if (req_slot >= skdev->num_req_context) { | ||
2746 | /* | ||
2747 | * This is not a completion for a r/w request. | ||
2748 | */ | ||
2749 | skd_complete_other(skdev, skcmp, skerr); | ||
2750 | continue; | ||
2751 | } | ||
2752 | |||
2753 | skreq = &skdev->skreq_table[req_slot]; | ||
2754 | |||
2755 | /* | ||
2756 | * Make sure the request ID for the slot matches. | ||
2757 | */ | ||
2758 | if (skreq->id != req_id) { | ||
2759 | pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n", | ||
2760 | skdev->name, __func__, __LINE__, | ||
2761 | req_id, skreq->id); | ||
2762 | { | ||
2763 | u16 new_id = cmp_cntxt; | ||
2764 | pr_err("(%s): Completion mismatch " | ||
2765 | "comp_id=0x%04x skreq=0x%04x new=0x%04x\n", | ||
2766 | skd_name(skdev), req_id, | ||
2767 | skreq->id, new_id); | ||
2768 | |||
2769 | continue; | ||
2770 | } | ||
2771 | } | ||
2772 | |||
2773 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); | ||
2774 | |||
2775 | if (skreq->state == SKD_REQ_STATE_ABORTED) { | ||
2776 | pr_debug("%s:%s:%d reclaim req %p id=%04x\n", | ||
2777 | skdev->name, __func__, __LINE__, | ||
2778 | skreq, skreq->id); | ||
2779 | /* a previously timed out command can | ||
2780 | * now be cleaned up */ | ||
2781 | skd_release_skreq(skdev, skreq); | ||
2782 | continue; | ||
2783 | } | ||
2784 | |||
2785 | skreq->completion = *skcmp; | ||
2786 | if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { | ||
2787 | skreq->err_info = *skerr; | ||
2788 | skd_log_check_status(skdev, cmp_status, skerr->key, | ||
2789 | skerr->code, skerr->qual, | ||
2790 | skerr->fruc); | ||
2791 | } | ||
2792 | /* Release DMA resources for the request. */ | ||
2793 | if (skreq->n_sg > 0) | ||
2794 | skd_postop_sg_list(skdev, skreq); | ||
2795 | |||
2796 | if (!skreq->req) { | ||
2797 | pr_debug("%s:%s:%d NULL backptr skdreq %p, " | ||
2798 | "req=0x%x req_id=0x%x\n", | ||
2799 | skdev->name, __func__, __LINE__, | ||
2800 | skreq, skreq->id, req_id); | ||
2801 | } else { | ||
2802 | /* | ||
2803 | * Capture the outcome and post it back to the | ||
2804 | * native request. | ||
2805 | */ | ||
2806 | if (likely(cmp_status == SAM_STAT_GOOD)) | ||
2807 | skd_end_request(skdev, skreq, 0); | ||
2808 | else | ||
2809 | skd_resolve_req_exception(skdev, skreq); | ||
2810 | } | ||
2811 | |||
2812 | /* | ||
2813 | * Release the skreq, its FIT msg (if one), timeout slot, | ||
2814 | * and queue depth. | ||
2815 | */ | ||
2816 | skd_release_skreq(skdev, skreq); | ||
2817 | |||
2818 | /* skd_isr_comp_limit == 0 means no limit */ | ||
2819 | if (limit) { | ||
2820 | if (++processed >= limit) { | ||
2821 | rc = 1; | ||
2822 | break; | ||
2823 | } | ||
2824 | } | ||
2825 | } | ||
2826 | |||
2827 | if ((skdev->state == SKD_DRVR_STATE_PAUSING) | ||
2828 | && (skdev->in_flight) == 0) { | ||
2829 | skdev->state = SKD_DRVR_STATE_PAUSED; | ||
2830 | wake_up_interruptible(&skdev->waitq); | ||
2831 | } | ||
2832 | |||
2833 | return rc; | ||
2834 | } | ||
2835 | |||
2836 | static void skd_complete_other(struct skd_device *skdev, | ||
2837 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2838 | volatile struct fit_comp_error_info *skerr) | ||
2839 | { | ||
2840 | u32 req_id = 0; | ||
2841 | u32 req_table; | ||
2842 | u32 req_slot; | ||
2843 | struct skd_special_context *skspcl; | ||
2844 | |||
2845 | req_id = skcomp->tag; | ||
2846 | req_table = req_id & SKD_ID_TABLE_MASK; | ||
2847 | req_slot = req_id & SKD_ID_SLOT_MASK; | ||
2848 | |||
2849 | pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n", | ||
2850 | skdev->name, __func__, __LINE__, | ||
2851 | req_table, req_id, req_slot); | ||
2852 | |||
2853 | /* | ||
2854 | * Based on the request id, determine how to dispatch this completion. | ||
2855 | * This switch/case is finding the good cases and forwarding the | ||
2856 | * completion entry. Errors are reported below the switch. | ||
2857 | */ | ||
2858 | switch (req_table) { | ||
2859 | case SKD_ID_RW_REQUEST: | ||
2860 | /* | ||
2861 | * The caller, skd_isr_completion_posted() above, | ||
2862 | * handles r/w requests. The only way we get here | ||
2863 | * is if the req_slot is out of bounds. | ||
2864 | */ | ||
2865 | break; | ||
2866 | |||
2867 | case SKD_ID_SPECIAL_REQUEST: | ||
2868 | /* | ||
2869 | * Make sure the req_slot is in bounds and that the id | ||
2870 | * matches. | ||
2871 | */ | ||
2872 | if (req_slot < skdev->n_special) { | ||
2873 | skspcl = &skdev->skspcl_table[req_slot]; | ||
2874 | if (skspcl->req.id == req_id && | ||
2875 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | ||
2876 | skd_complete_special(skdev, | ||
2877 | skcomp, skerr, skspcl); | ||
2878 | return; | ||
2879 | } | ||
2880 | } | ||
2881 | break; | ||
2882 | |||
2883 | case SKD_ID_INTERNAL: | ||
2884 | if (req_slot == 0) { | ||
2885 | skspcl = &skdev->internal_skspcl; | ||
2886 | if (skspcl->req.id == req_id && | ||
2887 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | ||
2888 | skd_complete_internal(skdev, | ||
2889 | skcomp, skerr, skspcl); | ||
2890 | return; | ||
2891 | } | ||
2892 | } | ||
2893 | break; | ||
2894 | |||
2895 | case SKD_ID_FIT_MSG: | ||
2896 | /* | ||
2897 | * These id's should never appear in a completion record. | ||
2898 | */ | ||
2899 | break; | ||
2900 | |||
2901 | default: | ||
2902 | /* | ||
2903 | * These id's should never appear anywhere. | ||
2904 | */ | ||
2905 | break; | ||
2906 | } | ||
2907 | |||
2908 | /* | ||
2909 | * If we get here it is a bad or stale id. | ||
2910 | */ | ||
2911 | } | ||
2912 | |||
2913 | static void skd_complete_special(struct skd_device *skdev, | ||
2914 | volatile struct fit_completion_entry_v1 | ||
2915 | *skcomp, | ||
2916 | volatile struct fit_comp_error_info *skerr, | ||
2917 | struct skd_special_context *skspcl) | ||
2918 | { | ||
2919 | pr_debug("%s:%s:%d completing special request %p\n", | ||
2920 | skdev->name, __func__, __LINE__, skspcl); | ||
2921 | if (skspcl->orphaned) { | ||
2922 | /* Discard orphaned request */ | ||
2923 | /* ?: Can this release directly or does it need | ||
2924 | * to use a worker? */ | ||
2925 | pr_debug("%s:%s:%d release orphaned %p\n", | ||
2926 | skdev->name, __func__, __LINE__, skspcl); | ||
2927 | skd_release_special(skdev, skspcl); | ||
2928 | return; | ||
2929 | } | ||
2930 | |||
2931 | skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); | ||
2932 | |||
2933 | skspcl->req.state = SKD_REQ_STATE_COMPLETED; | ||
2934 | skspcl->req.completion = *skcomp; | ||
2935 | skspcl->req.err_info = *skerr; | ||
2936 | |||
2937 | skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, | ||
2938 | skerr->code, skerr->qual, skerr->fruc); | ||
2939 | |||
2940 | wake_up_interruptible(&skdev->waitq); | ||
2941 | } | ||
2942 | |||
2943 | /* assume spinlock is already held */ | ||
2944 | static void skd_release_special(struct skd_device *skdev, | ||
2945 | struct skd_special_context *skspcl) | ||
2946 | { | ||
2947 | int i, was_depleted; | ||
2948 | |||
2949 | for (i = 0; i < skspcl->req.n_sg; i++) { | ||
2950 | struct page *page = sg_page(&skspcl->req.sg[i]); | ||
2951 | __free_page(page); | ||
2952 | } | ||
2953 | |||
2954 | was_depleted = (skdev->skspcl_free_list == NULL); | ||
2955 | |||
2956 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
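| /* bump the id's generation bits so a stale completion no longer matches */ | ||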
2957 | skspcl->req.id += SKD_ID_INCR; | ||
2958 | skspcl->req.next = | ||
2959 | (struct skd_request_context *)skdev->skspcl_free_list; | ||
2960 | skdev->skspcl_free_list = (struct skd_special_context *)skspcl; | ||
2961 | |||
2962 | if (was_depleted) { | ||
2963 | pr_debug("%s:%s:%d skspcl was depleted\n", | ||
2964 | skdev->name, __func__, __LINE__); | ||
2965 | /* Free list was depleted. There might be waiters. */ | ||
2966 | wake_up_interruptible(&skdev->waitq); | ||
2967 | } | ||
2968 | } | ||
2969 | |||
2970 | static void skd_reset_skcomp(struct skd_device *skdev) | ||
2971 | { | ||
2972 | u32 nbytes; | ||
2973 | struct fit_completion_entry_v1 *skcomp; | ||
2974 | |||
2975 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | ||
2976 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | ||
2977 | |||
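| /* the completion and error tables share one coherent allocation; clear both */ | ||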
2978 | memset(skdev->skcomp_table, 0, nbytes); | ||
2979 | |||
2980 | skdev->skcomp_ix = 0; | ||
2981 | skdev->skcomp_cycle = 1; | ||
2982 | } | ||
2983 | |||
2984 | /* | ||
2985 | ***************************************************************************** | ||
2986 | * INTERRUPTS | ||
2987 | ***************************************************************************** | ||
2988 | */ | ||
2989 | static void skd_completion_worker(struct work_struct *work) | ||
2990 | { | ||
2991 | struct skd_device *skdev = | ||
2992 | container_of(work, struct skd_device, completion_worker); | ||
2993 | unsigned long flags; | ||
2994 | int flush_enqueued = 0; | ||
2995 | |||
2996 | spin_lock_irqsave(&skdev->lock, flags); | ||
2997 | |||
2998 | /* | ||
2999 | * pass in limit=0, which means no limit; | ||
3000 | * process everything in the completion queue | ||
3001 | */ | ||
3002 | skd_isr_completion_posted(skdev, 0, &flush_enqueued); | ||
3003 | skd_request_fn(skdev->queue); | ||
3004 | |||
3005 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3006 | } | ||
3007 | |||
3008 | static void skd_isr_msg_from_dev(struct skd_device *skdev); | ||
3009 | |||
3010 | static irqreturn_t | ||
3011 | skd_isr(int irq, void *ptr) | ||
3012 | { | ||
3013 | struct skd_device *skdev; | ||
3014 | u32 intstat; | ||
3015 | u32 ack; | ||
3016 | int rc = 0; | ||
3017 | int deferred = 0; | ||
3018 | int flush_enqueued = 0; | ||
3019 | |||
3020 | skdev = (struct skd_device *)ptr; | ||
3021 | spin_lock(&skdev->lock); | ||
3022 | |||
3023 | for (;;) { | ||
3024 | intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); | ||
3025 | |||
3026 | ack = FIT_INT_DEF_MASK; | ||
3027 | ack &= intstat; | ||
3028 | |||
3029 | pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n", | ||
3030 | skdev->name, __func__, __LINE__, intstat, ack); | ||
3031 | |||
3032 | /* As long as there is an interrupt pending on the device, keep | ||
3033 | * looping. When none remain, get out; if we never did any | ||
3034 | * processing, defer to the completion handler anyway. | ||
3035 | */ | ||
3036 | if (ack == 0) { | ||
3037 | /* No interrupts on device, but run the completion | ||
3038 | * processor anyway? | ||
3039 | */ | ||
3040 | if (rc == 0) | ||
3041 | if (likely (skdev->state | ||
3042 | == SKD_DRVR_STATE_ONLINE)) | ||
3043 | deferred = 1; | ||
3044 | break; | ||
3045 | } | ||
3046 | |||
3047 | rc = IRQ_HANDLED; | ||
3048 | |||
3049 | SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); | ||
3050 | |||
3051 | if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && | ||
3052 | (skdev->state != SKD_DRVR_STATE_STOPPING))) { | ||
3053 | if (intstat & FIT_ISH_COMPLETION_POSTED) { | ||
3054 | /* | ||
3055 | * If we have already deferred completion | ||
3056 | * processing, don't bother running it again | ||
3057 | */ | ||
3058 | if (deferred == 0) | ||
3059 | deferred = | ||
3060 | skd_isr_completion_posted(skdev, | ||
3061 | skd_isr_comp_limit, &flush_enqueued); | ||
3062 | } | ||
3063 | |||
3064 | if (intstat & FIT_ISH_FW_STATE_CHANGE) { | ||
3065 | skd_isr_fwstate(skdev); | ||
3066 | if (skdev->state == SKD_DRVR_STATE_FAULT || | ||
3067 | skdev->state == | ||
3068 | SKD_DRVR_STATE_DISAPPEARED) { | ||
3069 | spin_unlock(&skdev->lock); | ||
3070 | return rc; | ||
3071 | } | ||
3072 | } | ||
3073 | |||
3074 | if (intstat & FIT_ISH_MSG_FROM_DEV) | ||
3075 | skd_isr_msg_from_dev(skdev); | ||
3076 | } | ||
3077 | } | ||
3078 | |||
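| /* If completion processing re-queued work, run the request fn now; | ||
| * remaining completion work (per-ISR limit reached) is deferred to | ||
| * the worker, otherwise kick the request fn to keep the queue moving. | ||
| */ | ||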
3079 | if (unlikely(flush_enqueued)) | ||
3080 | skd_request_fn(skdev->queue); | ||
3081 | |||
3082 | if (deferred) | ||
3083 | schedule_work(&skdev->completion_worker); | ||
3084 | else if (!flush_enqueued) | ||
3085 | skd_request_fn(skdev->queue); | ||
3086 | |||
3087 | spin_unlock(&skdev->lock); | ||
3088 | |||
3089 | return rc; | ||
3090 | } | ||
3091 | |||
3092 | static void skd_drive_fault(struct skd_device *skdev) | ||
3093 | { | ||
3094 | skdev->state = SKD_DRVR_STATE_FAULT; | ||
3095 | pr_err("(%s): Drive FAULT\n", skd_name(skdev)); | ||
3096 | } | ||
3097 | |||
3098 | static void skd_drive_disappeared(struct skd_device *skdev) | ||
3099 | { | ||
3100 | skdev->state = SKD_DRVR_STATE_DISAPPEARED; | ||
3101 | pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); | ||
3102 | } | ||
3103 | |||
3104 | static void skd_isr_fwstate(struct skd_device *skdev) | ||
3105 | { | ||
3106 | u32 sense; | ||
3107 | u32 state; | ||
3108 | u32 mtd; | ||
3109 | int prev_driver_state = skdev->state; | ||
3110 | |||
3111 | sense = SKD_READL(skdev, FIT_STATUS); | ||
3112 | state = sense & FIT_SR_DRIVE_STATE_MASK; | ||
3113 | |||
3114 | pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", | ||
3115 | skd_name(skdev), | ||
3116 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | ||
3117 | skd_drive_state_to_str(state), state); | ||
3118 | |||
3119 | skdev->drive_state = state; | ||
3120 | |||
3121 | switch (skdev->drive_state) { | ||
3122 | case FIT_SR_DRIVE_INIT: | ||
3123 | if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { | ||
3124 | skd_disable_interrupts(skdev); | ||
3125 | break; | ||
3126 | } | ||
3127 | if (skdev->state == SKD_DRVR_STATE_RESTARTING) | ||
3128 | skd_recover_requests(skdev, 0); | ||
3129 | if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { | ||
3130 | skdev->timer_countdown = SKD_STARTING_TIMO; | ||
3131 | skdev->state = SKD_DRVR_STATE_STARTING; | ||
3132 | skd_soft_reset(skdev); | ||
3133 | break; | ||
3134 | } | ||
3135 | mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); | ||
3136 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3137 | skdev->last_mtd = mtd; | ||
3138 | break; | ||
3139 | |||
3140 | case FIT_SR_DRIVE_ONLINE: | ||
3141 | skdev->cur_max_queue_depth = skd_max_queue_depth; | ||
3142 | if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) | ||
3143 | skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; | ||
3144 | |||
3145 | skdev->queue_low_water_mark = | ||
3146 | skdev->cur_max_queue_depth * 2 / 3 + 1; | ||
3147 | if (skdev->queue_low_water_mark < 1) | ||
3148 | skdev->queue_low_water_mark = 1; | ||
3149 | pr_info( | ||
3150 | "(%s): Queue depth limit=%d dev=%d lowat=%d\n", | ||
3151 | skd_name(skdev), | ||
3152 | skdev->cur_max_queue_depth, | ||
3153 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); | ||
3154 | |||
3155 | skd_refresh_device_data(skdev); | ||
3156 | break; | ||
3157 | |||
3158 | case FIT_SR_DRIVE_BUSY: | ||
3159 | skdev->state = SKD_DRVR_STATE_BUSY; | ||
3160 | skdev->timer_countdown = SKD_BUSY_TIMO; | ||
3161 | skd_quiesce_dev(skdev); | ||
3162 | break; | ||
3163 | case FIT_SR_DRIVE_BUSY_SANITIZE: | ||
3164 | /* set timer for 3 seconds; we'll abort any unfinished | ||
3165 | * commands after it expires | ||
3166 | */ | ||
3167 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | ||
3168 | skdev->timer_countdown = SKD_TIMER_SECONDS(3); | ||
3169 | blk_start_queue(skdev->queue); | ||
3170 | break; | ||
3171 | case FIT_SR_DRIVE_BUSY_ERASE: | ||
3172 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | ||
3173 | skdev->timer_countdown = SKD_BUSY_TIMO; | ||
3174 | break; | ||
3175 | case FIT_SR_DRIVE_OFFLINE: | ||
3176 | skdev->state = SKD_DRVR_STATE_IDLE; | ||
3177 | break; | ||
3178 | case FIT_SR_DRIVE_SOFT_RESET: | ||
3179 | switch (skdev->state) { | ||
3180 | case SKD_DRVR_STATE_STARTING: | ||
3181 | case SKD_DRVR_STATE_RESTARTING: | ||
3182 | /* Expected by a caller of skd_soft_reset() */ | ||
3183 | break; | ||
3184 | default: | ||
3185 | skdev->state = SKD_DRVR_STATE_RESTARTING; | ||
3186 | break; | ||
3187 | } | ||
3188 | break; | ||
3189 | case FIT_SR_DRIVE_FW_BOOTING: | ||
3190 | pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n", | ||
3191 | skdev->name, __func__, __LINE__, skdev->name); | ||
3192 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; | ||
3193 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | ||
3194 | break; | ||
3195 | |||
3196 | case FIT_SR_DRIVE_DEGRADED: | ||
3197 | case FIT_SR_PCIE_LINK_DOWN: | ||
3198 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | ||
3199 | break; | ||
3200 | |||
3201 | case FIT_SR_DRIVE_FAULT: | ||
3202 | skd_drive_fault(skdev); | ||
3203 | skd_recover_requests(skdev, 0); | ||
3204 | blk_start_queue(skdev->queue); | ||
3205 | break; | ||
3206 | |||
3207 | /* PCIe bus returned all Fs? */ | ||
3208 | case 0xFF: | ||
3209 | pr_info("(%s): state=0x%x sense=0x%x\n", | ||
3210 | skd_name(skdev), state, sense); | ||
3211 | skd_drive_disappeared(skdev); | ||
3212 | skd_recover_requests(skdev, 0); | ||
3213 | blk_start_queue(skdev->queue); | ||
3214 | break; | ||
3215 | default: | ||
3216 | /* | ||
3217 | * Unknown FW state. Wait for a state we recognize. | ||
3218 | */ | ||
3219 | break; | ||
3220 | } | ||
3221 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | ||
3222 | skd_name(skdev), | ||
3223 | skd_skdev_state_to_str(prev_driver_state), prev_driver_state, | ||
3224 | skd_skdev_state_to_str(skdev->state), skdev->state); | ||
3225 | } | ||
3226 | |||
3227 | static void skd_recover_requests(struct skd_device *skdev, int requeue) | ||
3228 | { | ||
3229 | int i; | ||
3230 | |||
3231 | for (i = 0; i < skdev->num_req_context; i++) { | ||
3232 | struct skd_request_context *skreq = &skdev->skreq_table[i]; | ||
3233 | |||
3234 | if (skreq->state == SKD_REQ_STATE_BUSY) { | ||
3235 | skd_log_skreq(skdev, skreq, "recover"); | ||
3236 | |||
3237 | SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); | ||
3238 | SKD_ASSERT(skreq->req != NULL); | ||
3239 | |||
3240 | /* Release DMA resources for the request. */ | ||
3241 | if (skreq->n_sg > 0) | ||
3242 | skd_postop_sg_list(skdev, skreq); | ||
3243 | |||
3244 | if (requeue && | ||
3245 | (unsigned long) ++skreq->req->special < | ||
3246 | SKD_MAX_RETRIES) | ||
3247 | blk_requeue_request(skdev->queue, skreq->req); | ||
3248 | else | ||
3249 | skd_end_request(skdev, skreq, -EIO); | ||
3250 | |||
3251 | skreq->req = NULL; | ||
3252 | |||
3253 | skreq->state = SKD_REQ_STATE_IDLE; | ||
3254 | skreq->id += SKD_ID_INCR; | ||
3255 | } | ||
3256 | if (i > 0) | ||
3257 | skreq[-1].next = skreq; | ||
3258 | skreq->next = NULL; | ||
3259 | } | ||
3260 | skdev->skreq_free_list = skdev->skreq_table; | ||
3261 | |||
3262 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | ||
3263 | struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; | ||
3264 | |||
3265 | if (skmsg->state == SKD_MSG_STATE_BUSY) { | ||
3266 | skd_log_skmsg(skdev, skmsg, "salvaged"); | ||
3267 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); | ||
3268 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
3269 | skmsg->id += SKD_ID_INCR; | ||
3270 | } | ||
3271 | if (i > 0) | ||
3272 | skmsg[-1].next = skmsg; | ||
3273 | skmsg->next = NULL; | ||
3274 | } | ||
3275 | skdev->skmsg_free_list = skdev->skmsg_table; | ||
3276 | |||
3277 | for (i = 0; i < skdev->n_special; i++) { | ||
3278 | struct skd_special_context *skspcl = &skdev->skspcl_table[i]; | ||
3279 | |||
3280 | /* If orphaned, reclaim it because it has already been reported | ||
3281 | * to the process as an error (it was just waiting for | ||
3282 | * a completion that didn't come, and now it will never come). | ||
3283 | * If busy, change to a state that will cause it to error | ||
3284 | * out in the wait routine and let it do the normal | ||
3285 | * reporting and reclaiming. | ||
3286 | */ | ||
3287 | if (skspcl->req.state == SKD_REQ_STATE_BUSY) { | ||
3288 | if (skspcl->orphaned) { | ||
3289 | pr_debug("%s:%s:%d orphaned %p\n", | ||
3290 | skdev->name, __func__, __LINE__, | ||
3291 | skspcl); | ||
3292 | skd_release_special(skdev, skspcl); | ||
3293 | } else { | ||
3294 | pr_debug("%s:%s:%d not orphaned %p\n", | ||
3295 | skdev->name, __func__, __LINE__, | ||
3296 | skspcl); | ||
3297 | skspcl->req.state = SKD_REQ_STATE_ABORTED; | ||
3298 | } | ||
3299 | } | ||
3300 | } | ||
3301 | skdev->skspcl_free_list = skdev->skspcl_table; | ||
3302 | |||
3303 | for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) | ||
3304 | skdev->timeout_slot[i] = 0; | ||
3305 | |||
3306 | skdev->in_flight = 0; | ||
3307 | } | ||
3308 | |||
3309 | static void skd_isr_msg_from_dev(struct skd_device *skdev) | ||
3310 | { | ||
3311 | u32 mfd; | ||
3312 | u32 mtd; | ||
3313 | u32 data; | ||
3314 | |||
3315 | mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | ||
3316 | |||
3317 | pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n", | ||
3318 | skdev->name, __func__, __LINE__, mfd, skdev->last_mtd); | ||
3319 | |||
3320 | /* ignore any mtd that is an ack for something we didn't send */ | ||
3321 | if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) | ||
3322 | return; | ||
3323 | |||
3324 | switch (FIT_MXD_TYPE(mfd)) { | ||
3325 | case FIT_MTD_FITFW_INIT: | ||
3326 | skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); | ||
3327 | |||
3328 | if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { | ||
3329 | pr_err("(%s): protocol mismatch\n", | ||
3330 | skdev->name); | ||
3331 | pr_err("(%s): got=%d support=%d\n", | ||
3332 | skdev->name, skdev->proto_ver, | ||
3333 | FIT_PROTOCOL_VERSION_1); | ||
3334 | pr_err("(%s): please upgrade driver\n", | ||
3335 | skdev->name); | ||
3336 | skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; | ||
3337 | skd_soft_reset(skdev); | ||
3338 | break; | ||
3339 | } | ||
3340 | mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); | ||
3341 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3342 | skdev->last_mtd = mtd; | ||
3343 | break; | ||
3344 | |||
3345 | case FIT_MTD_GET_CMDQ_DEPTH: | ||
3346 | skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); | ||
3347 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, | ||
3348 | SKD_N_COMPLETION_ENTRY); | ||
3349 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3350 | skdev->last_mtd = mtd; | ||
3351 | break; | ||
3352 | |||
3353 | case FIT_MTD_SET_COMPQ_DEPTH: | ||
3354 | SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); | ||
3355 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); | ||
3356 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3357 | skdev->last_mtd = mtd; | ||
3358 | break; | ||
3359 | |||
3360 | case FIT_MTD_SET_COMPQ_ADDR: | ||
3361 | skd_reset_skcomp(skdev); | ||
3362 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); | ||
3363 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3364 | skdev->last_mtd = mtd; | ||
3365 | break; | ||
3366 | |||
3367 | case FIT_MTD_CMD_LOG_HOST_ID: | ||
3368 | skdev->connect_time_stamp = get_seconds(); | ||
3369 | data = skdev->connect_time_stamp & 0xFFFF; | ||
3370 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); | ||
3371 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3372 | skdev->last_mtd = mtd; | ||
3373 | break; | ||
3374 | |||
3375 | case FIT_MTD_CMD_LOG_TIME_STAMP_LO: | ||
3376 | skdev->drive_jiffies = FIT_MXD_DATA(mfd); | ||
3377 | data = (skdev->connect_time_stamp >> 16) & 0xFFFF; | ||
3378 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); | ||
3379 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3380 | skdev->last_mtd = mtd; | ||
3381 | break; | ||
3382 | |||
3383 | case FIT_MTD_CMD_LOG_TIME_STAMP_HI: | ||
3384 | skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); | ||
3385 | mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); | ||
3386 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3387 | skdev->last_mtd = mtd; | ||
3388 | |||
3389 | pr_err("(%s): Time sync driver=0x%x device=0x%x\n", | ||
3390 | skd_name(skdev), | ||
3391 | skdev->connect_time_stamp, skdev->drive_jiffies); | ||
3392 | break; | ||
3393 | |||
3394 | case FIT_MTD_ARM_QUEUE: | ||
3395 | skdev->last_mtd = 0; | ||
3396 | /* | ||
3397 | * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. | ||
3398 | */ | ||
3399 | break; | ||
3400 | |||
3401 | default: | ||
3402 | break; | ||
3403 | } | ||
3404 | } | ||
3405 | |||
3406 | static void skd_disable_interrupts(struct skd_device *skdev) | ||
3407 | { | ||
3408 | u32 sense; | ||
3409 | |||
3410 | sense = SKD_READL(skdev, FIT_CONTROL); | ||
3411 | sense &= ~FIT_CR_ENABLE_INTERRUPTS; | ||
3412 | SKD_WRITEL(skdev, sense, FIT_CONTROL); | ||
3413 | pr_debug("%s:%s:%d sense 0x%x\n", | ||
3414 | skdev->name, __func__, __LINE__, sense); | ||
3415 | |||
3416 | /* Note that all 1s are written. A 1-bit means | ||
3417 | * disable, a 0-bit means enable. | ||
3418 | */ | ||
3419 | SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); | ||
3420 | } | ||
3421 | |||
3422 | static void skd_enable_interrupts(struct skd_device *skdev) | ||
3423 | { | ||
3424 | u32 val; | ||
3425 | |||
3426 | /* unmask interrupts first */ | ||
3427 | val = FIT_ISH_FW_STATE_CHANGE + | ||
3428 | FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; | ||
3429 | |||
3430 | /* Note that the complement of the mask is written. A 1-bit means | ||
3431 | * disable, a 0-bit means enable. */ | ||
3432 | SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); | ||
3433 | pr_debug("%s:%s:%d interrupt mask=0x%x\n", | ||
3434 | skdev->name, __func__, __LINE__, ~val); | ||
3435 | |||
3436 | val = SKD_READL(skdev, FIT_CONTROL); | ||
3437 | val |= FIT_CR_ENABLE_INTERRUPTS; | ||
3438 | pr_debug("%s:%s:%d control=0x%x\n", | ||
3439 | skdev->name, __func__, __LINE__, val); | ||
3440 | SKD_WRITEL(skdev, val, FIT_CONTROL); | ||
3441 | } | ||
3442 | |||
3443 | /* | ||
3444 | ***************************************************************************** | ||
3445 | * START, STOP, RESTART, QUIESCE, UNQUIESCE | ||
3446 | ***************************************************************************** | ||
3447 | */ | ||
3448 | |||
3449 | static void skd_soft_reset(struct skd_device *skdev) | ||
3450 | { | ||
3451 | u32 val; | ||
3452 | |||
3453 | val = SKD_READL(skdev, FIT_CONTROL); | ||
3454 | val |= (FIT_CR_SOFT_RESET); | ||
3455 | pr_debug("%s:%s:%d control=0x%x\n", | ||
3456 | skdev->name, __func__, __LINE__, val); | ||
3457 | SKD_WRITEL(skdev, val, FIT_CONTROL); | ||
3458 | } | ||
3459 | |||
3460 | static void skd_start_device(struct skd_device *skdev) | ||
3461 | { | ||
3462 | unsigned long flags; | ||
3463 | u32 sense; | ||
3464 | u32 state; | ||
3465 | |||
3466 | spin_lock_irqsave(&skdev->lock, flags); | ||
3467 | |||
3468 | /* ack all ghost interrupts */ | ||
3469 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | ||
3470 | |||
3471 | sense = SKD_READL(skdev, FIT_STATUS); | ||
3472 | |||
3473 | pr_debug("%s:%s:%d initial status=0x%x\n", | ||
3474 | skdev->name, __func__, __LINE__, sense); | ||
3475 | |||
3476 | state = sense & FIT_SR_DRIVE_STATE_MASK; | ||
3477 | skdev->drive_state = state; | ||
3478 | skdev->last_mtd = 0; | ||
3479 | |||
3480 | skdev->state = SKD_DRVR_STATE_STARTING; | ||
3481 | skdev->timer_countdown = SKD_STARTING_TIMO; | ||
3482 | |||
3483 | skd_enable_interrupts(skdev); | ||
3484 | |||
3485 | switch (skdev->drive_state) { | ||
3486 | case FIT_SR_DRIVE_OFFLINE: | ||
3487 | pr_err("(%s): Drive offline...\n", skd_name(skdev)); | ||
3488 | break; | ||
3489 | |||
3490 | case FIT_SR_DRIVE_FW_BOOTING: | ||
3491 | pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n", | ||
3492 | skdev->name, __func__, __LINE__, skdev->name); | ||
3493 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; | ||
3494 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | ||
3495 | break; | ||
3496 | |||
3497 | case FIT_SR_DRIVE_BUSY_SANITIZE: | ||
3498 | pr_info("(%s): Start: BUSY_SANITIZE\n", | ||
3499 | skd_name(skdev)); | ||
3500 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | ||
3501 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | ||
3502 | break; | ||
3503 | |||
3504 | case FIT_SR_DRIVE_BUSY_ERASE: | ||
3505 | pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); | ||
3506 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | ||
3507 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | ||
3508 | break; | ||
3509 | |||
3510 | case FIT_SR_DRIVE_INIT: | ||
3511 | case FIT_SR_DRIVE_ONLINE: | ||
3512 | skd_soft_reset(skdev); | ||
3513 | break; | ||
3514 | |||
3515 | case FIT_SR_DRIVE_BUSY: | ||
3516 | pr_err("(%s): Drive Busy...\n", skd_name(skdev)); | ||
3517 | skdev->state = SKD_DRVR_STATE_BUSY; | ||
3518 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | ||
3519 | break; | ||
3520 | |||
3521 | case FIT_SR_DRIVE_SOFT_RESET: | ||
3522 | pr_err("(%s) drive soft reset in prog\n", | ||
3523 | skd_name(skdev)); | ||
3524 | break; | ||
3525 | |||
3526 | case FIT_SR_DRIVE_FAULT: | ||
3527 | /* Fault state is bad...soft reset won't do it... | ||
3528 | * Hard reset, maybe, but does it work on device? | ||
3529 | * For now, just fault so the system doesn't hang. | ||
3530 | */ | ||
3531 | skd_drive_fault(skdev); | ||
3532 | /* start the queue so we can respond with errors to requests */ | ||
3533 | pr_debug("%s:%s:%d starting %s queue\n", | ||
3534 | skdev->name, __func__, __LINE__, skdev->name); | ||
3535 | blk_start_queue(skdev->queue); | ||
3536 | skdev->gendisk_on = -1; | ||
3537 | wake_up_interruptible(&skdev->waitq); | ||
3538 | break; | ||
3539 | |||
3540 | case 0xFF: | ||
3541 | /* Most likely the device isn't there or isn't responding | ||
3542 | * to the BAR1 addresses. */ | ||
3543 | skd_drive_disappeared(skdev); | ||
3544 | /* start the queue so we can respond with errors to requests */ | ||
3545 | pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", | ||
3546 | skdev->name, __func__, __LINE__, skdev->name); | ||
3547 | blk_start_queue(skdev->queue); | ||
3548 | skdev->gendisk_on = -1; | ||
3549 | wake_up_interruptible(&skdev->waitq); | ||
3550 | break; | ||
3551 | |||
3552 | default: | ||
3553 | pr_err("(%s) Start: unknown state %x\n", | ||
3554 | skd_name(skdev), skdev->drive_state); | ||
3555 | break; | ||
3556 | } | ||
3557 | |||
3558 | state = SKD_READL(skdev, FIT_CONTROL); | ||
3559 | pr_debug("%s:%s:%d FIT Control Status=0x%x\n", | ||
3560 | skdev->name, __func__, __LINE__, state); | ||
3561 | |||
3562 | state = SKD_READL(skdev, FIT_INT_STATUS_HOST); | ||
3563 | pr_debug("%s:%s:%d Intr Status=0x%x\n", | ||
3564 | skdev->name, __func__, __LINE__, state); | ||
3565 | |||
3566 | state = SKD_READL(skdev, FIT_INT_MASK_HOST); | ||
3567 | pr_debug("%s:%s:%d Intr Mask=0x%x\n", | ||
3568 | skdev->name, __func__, __LINE__, state); | ||
3569 | |||
3570 | state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | ||
3571 | pr_debug("%s:%s:%d Msg from Dev=0x%x\n", | ||
3572 | skdev->name, __func__, __LINE__, state); | ||
3573 | |||
3574 | state = SKD_READL(skdev, FIT_HW_VERSION); | ||
3575 | pr_debug("%s:%s:%d HW version=0x%x\n", | ||
3576 | skdev->name, __func__, __LINE__, state); | ||
3577 | |||
3578 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3579 | } | ||
3580 | |||
3581 | static void skd_stop_device(struct skd_device *skdev) | ||
3582 | { | ||
3583 | unsigned long flags; | ||
3584 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | ||
3585 | u32 dev_state; | ||
3586 | int i; | ||
3587 | |||
3588 | spin_lock_irqsave(&skdev->lock, flags); | ||
3589 | |||
3590 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | ||
3591 | pr_err("(%s): skd_stop_device not online no sync\n", | ||
3592 | skd_name(skdev)); | ||
3593 | goto stop_out; | ||
3594 | } | ||
3595 | |||
3596 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) { | ||
3597 | pr_err("(%s): skd_stop_device no special\n", | ||
3598 | skd_name(skdev)); | ||
3599 | goto stop_out; | ||
3600 | } | ||
3601 | |||
3602 | skdev->state = SKD_DRVR_STATE_SYNCING; | ||
3603 | skdev->sync_done = 0; | ||
3604 | |||
3605 | skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); | ||
3606 | |||
3607 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3608 | |||
3609 | wait_event_interruptible_timeout(skdev->waitq, | ||
3610 | (skdev->sync_done), (10 * HZ)); | ||
3611 | |||
3612 | spin_lock_irqsave(&skdev->lock, flags); | ||
3613 | |||
3614 | switch (skdev->sync_done) { | ||
3615 | case 0: | ||
3616 | pr_err("(%s): skd_stop_device no sync\n", | ||
3617 | skd_name(skdev)); | ||
3618 | break; | ||
3619 | case 1: | ||
3620 | pr_err("(%s): skd_stop_device sync done\n", | ||
3621 | skd_name(skdev)); | ||
3622 | break; | ||
3623 | default: | ||
3624 | pr_err("(%s): skd_stop_device sync error\n", | ||
3625 | skd_name(skdev)); | ||
3626 | } | ||
3627 | |||
3628 | stop_out: | ||
3629 | skdev->state = SKD_DRVR_STATE_STOPPING; | ||
3630 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3631 | |||
3632 | skd_kill_timer(skdev); | ||
3633 | |||
3634 | spin_lock_irqsave(&skdev->lock, flags); | ||
3635 | skd_disable_interrupts(skdev); | ||
3636 | |||
3637 | /* ensure all ints on device are cleared */ | ||
3638 | /* soft reset the device to unload with a clean slate */ | ||
3639 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | ||
3640 | SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); | ||
3641 | |||
3642 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3643 | |||
3644 | /* poll every 100ms, 1 second timeout */ | ||
3645 | for (i = 0; i < 10; i++) { | ||
3646 | dev_state = | ||
3647 | SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; | ||
3648 | if (dev_state == FIT_SR_DRIVE_INIT) | ||
3649 | break; | ||
3650 | set_current_state(TASK_INTERRUPTIBLE); | ||
3651 | schedule_timeout(msecs_to_jiffies(100)); | ||
3652 | } | ||
3653 | |||
3654 | if (dev_state != FIT_SR_DRIVE_INIT) | ||
3655 | pr_err("(%s): skd_stop_device state error 0x%02x\n", | ||
3656 | skd_name(skdev), dev_state); | ||
3657 | } | ||
3658 | |||
3659 | /* assume spinlock is held */ | ||
3660 | static void skd_restart_device(struct skd_device *skdev) | ||
3661 | { | ||
3662 | u32 state; | ||
3663 | |||
3664 | /* ack all ghost interrupts */ | ||
3665 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | ||
3666 | |||
3667 | state = SKD_READL(skdev, FIT_STATUS); | ||
3668 | |||
3669 | pr_debug("%s:%s:%d drive status=0x%x\n", | ||
3670 | skdev->name, __func__, __LINE__, state); | ||
3671 | |||
3672 | state &= FIT_SR_DRIVE_STATE_MASK; | ||
3673 | skdev->drive_state = state; | ||
3674 | skdev->last_mtd = 0; | ||
3675 | |||
3676 | skdev->state = SKD_DRVR_STATE_RESTARTING; | ||
3677 | skdev->timer_countdown = SKD_RESTARTING_TIMO; | ||
3678 | |||
3679 | skd_soft_reset(skdev); | ||
3680 | } | ||
3681 | |||
3682 | /* assume spinlock is held */ | ||
3683 | static int skd_quiesce_dev(struct skd_device *skdev) | ||
3684 | { | ||
3685 | int rc = 0; | ||
3686 | |||
3687 | switch (skdev->state) { | ||
3688 | case SKD_DRVR_STATE_BUSY: | ||
3689 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
3690 | pr_debug("%s:%s:%d stopping %s queue\n", | ||
3691 | skdev->name, __func__, __LINE__, skdev->name); | ||
3692 | blk_stop_queue(skdev->queue); | ||
3693 | break; | ||
3694 | case SKD_DRVR_STATE_ONLINE: | ||
3695 | case SKD_DRVR_STATE_STOPPING: | ||
3696 | case SKD_DRVR_STATE_SYNCING: | ||
3697 | case SKD_DRVR_STATE_PAUSING: | ||
3698 | case SKD_DRVR_STATE_PAUSED: | ||
3699 | case SKD_DRVR_STATE_STARTING: | ||
3700 | case SKD_DRVR_STATE_RESTARTING: | ||
3701 | case SKD_DRVR_STATE_RESUMING: | ||
3702 | default: | ||
3703 | rc = -EINVAL; | ||
3704 | pr_debug("%s:%s:%d state [%d] not implemented\n", | ||
3705 | skdev->name, __func__, __LINE__, skdev->state); | ||
3706 | } | ||
3707 | return rc; | ||
3708 | } | ||
3709 | |||
3710 | /* assume spinlock is held */ | ||
3711 | static int skd_unquiesce_dev(struct skd_device *skdev) | ||
3712 | { | ||
3713 | int prev_driver_state = skdev->state; | ||
3714 | |||
3715 | skd_log_skdev(skdev, "unquiesce"); | ||
3716 | if (skdev->state == SKD_DRVR_STATE_ONLINE) { | ||
3717 | pr_debug("%s:%s:%d **** device already ONLINE\n", | ||
3718 | skdev->name, __func__, __LINE__); | ||
3719 | return 0; | ||
3720 | } | ||
3721 | if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { | ||
3722 | /* | ||
3723 | * If there has been a state change to other than | ||
3724 | * ONLINE, we will rely on a controller state change | ||
3725 | * to come back online and restart the queue. | ||
3726 | * The BUSY state means that the driver is ready to | ||
3727 | * continue normal processing but is waiting for the | ||
3728 | * controller to become available. | ||
3729 | */ | ||
3730 | skdev->state = SKD_DRVR_STATE_BUSY; | ||
3731 | pr_debug("%s:%s:%d drive BUSY state\n", | ||
3732 | skdev->name, __func__, __LINE__); | ||
3733 | return 0; | ||
3734 | } | ||
3735 | |||
3736 | /* | ||
3737 | * Drive has just come online, driver is either in startup, | ||
3738 | * paused performing a task, or busy waiting for hardware. | ||
3739 | */ | ||
3740 | switch (skdev->state) { | ||
3741 | case SKD_DRVR_STATE_PAUSED: | ||
3742 | case SKD_DRVR_STATE_BUSY: | ||
3743 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
3744 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
3745 | case SKD_DRVR_STATE_STARTING: | ||
3746 | case SKD_DRVR_STATE_RESTARTING: | ||
3747 | case SKD_DRVR_STATE_FAULT: | ||
3748 | case SKD_DRVR_STATE_IDLE: | ||
3749 | case SKD_DRVR_STATE_LOAD: | ||
3750 | skdev->state = SKD_DRVR_STATE_ONLINE; | ||
3751 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | ||
3752 | skd_name(skdev), | ||
3753 | skd_skdev_state_to_str(prev_driver_state), | ||
3754 | prev_driver_state, skd_skdev_state_to_str(skdev->state), | ||
3755 | skdev->state); | ||
3756 | pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n", | ||
3757 | skdev->name, __func__, __LINE__); | ||
3758 | pr_debug("%s:%s:%d starting %s queue\n", | ||
3759 | skdev->name, __func__, __LINE__, skdev->name); | ||
3760 | pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); | ||
3761 | blk_start_queue(skdev->queue); | ||
3762 | skdev->gendisk_on = 1; | ||
3763 | wake_up_interruptible(&skdev->waitq); | ||
3764 | break; | ||
3765 | |||
3766 | case SKD_DRVR_STATE_DISAPPEARED: | ||
3767 | default: | ||
3768 | pr_debug("%s:%s:%d **** driver state %d, not implemented\n", | ||
3769 | skdev->name, __func__, __LINE__, | ||
3770 | skdev->state); | ||
3771 | return -EBUSY; | ||
3772 | } | ||
3773 | return 0; | ||
3774 | } | ||
3775 | |||
3776 | /* | ||
3777 | ***************************************************************************** | ||
3778 | * PCIe MSI/MSI-X INTERRUPT HANDLERS | ||
3779 | ***************************************************************************** | ||
3780 | */ | ||
3781 | |||
3782 | static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) | ||
3783 | { | ||
3784 | struct skd_device *skdev = skd_host_data; | ||
3785 | unsigned long flags; | ||
3786 | |||
3787 | spin_lock_irqsave(&skdev->lock, flags); | ||
3788 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3789 | skdev->name, __func__, __LINE__, | ||
3790 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3791 | pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), | ||
3792 | irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3793 | SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); | ||
3794 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3795 | return IRQ_HANDLED; | ||
3796 | } | ||
3797 | |||
3798 | static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) | ||
3799 | { | ||
3800 | struct skd_device *skdev = skd_host_data; | ||
3801 | unsigned long flags; | ||
3802 | |||
3803 | spin_lock_irqsave(&skdev->lock, flags); | ||
3804 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3805 | skdev->name, __func__, __LINE__, | ||
3806 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3807 | SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); | ||
3808 | skd_isr_fwstate(skdev); | ||
3809 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3810 | return IRQ_HANDLED; | ||
3811 | } | ||
3812 | |||
3813 | static irqreturn_t skd_comp_q(int irq, void *skd_host_data) | ||
3814 | { | ||
3815 | struct skd_device *skdev = skd_host_data; | ||
3816 | unsigned long flags; | ||
3817 | int flush_enqueued = 0; | ||
3818 | int deferred; | ||
3819 | |||
3820 | spin_lock_irqsave(&skdev->lock, flags); | ||
3821 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3822 | skdev->name, __func__, __LINE__, | ||
3823 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3824 | SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); | ||
3825 | deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, | ||
3826 | &flush_enqueued); | ||
3827 | if (flush_enqueued) | ||
3828 | skd_request_fn(skdev->queue); | ||
3829 | |||
3830 | if (deferred) | ||
3831 | schedule_work(&skdev->completion_worker); | ||
3832 | else if (!flush_enqueued) | ||
3833 | skd_request_fn(skdev->queue); | ||
3834 | |||
3835 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3836 | |||
3837 | return IRQ_HANDLED; | ||
3838 | } | ||
3839 | |||
3840 | static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) | ||
3841 | { | ||
3842 | struct skd_device *skdev = skd_host_data; | ||
3843 | unsigned long flags; | ||
3844 | |||
3845 | spin_lock_irqsave(&skdev->lock, flags); | ||
3846 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3847 | skdev->name, __func__, __LINE__, | ||
3848 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3849 | SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); | ||
3850 | skd_isr_msg_from_dev(skdev); | ||
3851 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3852 | return IRQ_HANDLED; | ||
3853 | } | ||
3854 | |||
3855 | static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) | ||
3856 | { | ||
3857 | struct skd_device *skdev = skd_host_data; | ||
3858 | unsigned long flags; | ||
3859 | |||
3860 | spin_lock_irqsave(&skdev->lock, flags); | ||
3861 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3862 | skdev->name, __func__, __LINE__, | ||
3863 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3864 | SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); | ||
3865 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3866 | return IRQ_HANDLED; | ||
3867 | } | ||
3868 | |||
3869 | /* | ||
3870 | ***************************************************************************** | ||
3871 | * PCIe MSI/MSI-X SETUP | ||
3872 | ***************************************************************************** | ||
3873 | */ | ||
3874 | |||
3875 | struct skd_msix_entry { | ||
3876 | int have_irq; | ||
3877 | u32 vector; | ||
3878 | u32 entry; | ||
3879 | struct skd_device *rsp; | ||
3880 | char isr_name[30]; | ||
3881 | }; | ||
3882 | |||
3883 | struct skd_init_msix_entry { | ||
3884 | const char *name; | ||
3885 | irq_handler_t handler; | ||
3886 | }; | ||
3887 | |||
3888 | #define SKD_MAX_MSIX_COUNT 13 | ||
3889 | #define SKD_MIN_MSIX_COUNT 7 | ||
3890 | #define SKD_BASE_MSIX_IRQ 4 | ||
3891 | |||
3892 | static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { | ||
3893 | { "(DMA 0)", skd_reserved_isr }, | ||
3894 | { "(DMA 1)", skd_reserved_isr }, | ||
3895 | { "(DMA 2)", skd_reserved_isr }, | ||
3896 | { "(DMA 3)", skd_reserved_isr }, | ||
3897 | { "(State Change)", skd_statec_isr }, | ||
3898 | { "(COMPL_Q)", skd_comp_q }, | ||
3899 | { "(MSG)", skd_msg_isr }, | ||
3900 | { "(Reserved)", skd_reserved_isr }, | ||
3901 | { "(Reserved)", skd_reserved_isr }, | ||
3902 | { "(Queue Full 0)", skd_qfull_isr }, | ||
3903 | { "(Queue Full 1)", skd_qfull_isr }, | ||
3904 | { "(Queue Full 2)", skd_qfull_isr }, | ||
3905 | { "(Queue Full 3)", skd_qfull_isr }, | ||
3906 | }; | ||
3907 | |||
3908 | static void skd_release_msix(struct skd_device *skdev) | ||
3909 | { | ||
3910 | struct skd_msix_entry *qentry; | ||
3911 | int i; | ||
3912 | |||
3913 | if (skdev->msix_entries == NULL) | ||
3914 | return; | ||
3915 | for (i = 0; i < skdev->msix_count; i++) { | ||
3916 | qentry = &skdev->msix_entries[i]; | ||
3917 | skdev = qentry->rsp; | ||
3918 | |||
3919 | if (qentry->have_irq) | ||
3920 | devm_free_irq(&skdev->pdev->dev, | ||
3921 | qentry->vector, qentry->rsp); | ||
3922 | } | ||
3923 | pci_disable_msix(skdev->pdev); | ||
3924 | kfree(skdev->msix_entries); | ||
3925 | skdev->msix_count = 0; | ||
3926 | skdev->msix_entries = NULL; | ||
3927 | } | ||
3928 | |||
3929 | static int skd_acquire_msix(struct skd_device *skdev) | ||
3930 | { | ||
3931 | int i, rc; | ||
3932 | struct pci_dev *pdev; | ||
3933 | struct msix_entry *entries = NULL; | ||
3934 | struct skd_msix_entry *qentry; | ||
3935 | |||
3936 | pdev = skdev->pdev; | ||
3937 | skdev->msix_count = SKD_MAX_MSIX_COUNT; | ||
3938 | entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT, | ||
3939 | GFP_KERNEL); | ||
3940 | if (!entries) | ||
3941 | return -ENOMEM; | ||
3942 | |||
3943 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) | ||
3944 | entries[i].entry = i; | ||
3945 | |||
3946 | rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT); | ||
3947 | if (rc < 0) | ||
3948 | goto msix_out; | ||
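| /* A positive return from pci_enable_msix() is the number of vectors | ||
| * actually available; retry with that count if it meets the minimum. | ||
| */ | ||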
3949 | if (rc) { | ||
3950 | if (rc < SKD_MIN_MSIX_COUNT) { | ||
3951 | pr_err("(%s): failed to enable MSI-X %d\n", | ||
3952 | skd_name(skdev), rc); | ||
3953 | goto msix_out; | ||
3954 | } | ||
3955 | pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n", | ||
3956 | skdev->name, __func__, __LINE__, | ||
3957 | pci_name(pdev), skdev->name, rc); | ||
3958 | |||
3959 | skdev->msix_count = rc; | ||
3960 | rc = pci_enable_msix(pdev, entries, skdev->msix_count); | ||
3961 | if (rc) { | ||
3962 | pr_err("(%s): failed to enable MSI-X " | ||
3963 | "support (%d) %d\n", | ||
3964 | skd_name(skdev), skdev->msix_count, rc); | ||
3965 | goto msix_out; | ||
3966 | } | ||
3967 | } | ||
3968 | skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) * | ||
3969 | skdev->msix_count, GFP_KERNEL); | ||
3970 | if (!skdev->msix_entries) { | ||
3971 | rc = -ENOMEM; | ||
3972 | skdev->msix_count = 0; | ||
3973 | pr_err("(%s): msix table allocation error\n", | ||
3974 | skd_name(skdev)); | ||
3975 | goto msix_out; | ||
3976 | } | ||
3977 | |||
3978 | qentry = skdev->msix_entries; | ||
3979 | for (i = 0; i < skdev->msix_count; i++) { | ||
3980 | qentry->vector = entries[i].vector; | ||
3981 | qentry->entry = entries[i].entry; | ||
3982 | qentry->rsp = NULL; | ||
3983 | qentry->have_irq = 0; | ||
3984 | pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n", | ||
3985 | skdev->name, __func__, __LINE__, | ||
3986 | pci_name(pdev), skdev->name, | ||
3987 | i, qentry->vector, qentry->entry); | ||
3988 | qentry++; | ||
3989 | } | ||
3990 | |||
3991 | /* Enable MSI-X vectors for the base queue */ | ||
3992 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { | ||
3993 | qentry = &skdev->msix_entries[i]; | ||
3994 | snprintf(qentry->isr_name, sizeof(qentry->isr_name), | ||
3995 | "%s%d-msix %s", DRV_NAME, skdev->devno, | ||
3996 | msix_entries[i].name); | ||
3997 | rc = devm_request_irq(&skdev->pdev->dev, qentry->vector, | ||
3998 | msix_entries[i].handler, 0, | ||
3999 | qentry->isr_name, skdev); | ||
4000 | if (rc) { | ||
4001 | pr_err("(%s): Unable to register(%d) MSI-X " | ||
4002 | "handler %d: %s\n", | ||
4003 | skd_name(skdev), rc, i, qentry->isr_name); | ||
4004 | goto msix_out; | ||
4005 | } else { | ||
4006 | qentry->have_irq = 1; | ||
4007 | qentry->rsp = skdev; | ||
4008 | } | ||
4009 | } | ||
4010 | pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n", | ||
4011 | skdev->name, __func__, __LINE__, | ||
4012 | pci_name(pdev), skdev->name, skdev->msix_count); | ||
4013 | return 0; | ||
4014 | |||
4015 | msix_out: | ||
4016 | if (entries) | ||
4017 | kfree(entries); | ||
4018 | skd_release_msix(skdev); | ||
4019 | return rc; | ||
4020 | } | ||
4021 | |||
4022 | static int skd_acquire_irq(struct skd_device *skdev) | ||
4023 | { | ||
4024 | int rc; | ||
4025 | struct pci_dev *pdev; | ||
4026 | |||
4027 | pdev = skdev->pdev; | ||
4028 | skdev->msix_count = 0; | ||
4029 | |||
4030 | RETRY_IRQ_TYPE: | ||
4031 | switch (skdev->irq_type) { | ||
4032 | case SKD_IRQ_MSIX: | ||
4033 | rc = skd_acquire_msix(skdev); | ||
4034 | if (!rc) | ||
4035 | pr_info("(%s): MSI-X %d irqs enabled\n", | ||
4036 | skd_name(skdev), skdev->msix_count); | ||
4037 | else { | ||
4038 | pr_err( | ||
4039 | "(%s): failed to enable MSI-X, re-trying with MSI %d\n", | ||
4040 | skd_name(skdev), rc); | ||
4041 | skdev->irq_type = SKD_IRQ_MSI; | ||
4042 | goto RETRY_IRQ_TYPE; | ||
4043 | } | ||
4044 | break; | ||
4045 | case SKD_IRQ_MSI: | ||
4046 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi", | ||
4047 | DRV_NAME, skdev->devno); | ||
4048 | rc = pci_enable_msi(pdev); | ||
4049 | if (!rc) { | ||
4050 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0, | ||
4051 | skdev->isr_name, skdev); | ||
4052 | if (rc) { | ||
4053 | pci_disable_msi(pdev); | ||
4054 | pr_err( | ||
4055 | "(%s): failed to allocate the MSI interrupt %d\n", | ||
4056 | skd_name(skdev), rc); | ||
4057 | goto RETRY_IRQ_LEGACY; | ||
4058 | } | ||
4059 | pr_info("(%s): MSI irq %d enabled\n", | ||
4060 | skd_name(skdev), pdev->irq); | ||
4061 | } else { | ||
4062 | RETRY_IRQ_LEGACY: | ||
4063 | pr_err( | ||
4064 | "(%s): failed to enable MSI, re-trying with LEGACY %d\n", | ||
4065 | skd_name(skdev), rc); | ||
4066 | skdev->irq_type = SKD_IRQ_LEGACY; | ||
4067 | goto RETRY_IRQ_TYPE; | ||
4068 | } | ||
4069 | break; | ||
4070 | case SKD_IRQ_LEGACY: | ||
4071 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), | ||
4072 | "%s%d-legacy", DRV_NAME, skdev->devno); | ||
4073 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, | ||
4074 | IRQF_SHARED, skdev->isr_name, skdev); | ||
4075 | if (!rc) | ||
4076 | pr_info("(%s): LEGACY irq %d enabled\n", | ||
4077 | skd_name(skdev), pdev->irq); | ||
4078 | else | ||
4079 | pr_err("(%s): request LEGACY irq error %d\n", | ||
4080 | skd_name(skdev), rc); | ||
4081 | break; | ||
4082 | default: | ||
4083 | pr_info("(%s): irq_type %d invalid, re-set to %d\n", | ||
4084 | skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT); | ||
4085 | skdev->irq_type = SKD_IRQ_LEGACY; | ||
4086 | goto RETRY_IRQ_TYPE; | ||
4087 | } | ||
4088 | return rc; | ||
4089 | } | ||
4090 | |||
4091 | static void skd_release_irq(struct skd_device *skdev) | ||
4092 | { | ||
4093 | switch (skdev->irq_type) { | ||
4094 | case SKD_IRQ_MSIX: | ||
4095 | skd_release_msix(skdev); | ||
4096 | break; | ||
4097 | case SKD_IRQ_MSI: | ||
4098 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | ||
4099 | pci_disable_msi(skdev->pdev); | ||
4100 | break; | ||
4101 | case SKD_IRQ_LEGACY: | ||
4102 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | ||
4103 | break; | ||
4104 | default: | ||
4105 | pr_err("(%s): wrong irq type %d!\n", | ||
4106 | skd_name(skdev), skdev->irq_type); | ||
4107 | break; | ||
4108 | } | ||
4109 | } | ||
4110 | |||
4111 | /* | ||
4112 | ***************************************************************************** | ||
4113 | * CONSTRUCT | ||
4114 | ***************************************************************************** | ||
4115 | */ | ||
4116 | |||
4117 | static int skd_cons_skcomp(struct skd_device *skdev) | ||
4118 | { | ||
4119 | int rc = 0; | ||
4120 | struct fit_completion_entry_v1 *skcomp; | ||
4121 | u32 nbytes; | ||
4122 | |||
4123 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | ||
4124 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | ||
4125 | |||
4126 | pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n", | ||
4127 | skdev->name, __func__, __LINE__, | ||
4128 | nbytes, SKD_N_COMPLETION_ENTRY); | ||
4129 | |||
4130 | skcomp = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4131 | &skdev->cq_dma_address); | ||
4132 | |||
4133 | if (skcomp == NULL) { | ||
4134 | rc = -ENOMEM; | ||
4135 | goto err_out; | ||
4136 | } | ||
4137 | |||
4138 | memset(skcomp, 0, nbytes); | ||
4139 | |||
4140 | skdev->skcomp_table = skcomp; | ||
4141 | skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + | ||
4142 | sizeof(*skcomp) * | ||
4143 | SKD_N_COMPLETION_ENTRY); | ||
4144 | |||
4145 | err_out: | ||
4146 | return rc; | ||
4147 | } | ||
4148 | |||
4149 | static int skd_cons_skmsg(struct skd_device *skdev) | ||
4150 | { | ||
4151 | int rc = 0; | ||
4152 | u32 i; | ||
4153 | |||
4154 | pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n", | ||
4155 | skdev->name, __func__, __LINE__, | ||
4156 | sizeof(struct skd_fitmsg_context), | ||
4157 | skdev->num_fitmsg_context, | ||
4158 | sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); | ||
4159 | |||
4160 | skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) | ||
4161 | *skdev->num_fitmsg_context, GFP_KERNEL); | ||
4162 | if (skdev->skmsg_table == NULL) { | ||
4163 | rc = -ENOMEM; | ||
4164 | goto err_out; | ||
4165 | } | ||
4166 | |||
4167 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | ||
4168 | struct skd_fitmsg_context *skmsg; | ||
4169 | |||
4170 | skmsg = &skdev->skmsg_table[i]; | ||
4171 | |||
4172 | skmsg->id = i + SKD_ID_FIT_MSG; | ||
4173 | |||
4174 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
4175 | skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, | ||
4176 | SKD_N_FITMSG_BYTES + 64, | ||
4177 | &skmsg->mb_dma_address); | ||
4178 | |||
4179 | if (skmsg->msg_buf == NULL) { | ||
4180 | rc = -ENOMEM; | ||
4181 | goto err_out; | ||
4182 | } | ||
4183 | |||
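| /* Round the message buffer and its DMA address up to the FIT_QCMD | ||
| * base-address boundary, remembering the offset within the original | ||
| * allocation. | ||
| */ | ||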
4184 | skmsg->offset = (u32)((u64)skmsg->msg_buf & | ||
4185 | (~FIT_QCMD_BASE_ADDRESS_MASK)); | ||
4186 | skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; | ||
4187 | skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & | ||
4188 | FIT_QCMD_BASE_ADDRESS_MASK); | ||
4189 | skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK; | ||
4190 | skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; | ||
4191 | memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); | ||
4192 | |||
4193 | skmsg->next = &skmsg[1]; | ||
4194 | } | ||
4195 | |||
4196 | /* Free list is in order starting with the 0th entry. */ | ||
4197 | skdev->skmsg_table[i - 1].next = NULL; | ||
4198 | skdev->skmsg_free_list = skdev->skmsg_table; | ||
4199 | |||
4200 | err_out: | ||
4201 | return rc; | ||
4202 | } | ||
4203 | |||
4204 | static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, | ||
4205 | u32 n_sg, | ||
4206 | dma_addr_t *ret_dma_addr) | ||
4207 | { | ||
4208 | struct fit_sg_descriptor *sg_list; | ||
4209 | u32 nbytes; | ||
4210 | |||
4211 | nbytes = sizeof(*sg_list) * n_sg; | ||
4212 | |||
4213 | sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); | ||
4214 | |||
4215 | if (sg_list != NULL) { | ||
4216 | uint64_t dma_address = *ret_dma_addr; | ||
4217 | u32 i; | ||
4218 | |||
4219 | memset(sg_list, 0, nbytes); | ||
4220 | |||
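| /* chain each descriptor to the bus address of the next; | ||
| * the final descriptor gets a null next pointer below */ | ||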
4221 | for (i = 0; i < n_sg - 1; i++) { | ||
4222 | uint64_t ndp_off; | ||
4223 | ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); | ||
4224 | |||
4225 | sg_list[i].next_desc_ptr = dma_address + ndp_off; | ||
4226 | } | ||
4227 | sg_list[i].next_desc_ptr = 0LL; | ||
4228 | } | ||
4229 | |||
4230 | return sg_list; | ||
4231 | } | ||
4232 | |||
4233 | static int skd_cons_skreq(struct skd_device *skdev) | ||
4234 | { | ||
4235 | int rc = 0; | ||
4236 | u32 i; | ||
4237 | |||
4238 | pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n", | ||
4239 | skdev->name, __func__, __LINE__, | ||
4240 | sizeof(struct skd_request_context), | ||
4241 | skdev->num_req_context, | ||
4242 | sizeof(struct skd_request_context) * skdev->num_req_context); | ||
4243 | |||
4244 | skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) | ||
4245 | * skdev->num_req_context, GFP_KERNEL); | ||
4246 | if (skdev->skreq_table == NULL) { | ||
4247 | rc = -ENOMEM; | ||
4248 | goto err_out; | ||
4249 | } | ||
4250 | |||
4251 | pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n", | ||
4252 | skdev->name, __func__, __LINE__, | ||
4253 | skdev->sgs_per_request, sizeof(struct scatterlist), | ||
4254 | skdev->sgs_per_request * sizeof(struct scatterlist)); | ||
4255 | |||
4256 | for (i = 0; i < skdev->num_req_context; i++) { | ||
4257 | struct skd_request_context *skreq; | ||
4258 | |||
4259 | skreq = &skdev->skreq_table[i]; | ||
4260 | |||
4261 | skreq->id = i + SKD_ID_RW_REQUEST; | ||
4262 | skreq->state = SKD_REQ_STATE_IDLE; | ||
4263 | |||
4264 | skreq->sg = kzalloc(sizeof(struct scatterlist) * | ||
4265 | skdev->sgs_per_request, GFP_KERNEL); | ||
4266 | if (skreq->sg == NULL) { | ||
4267 | rc = -ENOMEM; | ||
4268 | goto err_out; | ||
4269 | } | ||
4270 | sg_init_table(skreq->sg, skdev->sgs_per_request); | ||
4271 | |||
4272 | skreq->sksg_list = skd_cons_sg_list(skdev, | ||
4273 | skdev->sgs_per_request, | ||
4274 | &skreq->sksg_dma_address); | ||
4275 | |||
4276 | if (skreq->sksg_list == NULL) { | ||
4277 | rc = -ENOMEM; | ||
4278 | goto err_out; | ||
4279 | } | ||
4280 | |||
4281 | skreq->next = &skreq[1]; | ||
4282 | } | ||
4283 | |||
4284 | /* Free list is in order starting with the 0th entry. */ | ||
4285 | skdev->skreq_table[i - 1].next = NULL; | ||
4286 | skdev->skreq_free_list = skdev->skreq_table; | ||
4287 | |||
4288 | err_out: | ||
4289 | return rc; | ||
4290 | } | ||
4291 | |||
4292 | static int skd_cons_skspcl(struct skd_device *skdev) | ||
4293 | { | ||
4294 | int rc = 0; | ||
4295 | u32 i, nbytes; | ||
4296 | |||
4297 | pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n", | ||
4298 | skdev->name, __func__, __LINE__, | ||
4299 | sizeof(struct skd_special_context), | ||
4300 | skdev->n_special, | ||
4301 | sizeof(struct skd_special_context) * skdev->n_special); | ||
4302 | |||
4303 | skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) | ||
4304 | * skdev->n_special, GFP_KERNEL); | ||
4305 | if (skdev->skspcl_table == NULL) { | ||
4306 | rc = -ENOMEM; | ||
4307 | goto err_out; | ||
4308 | } | ||
4309 | |||
4310 | for (i = 0; i < skdev->n_special; i++) { | ||
4311 | struct skd_special_context *skspcl; | ||
4312 | |||
4313 | skspcl = &skdev->skspcl_table[i]; | ||
4314 | |||
4315 | skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; | ||
4316 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
4317 | |||
4318 | skspcl->req.next = &skspcl[1].req; | ||
4319 | |||
4320 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4321 | |||
4322 | skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4323 | &skspcl->mb_dma_address); | ||
4324 | if (skspcl->msg_buf == NULL) { | ||
4325 | rc = -ENOMEM; | ||
4326 | goto err_out; | ||
4327 | } | ||
4328 | |||
4329 | memset(skspcl->msg_buf, 0, nbytes); | ||
4330 | |||
4331 | skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * | ||
4332 | SKD_N_SG_PER_SPECIAL, GFP_KERNEL); | ||
4333 | if (skspcl->req.sg == NULL) { | ||
4334 | rc = -ENOMEM; | ||
4335 | goto err_out; | ||
4336 | } | ||
4337 | |||
4338 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, | ||
4339 | SKD_N_SG_PER_SPECIAL, | ||
4340 | &skspcl->req. | ||
4341 | sksg_dma_address); | ||
4342 | if (skspcl->req.sksg_list == NULL) { | ||
4343 | rc = -ENOMEM; | ||
4344 | goto err_out; | ||
4345 | } | ||
4346 | } | ||
4347 | |||
4348 | /* Free list is in order starting with the 0th entry. */ | ||
4349 | skdev->skspcl_table[i - 1].req.next = NULL; | ||
4350 | skdev->skspcl_free_list = skdev->skspcl_table; | ||
4351 | |||
4352 | return rc; | ||
4353 | |||
4354 | err_out: | ||
4355 | return rc; | ||
4356 | } | ||
4357 | |||
4358 | static int skd_cons_sksb(struct skd_device *skdev) | ||
4359 | { | ||
4360 | int rc = 0; | ||
4361 | struct skd_special_context *skspcl; | ||
4362 | u32 nbytes; | ||
4363 | |||
4364 | skspcl = &skdev->internal_skspcl; | ||
4365 | |||
4366 | skspcl->req.id = 0 + SKD_ID_INTERNAL; | ||
4367 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
4368 | |||
4369 | nbytes = SKD_N_INTERNAL_BYTES; | ||
4370 | |||
4371 | skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4372 | &skspcl->db_dma_address); | ||
4373 | if (skspcl->data_buf == NULL) { | ||
4374 | rc = -ENOMEM; | ||
4375 | goto err_out; | ||
4376 | } | ||
4377 | |||
4378 | memset(skspcl->data_buf, 0, nbytes); | ||
4379 | |||
4380 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4381 | skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4382 | &skspcl->mb_dma_address); | ||
4383 | if (skspcl->msg_buf == NULL) { | ||
4384 | rc = -ENOMEM; | ||
4385 | goto err_out; | ||
4386 | } | ||
4387 | |||
4388 | memset(skspcl->msg_buf, 0, nbytes); | ||
4389 | |||
4390 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, | ||
4391 | &skspcl->req.sksg_dma_address); | ||
4392 | if (skspcl->req.sksg_list == NULL) { | ||
4393 | rc = -ENOMEM; | ||
4394 | goto err_out; | ||
4395 | } | ||
4396 | |||
4397 | if (!skd_format_internal_skspcl(skdev)) { | ||
4398 | rc = -EINVAL; | ||
4399 | goto err_out; | ||
4400 | } | ||
4401 | |||
4402 | err_out: | ||
4403 | return rc; | ||
4404 | } | ||
4405 | |||
4406 | static int skd_cons_disk(struct skd_device *skdev) | ||
4407 | { | ||
4408 | int rc = 0; | ||
4409 | struct gendisk *disk; | ||
4410 | struct request_queue *q; | ||
4411 | unsigned long flags; | ||
4412 | |||
4413 | disk = alloc_disk(SKD_MINORS_PER_DEVICE); | ||
4414 | if (!disk) { | ||
4415 | rc = -ENOMEM; | ||
4416 | goto err_out; | ||
4417 | } | ||
4418 | |||
4419 | skdev->disk = disk; | ||
4420 | sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); | ||
4421 | |||
4422 | disk->major = skdev->major; | ||
4423 | disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; | ||
4424 | disk->fops = &skd_blockdev_ops; | ||
4425 | disk->private_data = skdev; | ||
4426 | |||
4427 | q = blk_init_queue(skd_request_fn, &skdev->lock); | ||
4428 | if (!q) { | ||
4429 | rc = -ENOMEM; | ||
4430 | goto err_out; | ||
4431 | } | ||
4432 | |||
4433 | skdev->queue = q; | ||
4434 | disk->queue = q; | ||
4435 | q->queuedata = skdev; | ||
4436 | |||
4437 | blk_queue_flush(q, REQ_FLUSH | REQ_FUA); | ||
4438 | blk_queue_max_segments(q, skdev->sgs_per_request); | ||
4439 | blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); | ||
4440 | |||
4441 | /* set sysfs optimal_io_size to 8K */ | ||
4442 | blk_queue_io_opt(q, 8192); | ||
4443 | |||
4444 | /* DISCARD Flag initialization. */ | ||
4445 | q->limits.discard_granularity = 8192; | ||
4446 | q->limits.discard_alignment = 0; | ||
4447 | q->limits.max_discard_sectors = UINT_MAX >> 9; | ||
4448 | q->limits.discard_zeroes_data = 1; | ||
4449 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | ||
4450 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); | ||
4451 | |||
4452 | spin_lock_irqsave(&skdev->lock, flags); | ||
4453 | pr_debug("%s:%s:%d stopping %s queue\n", | ||
4454 | skdev->name, __func__, __LINE__, skdev->name); | ||
4455 | blk_stop_queue(skdev->queue); | ||
4456 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
4457 | |||
4458 | err_out: | ||
4459 | return rc; | ||
4460 | } | ||
4461 | |||
4462 | #define SKD_N_DEV_TABLE 16u | ||
4463 | static u32 skd_next_devno; | ||
4464 | |||
4465 | static struct skd_device *skd_construct(struct pci_dev *pdev) | ||
4466 | { | ||
4467 | struct skd_device *skdev; | ||
4468 | int blk_major = skd_major; | ||
4469 | int rc; | ||
4470 | |||
4471 | skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); | ||
4472 | |||
4473 | if (!skdev) { | ||
4474 | pr_err(PFX "(%s): memory alloc failure\n", | ||
4475 | pci_name(pdev)); | ||
4476 | return NULL; | ||
4477 | } | ||
4478 | |||
4479 | skdev->state = SKD_DRVR_STATE_LOAD; | ||
4480 | skdev->pdev = pdev; | ||
4481 | skdev->devno = skd_next_devno++; | ||
4482 | skdev->major = blk_major; | ||
4483 | skdev->irq_type = skd_isr_type; | ||
4484 | sprintf(skdev->name, DRV_NAME "%d", skdev->devno); | ||
4485 | skdev->dev_max_queue_depth = 0; | ||
4486 | |||
4487 | skdev->num_req_context = skd_max_queue_depth; | ||
4488 | skdev->num_fitmsg_context = skd_max_queue_depth; | ||
4489 | skdev->n_special = skd_max_pass_thru; | ||
4490 | skdev->cur_max_queue_depth = 1; | ||
4491 | skdev->queue_low_water_mark = 1; | ||
4492 | skdev->proto_ver = 99; | ||
4493 | skdev->sgs_per_request = skd_sgs_per_request; | ||
4494 | skdev->dbg_level = skd_dbg_level; | ||
4495 | |||
4496 | atomic_set(&skdev->device_count, 0); | ||
4497 | |||
4498 | spin_lock_init(&skdev->lock); | ||
4499 | |||
4500 | INIT_WORK(&skdev->completion_worker, skd_completion_worker); | ||
4501 | |||
4502 | pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); | ||
4503 | rc = skd_cons_skcomp(skdev); | ||
4504 | if (rc < 0) | ||
4505 | goto err_out; | ||
4506 | |||
4507 | pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); | ||
4508 | rc = skd_cons_skmsg(skdev); | ||
4509 | if (rc < 0) | ||
4510 | goto err_out; | ||
4511 | |||
4512 | pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); | ||
4513 | rc = skd_cons_skreq(skdev); | ||
4514 | if (rc < 0) | ||
4515 | goto err_out; | ||
4516 | |||
4517 | pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); | ||
4518 | rc = skd_cons_skspcl(skdev); | ||
4519 | if (rc < 0) | ||
4520 | goto err_out; | ||
4521 | |||
4522 | pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); | ||
4523 | rc = skd_cons_sksb(skdev); | ||
4524 | if (rc < 0) | ||
4525 | goto err_out; | ||
4526 | |||
4527 | pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); | ||
4528 | rc = skd_cons_disk(skdev); | ||
4529 | if (rc < 0) | ||
4530 | goto err_out; | ||
4531 | |||
4532 | pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__); | ||
4533 | return skdev; | ||
4534 | |||
4535 | err_out: | ||
4536 | pr_debug("%s:%s:%d construct failed\n", | ||
4537 | skdev->name, __func__, __LINE__); | ||
4538 | skd_destruct(skdev); | ||
4539 | return NULL; | ||
4540 | } | ||
4541 | |||
4542 | /* | ||
4543 | ***************************************************************************** | ||
4544 | * DESTRUCT (FREE) | ||
4545 | ***************************************************************************** | ||
4546 | */ | ||
4547 | |||
4548 | static void skd_free_skcomp(struct skd_device *skdev) | ||
4549 | { | ||
4550 | if (skdev->skcomp_table != NULL) { | ||
4551 | u32 nbytes; | ||
4552 | |||
4553 | nbytes = sizeof(skdev->skcomp_table[0]) * | ||
4554 | SKD_N_COMPLETION_ENTRY; | ||
4555 | pci_free_consistent(skdev->pdev, nbytes, | ||
4556 | skdev->skcomp_table, skdev->cq_dma_address); | ||
4557 | } | ||
4558 | |||
4559 | skdev->skcomp_table = NULL; | ||
4560 | skdev->cq_dma_address = 0; | ||
4561 | } | ||
4562 | |||
4563 | static void skd_free_skmsg(struct skd_device *skdev) | ||
4564 | { | ||
4565 | u32 i; | ||
4566 | |||
4567 | if (skdev->skmsg_table == NULL) | ||
4568 | return; | ||
4569 | |||
4570 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | ||
4571 | struct skd_fitmsg_context *skmsg; | ||
4572 | |||
4573 | skmsg = &skdev->skmsg_table[i]; | ||
4574 | |||
4575 | if (skmsg->msg_buf != NULL) { | ||
4576 | skmsg->msg_buf += skmsg->offset; | ||
4577 | skmsg->mb_dma_address += skmsg->offset; | ||
4578 | pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, | ||
4579 | skmsg->msg_buf, | ||
4580 | skmsg->mb_dma_address); | ||
4581 | } | ||
4582 | skmsg->msg_buf = NULL; | ||
4583 | skmsg->mb_dma_address = 0; | ||
4584 | } | ||
4585 | |||
4586 | kfree(skdev->skmsg_table); | ||
4587 | skdev->skmsg_table = NULL; | ||
4588 | } | ||
4589 | |||
4590 | static void skd_free_sg_list(struct skd_device *skdev, | ||
4591 | struct fit_sg_descriptor *sg_list, | ||
4592 | u32 n_sg, dma_addr_t dma_addr) | ||
4593 | { | ||
4594 | if (sg_list != NULL) { | ||
4595 | u32 nbytes; | ||
4596 | |||
4597 | nbytes = sizeof(*sg_list) * n_sg; | ||
4598 | |||
4599 | pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); | ||
4600 | } | ||
4601 | } | ||
4602 | |||
4603 | static void skd_free_skreq(struct skd_device *skdev) | ||
4604 | { | ||
4605 | u32 i; | ||
4606 | |||
4607 | if (skdev->skreq_table == NULL) | ||
4608 | return; | ||
4609 | |||
4610 | for (i = 0; i < skdev->num_req_context; i++) { | ||
4611 | struct skd_request_context *skreq; | ||
4612 | |||
4613 | skreq = &skdev->skreq_table[i]; | ||
4614 | |||
4615 | skd_free_sg_list(skdev, skreq->sksg_list, | ||
4616 | skdev->sgs_per_request, | ||
4617 | skreq->sksg_dma_address); | ||
4618 | |||
4619 | skreq->sksg_list = NULL; | ||
4620 | skreq->sksg_dma_address = 0; | ||
4621 | |||
4622 | kfree(skreq->sg); | ||
4623 | } | ||
4624 | |||
4625 | kfree(skdev->skreq_table); | ||
4626 | skdev->skreq_table = NULL; | ||
4627 | } | ||
4628 | |||
4629 | static void skd_free_skspcl(struct skd_device *skdev) | ||
4630 | { | ||
4631 | u32 i; | ||
4632 | u32 nbytes; | ||
4633 | |||
4634 | if (skdev->skspcl_table == NULL) | ||
4635 | return; | ||
4636 | |||
4637 | for (i = 0; i < skdev->n_special; i++) { | ||
4638 | struct skd_special_context *skspcl; | ||
4639 | |||
4640 | skspcl = &skdev->skspcl_table[i]; | ||
4641 | |||
4642 | if (skspcl->msg_buf != NULL) { | ||
4643 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4644 | pci_free_consistent(skdev->pdev, nbytes, | ||
4645 | skspcl->msg_buf, | ||
4646 | skspcl->mb_dma_address); | ||
4647 | } | ||
4648 | |||
4649 | skspcl->msg_buf = NULL; | ||
4650 | skspcl->mb_dma_address = 0; | ||
4651 | |||
4652 | skd_free_sg_list(skdev, skspcl->req.sksg_list, | ||
4653 | SKD_N_SG_PER_SPECIAL, | ||
4654 | skspcl->req.sksg_dma_address); | ||
4655 | |||
4656 | skspcl->req.sksg_list = NULL; | ||
4657 | skspcl->req.sksg_dma_address = 0; | ||
4658 | |||
4659 | kfree(skspcl->req.sg); | ||
4660 | } | ||
4661 | |||
4662 | kfree(skdev->skspcl_table); | ||
4663 | skdev->skspcl_table = NULL; | ||
4664 | } | ||
4665 | |||
4666 | static void skd_free_sksb(struct skd_device *skdev) | ||
4667 | { | ||
4668 | struct skd_special_context *skspcl; | ||
4669 | u32 nbytes; | ||
4670 | |||
4671 | skspcl = &skdev->internal_skspcl; | ||
4672 | |||
4673 | if (skspcl->data_buf != NULL) { | ||
4674 | nbytes = SKD_N_INTERNAL_BYTES; | ||
4675 | |||
4676 | pci_free_consistent(skdev->pdev, nbytes, | ||
4677 | skspcl->data_buf, skspcl->db_dma_address); | ||
4678 | } | ||
4679 | |||
4680 | skspcl->data_buf = NULL; | ||
4681 | skspcl->db_dma_address = 0; | ||
4682 | |||
4683 | if (skspcl->msg_buf != NULL) { | ||
4684 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4685 | pci_free_consistent(skdev->pdev, nbytes, | ||
4686 | skspcl->msg_buf, skspcl->mb_dma_address); | ||
4687 | } | ||
4688 | |||
4689 | skspcl->msg_buf = NULL; | ||
4690 | skspcl->mb_dma_address = 0; | ||
4691 | |||
4692 | skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, | ||
4693 | skspcl->req.sksg_dma_address); | ||
4694 | |||
4695 | skspcl->req.sksg_list = NULL; | ||
4696 | skspcl->req.sksg_dma_address = 0; | ||
4697 | } | ||
4698 | |||
4699 | static void skd_free_disk(struct skd_device *skdev) | ||
4700 | { | ||
4701 | struct gendisk *disk = skdev->disk; | ||
4702 | |||
4703 | if (disk != NULL) { | ||
4704 | struct request_queue *q = disk->queue; | ||
4705 | |||
4706 | if (disk->flags & GENHD_FL_UP) | ||
4707 | del_gendisk(disk); | ||
4708 | if (q) | ||
4709 | blk_cleanup_queue(q); | ||
4710 | put_disk(disk); | ||
4711 | } | ||
4712 | skdev->disk = NULL; | ||
4713 | } | ||
4714 | |||
4715 | static void skd_destruct(struct skd_device *skdev) | ||
4716 | { | ||
4717 | if (skdev == NULL) | ||
4718 | return; | ||
4719 | |||
4720 | |||
4721 | pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); | ||
4722 | skd_free_disk(skdev); | ||
4723 | |||
4724 | pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); | ||
4725 | skd_free_sksb(skdev); | ||
4726 | |||
4727 | pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); | ||
4728 | skd_free_skspcl(skdev); | ||
4729 | |||
4730 | pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); | ||
4731 | skd_free_skreq(skdev); | ||
4732 | |||
4733 | pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); | ||
4734 | skd_free_skmsg(skdev); | ||
4735 | |||
4736 | pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); | ||
4737 | skd_free_skcomp(skdev); | ||
4738 | |||
4739 | pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__); | ||
4740 | kfree(skdev); | ||
4741 | } | ||
4742 | |||
4743 | /* | ||
4744 | ***************************************************************************** | ||
4745 | * BLOCK DEVICE (BDEV) GLUE | ||
4746 | ***************************************************************************** | ||
4747 | */ | ||
4748 | |||
4749 | static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) | ||
4750 | { | ||
4751 | struct skd_device *skdev; | ||
4752 | u64 capacity; | ||
4753 | |||
4754 | skdev = bdev->bd_disk->private_data; | ||
4755 | |||
4756 | pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n", | ||
4757 | skdev->name, __func__, __LINE__, | ||
4758 | bdev->bd_disk->disk_name, current->comm); | ||
4759 | |||
4760 | if (skdev->read_cap_is_valid) { | ||
4761 | capacity = get_capacity(skdev->disk); | ||
4762 | geo->heads = 64; | ||
4763 | geo->sectors = 255; | ||
4764 | geo->cylinders = (capacity) / (255 * 64); | ||
4765 | |||
4766 | return 0; | ||
4767 | } | ||
4768 | return -EIO; | ||
4769 | } | ||
4770 | |||
4771 | static int skd_bdev_attach(struct skd_device *skdev) | ||
4772 | { | ||
4773 | pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); | ||
4774 | add_disk(skdev->disk); | ||
4775 | return 0; | ||
4776 | } | ||
4777 | |||
4778 | static const struct block_device_operations skd_blockdev_ops = { | ||
4779 | .owner = THIS_MODULE, | ||
4780 | .ioctl = skd_bdev_ioctl, | ||
4781 | .getgeo = skd_bdev_getgeo, | ||
4782 | }; | ||
4783 | |||
4784 | |||
4785 | /* | ||
4786 | ***************************************************************************** | ||
4787 | * PCIe DRIVER GLUE | ||
4788 | ***************************************************************************** | ||
4789 | */ | ||
4790 | |||
4791 | static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = { | ||
4792 | { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, | ||
4793 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | ||
4794 | { 0 } /* terminate list */ | ||
4795 | }; | ||
4796 | |||
4797 | MODULE_DEVICE_TABLE(pci, skd_pci_tbl); | ||
4798 | |||
4799 | static char *skd_pci_info(struct skd_device *skdev, char *str) | ||
4800 | { | ||
4801 | int pcie_reg; | ||
4802 | |||
4803 | strcpy(str, "PCIe ("); | ||
4804 | pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); | ||
4805 | |||
4806 | if (pcie_reg) { | ||
4807 | |||
4808 | char lwstr[6]; | ||
4809 | uint16_t pcie_lstat, lspeed, lwidth; | ||
4810 | |||
4811 | pcie_reg += 0x12; | ||
4812 | pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); | ||
4813 | lspeed = pcie_lstat & (0xF); | ||
4814 | lwidth = (pcie_lstat & 0x3F0) >> 4; | ||
4815 | |||
4816 | if (lspeed == 1) | ||
4817 | strcat(str, "2.5GT/s "); | ||
4818 | else if (lspeed == 2) | ||
4819 | strcat(str, "5.0GT/s "); | ||
4820 | else | ||
4821 | strcat(str, "<unknown> "); | ||
4822 | snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); | ||
4823 | strcat(str, lwstr); | ||
4824 | } | ||
4825 | return str; | ||
4826 | } | ||
4827 | |||
4828 | static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
4829 | { | ||
4830 | int i; | ||
4831 | int rc = 0; | ||
4832 | char pci_str[32]; | ||
4833 | struct skd_device *skdev; | ||
4834 | |||
4835 | pr_info("STEC s1120 Driver(%s) version %s-b%s\n", | ||
4836 | DRV_NAME, DRV_VERSION, DRV_BUILD_ID); | ||
4837 | pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n", | ||
4838 | pci_name(pdev), pdev->vendor, pdev->device); | ||
4839 | |||
4840 | rc = pci_enable_device(pdev); | ||
4841 | if (rc) | ||
4842 | return rc; | ||
4843 | rc = pci_request_regions(pdev, DRV_NAME); | ||
4844 | if (rc) | ||
4845 | goto err_out; | ||
4846 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
4847 | if (!rc) { | ||
4848 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
4849 | |||
4850 | pr_err("(%s): consistent DMA mask error %d\n", | ||
4851 | pci_name(pdev), rc); | ||
4852 | } | ||
4853 | } else { | ||
4854 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
4855 | if (rc) { | ||
4856 | |||
4857 | pr_err("(%s): DMA mask error %d\n", | ||
4858 | pci_name(pdev), rc); | ||
4859 | goto err_out_regions; | ||
4860 | } | ||
4861 | } | ||
4862 | |||
4863 | if (!skd_major) { | ||
4864 | rc = register_blkdev(0, DRV_NAME); | ||
4865 | if (rc < 0) | ||
4866 | goto err_out_regions; | ||
4867 | BUG_ON(!rc); | ||
4868 | skd_major = rc; | ||
4869 | } | ||
4870 | |||
4871 | skdev = skd_construct(pdev); | ||
4872 | if (skdev == NULL) { | ||
4873 | rc = -ENOMEM; | ||
4874 | goto err_out_regions; | ||
4875 | } | ||
4876 | |||
4877 | skd_pci_info(skdev, pci_str); | ||
4878 | pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str); | ||
4879 | |||
4880 | pci_set_master(pdev); | ||
4881 | rc = pci_enable_pcie_error_reporting(pdev); | ||
4882 | if (rc) { | ||
4883 | pr_err( | ||
4884 | "(%s): bad enable of PCIe error reporting rc=%d\n", | ||
4885 | skd_name(skdev), rc); | ||
4886 | skdev->pcie_error_reporting_is_enabled = 0; | ||
4887 | } else | ||
4888 | skdev->pcie_error_reporting_is_enabled = 1; | ||
4889 | |||
4890 | |||
4891 | pci_set_drvdata(pdev, skdev); | ||
4892 | |||
4893 | skdev->disk->driverfs_dev = &pdev->dev; | ||
4894 | |||
4895 | for (i = 0; i < SKD_MAX_BARS; i++) { | ||
4896 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | ||
4897 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | ||
4898 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | ||
4899 | skdev->mem_size[i]); | ||
4900 | if (!skdev->mem_map[i]) { | ||
4901 | pr_err("(%s): Unable to map adapter memory!\n", | ||
4902 | skd_name(skdev)); | ||
4903 | rc = -ENODEV; | ||
4904 | goto err_out_iounmap; | ||
4905 | } | ||
4906 | pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n", | ||
4907 | skdev->name, __func__, __LINE__, | ||
4908 | skdev->mem_map[i], | ||
4909 | (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); | ||
4910 | } | ||
4911 | |||
4912 | rc = skd_acquire_irq(skdev); | ||
4913 | if (rc) { | ||
4914 | pr_err("(%s): interrupt resource error %d\n", | ||
4915 | skd_name(skdev), rc); | ||
4916 | goto err_out_iounmap; | ||
4917 | } | ||
4918 | |||
4919 | rc = skd_start_timer(skdev); | ||
4920 | if (rc) | ||
4921 | goto err_out_timer; | ||
4922 | |||
4923 | init_waitqueue_head(&skdev->waitq); | ||
4924 | |||
4925 | skd_start_device(skdev); | ||
4926 | |||
4927 | rc = wait_event_interruptible_timeout(skdev->waitq, | ||
4928 | (skdev->gendisk_on), | ||
4929 | (SKD_START_WAIT_SECONDS * HZ)); | ||
4930 | if (skdev->gendisk_on > 0) { | ||
4931 | /* device came on-line after reset */ | ||
4932 | skd_bdev_attach(skdev); | ||
4933 | rc = 0; | ||
4934 | } else { | ||
4935 | /* we timed out; something is wrong with the device, | ||
4936 | so don't add the disk structure */ | ||
4937 | pr_err( | ||
4938 | "(%s): error: waiting for s1120 timed out %d!\n", | ||
4939 | skd_name(skdev), rc); | ||
4940 | /* in case of no error, we time out with ENXIO */ | ||
4941 | if (!rc) | ||
4942 | rc = -ENXIO; | ||
4943 | goto err_out_timer; | ||
4944 | } | ||
4945 | |||
4946 | |||
4947 | #ifdef SKD_VMK_POLL_HANDLER | ||
4948 | if (skdev->irq_type == SKD_IRQ_MSIX) { | ||
4949 | /* MSIX completion handler is being used for coredump */ | ||
4950 | vmklnx_scsi_register_poll_handler(skdev->scsi_host, | ||
4951 | skdev->msix_entries[5].vector, | ||
4952 | skd_comp_q, skdev); | ||
4953 | } else { | ||
4954 | vmklnx_scsi_register_poll_handler(skdev->scsi_host, | ||
4955 | skdev->pdev->irq, skd_isr, | ||
4956 | skdev); | ||
4957 | } | ||
4958 | #endif /* SKD_VMK_POLL_HANDLER */ | ||
4959 | |||
4960 | return rc; | ||
4961 | |||
4962 | err_out_timer: | ||
4963 | skd_stop_device(skdev); | ||
4964 | skd_release_irq(skdev); | ||
4965 | |||
4966 | err_out_iounmap: | ||
4967 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
4968 | if (skdev->mem_map[i]) | ||
4969 | iounmap(skdev->mem_map[i]); | ||
4970 | |||
4971 | if (skdev->pcie_error_reporting_is_enabled) | ||
4972 | pci_disable_pcie_error_reporting(pdev); | ||
4973 | |||
4974 | skd_destruct(skdev); | ||
4975 | |||
4976 | err_out_regions: | ||
4977 | pci_release_regions(pdev); | ||
4978 | |||
4979 | err_out: | ||
4980 | pci_disable_device(pdev); | ||
4981 | pci_set_drvdata(pdev, NULL); | ||
4982 | return rc; | ||
4983 | } | ||
4984 | |||
4985 | static void skd_pci_remove(struct pci_dev *pdev) | ||
4986 | { | ||
4987 | int i; | ||
4988 | struct skd_device *skdev; | ||
4989 | |||
4990 | skdev = pci_get_drvdata(pdev); | ||
4991 | if (!skdev) { | ||
4992 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
4993 | return; | ||
4994 | } | ||
4995 | skd_stop_device(skdev); | ||
4996 | skd_release_irq(skdev); | ||
4997 | |||
4998 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
4999 | if (skdev->mem_map[i]) | ||
5000 | iounmap((u32 *)skdev->mem_map[i]); | ||
5001 | |||
5002 | if (skdev->pcie_error_reporting_is_enabled) | ||
5003 | pci_disable_pcie_error_reporting(pdev); | ||
5004 | |||
5005 | skd_destruct(skdev); | ||
5006 | |||
5007 | pci_release_regions(pdev); | ||
5008 | pci_disable_device(pdev); | ||
5009 | pci_set_drvdata(pdev, NULL); | ||
5010 | |||
5011 | return; | ||
5012 | } | ||
5013 | |||
5014 | static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
5015 | { | ||
5016 | int i; | ||
5017 | struct skd_device *skdev; | ||
5018 | |||
5019 | skdev = pci_get_drvdata(pdev); | ||
5020 | if (!skdev) { | ||
5021 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
5022 | return -EIO; | ||
5023 | } | ||
5024 | |||
5025 | skd_stop_device(skdev); | ||
5026 | |||
5027 | skd_release_irq(skdev); | ||
5028 | |||
5029 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
5030 | if (skdev->mem_map[i]) | ||
5031 | iounmap((u32 *)skdev->mem_map[i]); | ||
5032 | |||
5033 | if (skdev->pcie_error_reporting_is_enabled) | ||
5034 | pci_disable_pcie_error_reporting(pdev); | ||
5035 | |||
5036 | pci_release_regions(pdev); | ||
5037 | pci_save_state(pdev); | ||
5038 | pci_disable_device(pdev); | ||
5039 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
5040 | return 0; | ||
5041 | } | ||
5042 | |||
5043 | static int skd_pci_resume(struct pci_dev *pdev) | ||
5044 | { | ||
5045 | int i; | ||
5046 | int rc = 0; | ||
5047 | struct skd_device *skdev; | ||
5048 | |||
5049 | skdev = pci_get_drvdata(pdev); | ||
5050 | if (!skdev) { | ||
5051 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
5052 | return -1; | ||
5053 | } | ||
5054 | |||
5055 | pci_set_power_state(pdev, PCI_D0); | ||
5056 | pci_enable_wake(pdev, PCI_D0, 0); | ||
5057 | pci_restore_state(pdev); | ||
5058 | |||
5059 | rc = pci_enable_device(pdev); | ||
5060 | if (rc) | ||
5061 | return rc; | ||
5062 | rc = pci_request_regions(pdev, DRV_NAME); | ||
5063 | if (rc) | ||
5064 | goto err_out; | ||
5065 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
5066 | if (!rc) { | ||
5067 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
5068 | |||
5069 | pr_err("(%s): consistent DMA mask error %d\n", | ||
5070 | pci_name(pdev), rc); | ||
5071 | } | ||
5072 | } else { | ||
5073 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
5074 | if (rc) { | ||
5075 | |||
5076 | pr_err("(%s): DMA mask error %d\n", | ||
5077 | pci_name(pdev), rc); | ||
5078 | goto err_out_regions; | ||
5079 | } | ||
5080 | } | ||
5081 | |||
5082 | pci_set_master(pdev); | ||
5083 | rc = pci_enable_pcie_error_reporting(pdev); | ||
5084 | if (rc) { | ||
5085 | pr_err("(%s): bad enable of PCIe error reporting rc=%d\n", | ||
5086 | skdev->name, rc); | ||
5087 | skdev->pcie_error_reporting_is_enabled = 0; | ||
5088 | } else | ||
5089 | skdev->pcie_error_reporting_is_enabled = 1; | ||
5090 | |||
5091 | for (i = 0; i < SKD_MAX_BARS; i++) { | ||
5092 | |||
5093 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | ||
5094 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | ||
5095 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | ||
5096 | skdev->mem_size[i]); | ||
5097 | if (!skdev->mem_map[i]) { | ||
5098 | pr_err("(%s): Unable to map adapter memory!\n", | ||
5099 | skd_name(skdev)); | ||
5100 | rc = -ENODEV; | ||
5101 | goto err_out_iounmap; | ||
5102 | } | ||
5103 | pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n", | ||
5104 | skdev->name, __func__, __LINE__, | ||
5105 | skdev->mem_map[i], | ||
5106 | (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); | ||
5107 | } | ||
5108 | rc = skd_acquire_irq(skdev); | ||
5109 | if (rc) { | ||
5110 | |||
5111 | pr_err("(%s): interrupt resource error %d\n", | ||
5112 | pci_name(pdev), rc); | ||
5113 | goto err_out_iounmap; | ||
5114 | } | ||
5115 | |||
5116 | rc = skd_start_timer(skdev); | ||
5117 | if (rc) | ||
5118 | goto err_out_timer; | ||
5119 | |||
5120 | init_waitqueue_head(&skdev->waitq); | ||
5121 | |||
5122 | skd_start_device(skdev); | ||
5123 | |||
5124 | return rc; | ||
5125 | |||
5126 | err_out_timer: | ||
5127 | skd_stop_device(skdev); | ||
5128 | skd_release_irq(skdev); | ||
5129 | |||
5130 | err_out_iounmap: | ||
5131 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
5132 | if (skdev->mem_map[i]) | ||
5133 | iounmap(skdev->mem_map[i]); | ||
5134 | |||
5135 | if (skdev->pcie_error_reporting_is_enabled) | ||
5136 | pci_disable_pcie_error_reporting(pdev); | ||
5137 | |||
5138 | err_out_regions: | ||
5139 | pci_release_regions(pdev); | ||
5140 | |||
5141 | err_out: | ||
5142 | pci_disable_device(pdev); | ||
5143 | return rc; | ||
5144 | } | ||
5145 | |||
5146 | static void skd_pci_shutdown(struct pci_dev *pdev) | ||
5147 | { | ||
5148 | struct skd_device *skdev; | ||
5149 | |||
5150 | pr_err("skd_pci_shutdown called\n"); | ||
5151 | |||
5152 | skdev = pci_get_drvdata(pdev); | ||
5153 | if (!skdev) { | ||
5154 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
5155 | return; | ||
5156 | } | ||
5157 | |||
5158 | pr_err("%s: calling stop\n", skd_name(skdev)); | ||
5159 | skd_stop_device(skdev); | ||
5160 | } | ||
5161 | |||
5162 | static struct pci_driver skd_driver = { | ||
5163 | .name = DRV_NAME, | ||
5164 | .id_table = skd_pci_tbl, | ||
5165 | .probe = skd_pci_probe, | ||
5166 | .remove = skd_pci_remove, | ||
5167 | .suspend = skd_pci_suspend, | ||
5168 | .resume = skd_pci_resume, | ||
5169 | .shutdown = skd_pci_shutdown, | ||
5170 | }; | ||
5171 | |||
5172 | /* | ||
5173 | ***************************************************************************** | ||
5174 | * LOGGING SUPPORT | ||
5175 | ***************************************************************************** | ||
5176 | */ | ||
5177 | |||
5178 | static const char *skd_name(struct skd_device *skdev) | ||
5179 | { | ||
5180 | memset(skdev->id_str, 0, sizeof(skdev->id_str)); | ||
5181 | |||
5182 | if (skdev->inquiry_is_valid) | ||
5183 | snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]", | ||
5184 | skdev->name, skdev->inq_serial_num, | ||
5185 | pci_name(skdev->pdev)); | ||
5186 | else | ||
5187 | snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]", | ||
5188 | skdev->name, pci_name(skdev->pdev)); | ||
5189 | |||
5190 | return skdev->id_str; | ||
5191 | } | ||
5192 | |||
5193 | const char *skd_drive_state_to_str(int state) | ||
5194 | { | ||
5195 | switch (state) { | ||
5196 | case FIT_SR_DRIVE_OFFLINE: | ||
5197 | return "OFFLINE"; | ||
5198 | case FIT_SR_DRIVE_INIT: | ||
5199 | return "INIT"; | ||
5200 | case FIT_SR_DRIVE_ONLINE: | ||
5201 | return "ONLINE"; | ||
5202 | case FIT_SR_DRIVE_BUSY: | ||
5203 | return "BUSY"; | ||
5204 | case FIT_SR_DRIVE_FAULT: | ||
5205 | return "FAULT"; | ||
5206 | case FIT_SR_DRIVE_DEGRADED: | ||
5207 | return "DEGRADED"; | ||
5208 | case FIT_SR_PCIE_LINK_DOWN: | ||
5209 | return "INK_DOWN"; | ||
5210 | case FIT_SR_DRIVE_SOFT_RESET: | ||
5211 | return "SOFT_RESET"; | ||
5212 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | ||
5213 | return "NEED_FW"; | ||
5214 | case FIT_SR_DRIVE_INIT_FAULT: | ||
5215 | return "INIT_FAULT"; | ||
5216 | case FIT_SR_DRIVE_BUSY_SANITIZE: | ||
5217 | return "BUSY_SANITIZE"; | ||
5218 | case FIT_SR_DRIVE_BUSY_ERASE: | ||
5219 | return "BUSY_ERASE"; | ||
5220 | case FIT_SR_DRIVE_FW_BOOTING: | ||
5221 | return "FW_BOOTING"; | ||
5222 | default: | ||
5223 | return "???"; | ||
5224 | } | ||
5225 | } | ||
5226 | |||
5227 | const char *skd_skdev_state_to_str(enum skd_drvr_state state) | ||
5228 | { | ||
5229 | switch (state) { | ||
5230 | case SKD_DRVR_STATE_LOAD: | ||
5231 | return "LOAD"; | ||
5232 | case SKD_DRVR_STATE_IDLE: | ||
5233 | return "IDLE"; | ||
5234 | case SKD_DRVR_STATE_BUSY: | ||
5235 | return "BUSY"; | ||
5236 | case SKD_DRVR_STATE_STARTING: | ||
5237 | return "STARTING"; | ||
5238 | case SKD_DRVR_STATE_ONLINE: | ||
5239 | return "ONLINE"; | ||
5240 | case SKD_DRVR_STATE_PAUSING: | ||
5241 | return "PAUSING"; | ||
5242 | case SKD_DRVR_STATE_PAUSED: | ||
5243 | return "PAUSED"; | ||
5244 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | ||
5245 | return "DRAINING_TIMEOUT"; | ||
5246 | case SKD_DRVR_STATE_RESTARTING: | ||
5247 | return "RESTARTING"; | ||
5248 | case SKD_DRVR_STATE_RESUMING: | ||
5249 | return "RESUMING"; | ||
5250 | case SKD_DRVR_STATE_STOPPING: | ||
5251 | return "STOPPING"; | ||
5252 | case SKD_DRVR_STATE_SYNCING: | ||
5253 | return "SYNCING"; | ||
5254 | case SKD_DRVR_STATE_FAULT: | ||
5255 | return "FAULT"; | ||
5256 | case SKD_DRVR_STATE_DISAPPEARED: | ||
5257 | return "DISAPPEARED"; | ||
5258 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
5259 | return "BUSY_ERASE"; | ||
5260 | case SKD_DRVR_STATE_BUSY_SANITIZE: | ||
5261 | return "BUSY_SANITIZE"; | ||
5262 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
5263 | return "BUSY_IMMINENT"; | ||
5264 | case SKD_DRVR_STATE_WAIT_BOOT: | ||
5265 | return "WAIT_BOOT"; | ||
5266 | |||
5267 | default: | ||
5268 | return "???"; | ||
5269 | } | ||
5270 | } | ||
5271 | |||
5272 | const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) | ||
5273 | { | ||
5274 | switch (state) { | ||
5275 | case SKD_MSG_STATE_IDLE: | ||
5276 | return "IDLE"; | ||
5277 | case SKD_MSG_STATE_BUSY: | ||
5278 | return "BUSY"; | ||
5279 | default: | ||
5280 | return "???"; | ||
5281 | } | ||
5282 | } | ||
5283 | |||
5284 | const char *skd_skreq_state_to_str(enum skd_req_state state) | ||
5285 | { | ||
5286 | switch (state) { | ||
5287 | case SKD_REQ_STATE_IDLE: | ||
5288 | return "IDLE"; | ||
5289 | case SKD_REQ_STATE_SETUP: | ||
5290 | return "SETUP"; | ||
5291 | case SKD_REQ_STATE_BUSY: | ||
5292 | return "BUSY"; | ||
5293 | case SKD_REQ_STATE_COMPLETED: | ||
5294 | return "COMPLETED"; | ||
5295 | case SKD_REQ_STATE_TIMEOUT: | ||
5296 | return "TIMEOUT"; | ||
5297 | case SKD_REQ_STATE_ABORTED: | ||
5298 | return "ABORTED"; | ||
5299 | default: | ||
5300 | return "???"; | ||
5301 | } | ||
5302 | } | ||
5303 | |||
5304 | static void skd_log_skdev(struct skd_device *skdev, const char *event) | ||
5305 | { | ||
5306 | pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n", | ||
5307 | skdev->name, __func__, __LINE__, skdev->name, skdev, event); | ||
5308 | pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n", | ||
5309 | skdev->name, __func__, __LINE__, | ||
5310 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | ||
5311 | skd_skdev_state_to_str(skdev->state), skdev->state); | ||
5312 | pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n", | ||
5313 | skdev->name, __func__, __LINE__, | ||
5314 | skdev->in_flight, skdev->cur_max_queue_depth, | ||
5315 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); | ||
5316 | pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n", | ||
5317 | skdev->name, __func__, __LINE__, | ||
5318 | skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); | ||
5319 | } | ||
5320 | |||
5321 | static void skd_log_skmsg(struct skd_device *skdev, | ||
5322 | struct skd_fitmsg_context *skmsg, const char *event) | ||
5323 | { | ||
5324 | pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n", | ||
5325 | skdev->name, __func__, __LINE__, skdev->name, skmsg, event); | ||
5326 | pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n", | ||
5327 | skdev->name, __func__, __LINE__, | ||
5328 | skd_skmsg_state_to_str(skmsg->state), skmsg->state, | ||
5329 | skmsg->id, skmsg->length); | ||
5330 | } | ||
5331 | |||
5332 | static void skd_log_skreq(struct skd_device *skdev, | ||
5333 | struct skd_request_context *skreq, const char *event) | ||
5334 | { | ||
5335 | pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n", | ||
5336 | skdev->name, __func__, __LINE__, skdev->name, skreq, event); | ||
5337 | pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n", | ||
5338 | skdev->name, __func__, __LINE__, | ||
5339 | skd_skreq_state_to_str(skreq->state), skreq->state, | ||
5340 | skreq->id, skreq->fitmsg_id); | ||
5341 | pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n", | ||
5342 | skdev->name, __func__, __LINE__, | ||
5343 | skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); | ||
5344 | |||
5345 | if (skreq->req != NULL) { | ||
5346 | struct request *req = skreq->req; | ||
5347 | u32 lba = (u32)blk_rq_pos(req); | ||
5348 | u32 count = blk_rq_sectors(req); | ||
5349 | |||
5350 | pr_debug("%s:%s:%d " | ||
5351 | "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", | ||
5352 | skdev->name, __func__, __LINE__, | ||
5353 | req, lba, lba, count, count, | ||
5354 | (int)rq_data_dir(req)); | ||
5355 | } else | ||
5356 | pr_debug("%s:%s:%d req=NULL\n", | ||
5357 | skdev->name, __func__, __LINE__); | ||
5358 | } | ||
5359 | |||
5360 | /* | ||
5361 | ***************************************************************************** | ||
5362 | * MODULE GLUE | ||
5363 | ***************************************************************************** | ||
5364 | */ | ||
5365 | |||
5366 | static int __init skd_init(void) | ||
5367 | { | ||
5368 | pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID); | ||
5369 | |||
5370 | switch (skd_isr_type) { | ||
5371 | case SKD_IRQ_LEGACY: | ||
5372 | case SKD_IRQ_MSI: | ||
5373 | case SKD_IRQ_MSIX: | ||
5374 | break; | ||
5375 | default: | ||
5376 | pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", | ||
5377 | skd_isr_type, SKD_IRQ_DEFAULT); | ||
5378 | skd_isr_type = SKD_IRQ_DEFAULT; | ||
5379 | } | ||
5380 | |||
5381 | if (skd_max_queue_depth < 1 || | ||
5382 | skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { | ||
5383 | pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", | ||
5384 | skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); | ||
5385 | skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; | ||
5386 | } | ||
5387 | |||
5388 | if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { | ||
5389 | pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", | ||
5390 | skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); | ||
5391 | skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; | ||
5392 | } | ||
5393 | |||
5394 | if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { | ||
5395 | pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n", | ||
5396 | skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); | ||
5397 | skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; | ||
5398 | } | ||
5399 | |||
5400 | if (skd_dbg_level < 0 || skd_dbg_level > 2) { | ||
5401 | pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", | ||
5402 | skd_dbg_level, 0); | ||
5403 | skd_dbg_level = 0; | ||
5404 | } | ||
5405 | |||
5406 | if (skd_isr_comp_limit < 0) { | ||
5407 | pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", | ||
5408 | skd_isr_comp_limit, 0); | ||
5409 | skd_isr_comp_limit = 0; | ||
5410 | } | ||
5411 | |||
5412 | if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) { | ||
5413 | pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n", | ||
5414 | skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT); | ||
5415 | skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; | ||
5416 | } | ||
5417 | |||
5418 | return pci_register_driver(&skd_driver); | ||
5419 | } | ||
5420 | |||
5421 | static void __exit skd_exit(void) | ||
5422 | { | ||
5423 | pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); | ||
5424 | |||
5425 | pci_unregister_driver(&skd_driver); | ||
5426 | |||
5427 | if (skd_major) | ||
5428 | unregister_blkdev(skd_major, DRV_NAME); | ||
5429 | } | ||
5430 | |||
5431 | module_init(skd_init); | ||
5432 | module_exit(skd_exit); | ||
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h new file mode 100644 index 000000000000..61c757ff0161 --- /dev/null +++ b/drivers/block/skd_s1120.h | |||
@@ -0,0 +1,330 @@ | |||
1 | /* Copyright 2012 STEC, Inc. | ||
2 | * | ||
3 | * This file is licensed under the terms of the 3-clause | ||
4 | * BSD License (http://opensource.org/licenses/BSD-3-Clause) | ||
5 | * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), | ||
6 | * at your option. Both licenses are also available in the LICENSE file | ||
7 | * distributed with this project. This file may not be copied, modified, | ||
8 | * or distributed except in accordance with those terms. | ||
9 | */ | ||
10 | |||
11 | |||
12 | #ifndef SKD_S1120_H | ||
13 | #define SKD_S1120_H | ||
14 | |||
15 | #pragma pack(push, s1120_h, 1) | ||
16 | |||
17 | /* | ||
18 | * Q-channel, 64-bit r/w | ||
19 | */ | ||
20 | #define FIT_Q_COMMAND 0x400u | ||
21 | #define FIT_QCMD_QID_MASK (0x3 << 1) | ||
22 | #define FIT_QCMD_QID0 (0x0 << 1) | ||
23 | #define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0 | ||
24 | #define FIT_QCMD_QID1 (0x1 << 1) | ||
25 | #define FIT_QCMD_QID2 (0x2 << 1) | ||
26 | #define FIT_QCMD_QID3 (0x3 << 1) | ||
27 | #define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */ | ||
28 | #define FIT_QCMD_MSGSIZE_MASK (0x3 << 4) | ||
29 | #define FIT_QCMD_MSGSIZE_64 (0x0 << 4) | ||
30 | #define FIT_QCMD_MSGSIZE_128 (0x1 << 4) | ||
31 | #define FIT_QCMD_MSGSIZE_256 (0x2 << 4) | ||
32 | #define FIT_QCMD_MSGSIZE_512 (0x3 << 4) | ||
33 | #define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull) | ||
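A doorbell write to FIT_Q_COMMAND combines a 64-byte-aligned FIT message address with a queue id and a message-size code from the bits above; this is why skd_cons_skmsg() in skd_main.c masks mb_dma_address with FIT_QCMD_BASE_ADDRESS_MASK. A minimal sketch of composing such a value, assuming the usual kernel types; fit_qcmd_compose() is an illustrative name, not part of the driver:

    /* Compose a Q-channel doorbell value: 64-byte-aligned message address
     * plus queue id (FIT_QCMD_QID*) and message size (FIT_QCMD_MSGSIZE_*).
     */
    static inline u64 fit_qcmd_compose(dma_addr_t msg_dma, u64 qid, u64 msgsize)
    {
            /* msg_dma must already be 64-byte aligned. */
            return ((u64)msg_dma & FIT_QCMD_BASE_ADDRESS_MASK) | qid | msgsize;
    }

The resulting value would then be written as a single 64-bit MMIO store to FIT_Q_COMMAND.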
34 | |||
35 | /* | ||
36 | * Control, 32-bit r/w | ||
37 | */ | ||
38 | #define FIT_CONTROL 0x500u | ||
39 | #define FIT_CR_HARD_RESET (1u << 0u) | ||
40 | #define FIT_CR_SOFT_RESET (1u << 1u) | ||
41 | #define FIT_CR_DIS_TIMESTAMPS (1u << 6u) | ||
42 | #define FIT_CR_ENABLE_INTERRUPTS (1u << 7u) | ||
43 | |||
44 | /* | ||
45 | * Status, 32-bit, r/o | ||
46 | */ | ||
47 | #define FIT_STATUS 0x510u | ||
48 | #define FIT_SR_DRIVE_STATE_MASK 0x000000FFu | ||
49 | #define FIT_SR_SIGNATURE (0xFF << 8) | ||
50 | #define FIT_SR_PIO_DMA (1 << 16) | ||
51 | #define FIT_SR_DRIVE_OFFLINE 0x00 | ||
52 | #define FIT_SR_DRIVE_INIT 0x01 | ||
53 | /* #define FIT_SR_DRIVE_READY 0x02 */ | ||
54 | #define FIT_SR_DRIVE_ONLINE 0x03 | ||
55 | #define FIT_SR_DRIVE_BUSY 0x04 | ||
56 | #define FIT_SR_DRIVE_FAULT 0x05 | ||
57 | #define FIT_SR_DRIVE_DEGRADED 0x06 | ||
58 | #define FIT_SR_PCIE_LINK_DOWN 0x07 | ||
59 | #define FIT_SR_DRIVE_SOFT_RESET 0x08 | ||
60 | #define FIT_SR_DRIVE_INIT_FAULT 0x09 | ||
61 | #define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A | ||
62 | #define FIT_SR_DRIVE_BUSY_ERASE 0x0B | ||
63 | #define FIT_SR_DRIVE_FW_BOOTING 0x0C | ||
64 | #define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE | ||
65 | #define FIT_SR_DEVICE_MISSING 0xFF | ||
66 | #define FIT_SR__RESERVED 0xFFFFFF00u | ||
67 | |||
68 | /* | ||
69 | * FIT_STATUS - Status register data definition | ||
70 | */ | ||
71 | #define FIT_SR_STATE_MASK (0xFF << 0) | ||
72 | #define FIT_SR_SIGNATURE (0xFF << 8) | ||
73 | #define FIT_SR_PIO_DMA (1 << 16) | ||
74 | |||
75 | /* | ||
76 | * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear) | ||
77 | */ | ||
78 | #define FIT_INT_STATUS_HOST 0x520u | ||
79 | #define FIT_ISH_FW_STATE_CHANGE (1u << 0u) | ||
80 | #define FIT_ISH_COMPLETION_POSTED (1u << 1u) | ||
81 | #define FIT_ISH_MSG_FROM_DEV (1u << 2u) | ||
82 | #define FIT_ISH_UNDEFINED_3 (1u << 3u) | ||
83 | #define FIT_ISH_UNDEFINED_4 (1u << 4u) | ||
84 | #define FIT_ISH_Q0_FULL (1u << 5u) | ||
85 | #define FIT_ISH_Q1_FULL (1u << 6u) | ||
86 | #define FIT_ISH_Q2_FULL (1u << 7u) | ||
87 | #define FIT_ISH_Q3_FULL (1u << 8u) | ||
88 | #define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u) | ||
89 | #define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u) | ||
90 | |||
91 | #define FIT_INT_DEF_MASK \ | ||
92 | (FIT_ISH_FW_STATE_CHANGE | \ | ||
93 | FIT_ISH_COMPLETION_POSTED | \ | ||
94 | FIT_ISH_MSG_FROM_DEV | \ | ||
95 | FIT_ISH_Q0_FULL | \ | ||
96 | FIT_ISH_Q1_FULL | \ | ||
97 | FIT_ISH_Q2_FULL | \ | ||
98 | FIT_ISH_Q3_FULL | \ | ||
99 | FIT_ISH_QCMD_FIFO_OVERRUN | \ | ||
100 | FIT_ISH_BAD_EXP_ROM_READ) | ||
101 | |||
102 | #define FIT_INT_QUEUE_FULL \ | ||
103 | (FIT_ISH_Q0_FULL | \ | ||
104 | FIT_ISH_Q1_FULL | \ | ||
105 | FIT_ISH_Q2_FULL | \ | ||
106 | FIT_ISH_Q3_FULL) | ||
107 | |||
108 | #define MSI_MSG_NWL_ERROR_0 0x00000000 | ||
109 | #define MSI_MSG_NWL_ERROR_1 0x00000001 | ||
110 | #define MSI_MSG_NWL_ERROR_2 0x00000002 | ||
111 | #define MSI_MSG_NWL_ERROR_3 0x00000003 | ||
112 | #define MSI_MSG_STATE_CHANGE 0x00000004 | ||
113 | #define MSI_MSG_COMPLETION_POSTED 0x00000005 | ||
114 | #define MSI_MSG_MSG_FROM_DEV 0x00000006 | ||
115 | #define MSI_MSG_RESERVED_0 0x00000007 | ||
116 | #define MSI_MSG_RESERVED_1 0x00000008 | ||
117 | #define MSI_MSG_QUEUE_0_FULL 0x00000009 | ||
118 | #define MSI_MSG_QUEUE_1_FULL 0x0000000A | ||
119 | #define MSI_MSG_QUEUE_2_FULL 0x0000000B | ||
120 | #define MSI_MSG_QUEUE_3_FULL 0x0000000C | ||
121 | |||
122 | #define FIT_INT_RESERVED_MASK \ | ||
123 | (FIT_ISH_UNDEFINED_3 | \ | ||
124 | FIT_ISH_UNDEFINED_4) | ||
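Because FIT_INT_STATUS_HOST is write-1-to-clear, a handler acknowledges only the bits it has actually observed by writing that same value back. A hedged sketch, assuming the standard MMIO accessors and a mapped BAR; the function name is illustrative and the real handling lives in skd_main.c:

    static void skd_ack_intstatus_example(void __iomem *bar)
    {
            u32 intstat = readl(bar + FIT_INT_STATUS_HOST);
            u32 ack = intstat & FIT_INT_DEF_MASK;   /* only bits we service */

            if (ack)
                    writel(ack, bar + FIT_INT_STATUS_HOST); /* w1c clears exactly these */
    }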
125 | |||
126 | /* | ||
127 | * Interrupt mask, 32-bit r/w | ||
128 | * Bit definitions are the same as FIT_INT_STATUS_HOST | ||
129 | */ | ||
130 | #define FIT_INT_MASK_HOST 0x528u | ||
131 | |||
132 | /* | ||
133 | * Message to device, 32-bit r/w | ||
134 | */ | ||
135 | #define FIT_MSG_TO_DEVICE 0x540u | ||
136 | |||
137 | /* | ||
138 | * Message from device, 32-bit, r/o | ||
139 | */ | ||
140 | #define FIT_MSG_FROM_DEVICE 0x548u | ||
141 | |||
142 | /* | ||
143 | * 32-bit messages to/from device, composition/extraction macros | ||
144 | */ | ||
145 | #define FIT_MXD_CONS(TYPE, PARAM, DATA) \ | ||
146 | ((((TYPE) & 0xFFu) << 24u) | \ | ||
147 | (((PARAM) & 0xFFu) << 16u) | \ | ||
148 | (((DATA) & 0xFFFFu) << 0u)) | ||
149 | #define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu) | ||
150 | #define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu) | ||
151 | #define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu) | ||
152 | |||
153 | /* | ||
154 | * Types of messages to/from device | ||
155 | */ | ||
156 | #define FIT_MTD_FITFW_INIT 0x01u | ||
157 | #define FIT_MTD_GET_CMDQ_DEPTH 0x02u | ||
158 | #define FIT_MTD_SET_COMPQ_DEPTH 0x03u | ||
159 | #define FIT_MTD_SET_COMPQ_ADDR 0x04u | ||
160 | #define FIT_MTD_ARM_QUEUE 0x05u | ||
161 | #define FIT_MTD_CMD_LOG_HOST_ID 0x07u | ||
162 | #define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u | ||
163 | #define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u | ||
164 | #define FIT_MFD_SMART_EXCEEDED 0x10u | ||
165 | #define FIT_MFD_POWER_DOWN 0x11u | ||
166 | #define FIT_MFD_OFFLINE 0x12u | ||
167 | #define FIT_MFD_ONLINE 0x13u | ||
168 | #define FIT_MFD_FW_RESTARTING 0x14u | ||
169 | #define FIT_MFD_PM_ACTIVE 0x15u | ||
170 | #define FIT_MFD_PM_STANDBY 0x16u | ||
171 | #define FIT_MFD_PM_SLEEP 0x17u | ||
172 | #define FIT_MFD_CMD_PROGRESS 0x18u | ||
173 | |||
174 | #define FIT_MTD_DEBUG 0xFEu | ||
175 | #define FIT_MFD_DEBUG 0xFFu | ||
176 | |||
177 | #define FIT_MFD_MASK (0xFFu) | ||
178 | #define FIT_MFD_DATA_MASK (0xFFu) | ||
179 | #define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK) | ||
180 | #define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK) | ||
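These macros pack a message type, an 8-bit parameter, and 16 bits of data into the 32-bit mailbox registers (FIT_MSG_TO_DEVICE / FIT_MSG_FROM_DEVICE). A hedged example of building one such message; SKD_N_COMPLETION_ENTRY is the driver-side constant from skd_main.c and the helper name is illustrative:

    /* Build the 32-bit "set completion queue depth" mailbox message. */
    static inline u32 fit_set_compq_depth_msg(void)
    {
            return FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, SKD_N_COMPLETION_ENTRY);
    }

A value read back from FIT_MSG_FROM_DEVICE can be decomposed the same way with FIT_MXD_TYPE(), FIT_MXD_PARAM() and FIT_MXD_DATA().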
181 | |||
182 | /* | ||
183 | * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w | ||
184 | * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR) | ||
185 | * (was Response buffer in docs) | ||
186 | */ | ||
187 | #define FIT_MSG_TO_DEVICE_ARG 0x580u | ||
188 | |||
189 | /* | ||
190 | * Hardware (ASIC) version, 32-bit r/o | ||
191 | */ | ||
192 | #define FIT_HW_VERSION 0x588u | ||
193 | |||
194 | /* | ||
195 | * Scatter/gather list descriptor. | ||
196 | * 32-bytes and must be aligned on a 32-byte boundary. | ||
197 | * All fields are in little endian order. | ||
198 | */ | ||
199 | struct fit_sg_descriptor { | ||
200 | uint32_t control; | ||
201 | uint32_t byte_count; | ||
202 | uint64_t host_side_addr; | ||
203 | uint64_t dev_side_addr; | ||
204 | uint64_t next_desc_ptr; | ||
205 | }; | ||
206 | |||
207 | #define FIT_SGD_CONTROL_NOT_LAST 0x000u | ||
208 | #define FIT_SGD_CONTROL_LAST 0x40Eu | ||
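Since the device walks this list by bus address, each descriptor's next_desc_ptr must point at the physical location of the following 32-byte slot, which is what skd_cons_sg_list() in skd_main.c sets up. A minimal sketch of chaining n contiguously allocated descriptors, with an illustrative function name and a compile-time size check added for clarity:

    static void fit_sg_chain_example(struct fit_sg_descriptor *sgd, u32 n,
                                     dma_addr_t dma_base)
    {
            u32 i;

            BUILD_BUG_ON(sizeof(struct fit_sg_descriptor) != 32);

            if (n == 0)
                    return;

            for (i = 0; i + 1 < n; i++)
                    sgd[i].next_desc_ptr = dma_base + (i + 1) * sizeof(*sgd);

            sgd[n - 1].next_desc_ptr = 0LL; /* terminate the chain */
    }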
209 | |||
210 | /* | ||
211 | * Header at the beginning of a FIT message. The header | ||
212 | * is followed by SSDI requests each 64 bytes. | ||
213 | * A FIT message can be up to 512 bytes long and must start | ||
214 | * on a 64-byte boundary. | ||
215 | */ | ||
216 | struct fit_msg_hdr { | ||
217 | uint8_t protocol_id; | ||
218 | uint8_t num_protocol_cmds_coalesced; | ||
219 | uint8_t _reserved[62]; | ||
220 | }; | ||
221 | |||
222 | #define FIT_PROTOCOL_ID_FIT 1 | ||
223 | #define FIT_PROTOCOL_ID_SSDI 2 | ||
224 | #define FIT_PROTOCOL_ID_SOFIT 3 | ||
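With #pragma pack(1) the header is exactly 64 bytes, so the first coalesced request starts one slot into the message buffer. A small sketch of initialising the header; the function name is illustrative, and the SOFIT protocol id mirrors what skd_main.c appears to use for its block I/O path:

    static void fit_msg_hdr_init_example(struct fit_msg_hdr *fmh, u8 n_cmds)
    {
            BUILD_BUG_ON(sizeof(struct fit_msg_hdr) != 64);

            memset(fmh, 0, sizeof(*fmh));
            fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
            fmh->num_protocol_cmds_coalesced = n_cmds;
    }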
225 | |||
226 | |||
227 | #define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF) | ||
228 | #define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF) | ||
229 | |||
230 | /* | ||
231 | * Format of a completion entry. The completion queue is circular | ||
232 | * and must have at least as many entries as the maximum number | ||
233 | * of commands that may be issued to the device. | ||
234 | * | ||
235 | * There are no head/tail pointers. The cycle value is used to | ||
236 | * infer the presence of new completion records. | ||
237 | * Initially the cycle in all entries is 0, the index is 0, and | ||
238 | * the cycle value to expect is 1. When completions are added | ||
239 | * their cycle values are set to 1. When the index wraps the | ||
240 | * cycle value to expect is incremented. | ||
241 | * | ||
242 | * Command_context is opaque and taken verbatim from the SSDI command. | ||
243 | * All other fields are big endian. | ||
244 | */ | ||
245 | #define FIT_PROTOCOL_VERSION_0 0 | ||
246 | |||
247 | /* | ||
248 | * Protocol major version 1 completion entry. | ||
249 | * The major protocol version is found in bits | ||
250 | * 20-23 of the FIT_MTD_FITFW_INIT response. | ||
251 | */ | ||
252 | struct fit_completion_entry_v1 { | ||
253 | uint32_t num_returned_bytes; | ||
254 | uint16_t tag; | ||
255 | uint8_t status; /* SCSI status */ | ||
256 | uint8_t cycle; | ||
257 | }; | ||
258 | #define FIT_PROTOCOL_VERSION_1 1 | ||
259 | #define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1 | ||
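The cycle field is how new completions are detected without any head or tail register: the consumer keeps an expected cycle value, consumes entries whose cycle matches it, and bumps the expectation each time the index wraps. A simplified sketch of that loop, using the skcomp_table/skcomp_ix/skcomp_cycle fields seen elsewhere in this patch; locking, cycle-wrap handling and the actual request completion are omitted, and the function name is illustrative:

    static void skd_drain_completions_example(struct skd_device *skdev)
    {
            for (;;) {
                    struct fit_completion_entry_v1 *ent =
                            &skdev->skcomp_table[skdev->skcomp_ix];

                    /* A new entry carries the currently expected cycle value. */
                    if (ent->cycle != skdev->skcomp_cycle)
                            break;

                    /* ... complete the request identified by ent->tag,
                     * checking ent->status (SCSI status), here ...
                     */

                    if (++skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
                            skdev->skcomp_ix = 0;
                            skdev->skcomp_cycle++; /* wrapped: expect next cycle */
                    }
            }
    }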
260 | |||
261 | struct fit_comp_error_info { | ||
262 | uint8_t type:7; /* 00: Bits 0-6 indicate the type of sense data. */ | ||
263 | uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */ | ||
264 | uint8_t reserved0; /* 01: Obsolete field */ | ||
265 | uint8_t key:4; /* 02: Bits 0-3 indicate the sense key. */ | ||
266 | uint8_t reserved2:1; /* 02: Reserved bit. */ | ||
267 | uint8_t bad_length:1; /* 02: Incorrect Length Indicator */ | ||
268 | uint8_t end_medium:1; /* 02: End of Medium */ | ||
269 | uint8_t file_mark:1; /* 02: Filemark */ | ||
270 | uint8_t info[4]; /* 03: */ | ||
271 | uint8_t reserved1; /* 07: Additional Sense Length */ | ||
272 | uint8_t cmd_spec[4]; /* 08: Command Specific Information */ | ||
273 | uint8_t code; /* 0C: Additional Sense Code */ | ||
274 | uint8_t qual; /* 0D: Additional Sense Code Qualifier */ | ||
275 | uint8_t fruc; /* 0E: Field Replaceable Unit Code */ | ||
276 | uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */ | ||
277 | uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */ | ||
278 | uint16_t sks_low; /* 10: Sense Key Specific (LSW) */ | ||
279 | uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */ | ||
280 | uint16_t uec; /* 14: Additional Sense Bytes */ | ||
281 | uint64_t per; /* 16: Additional Sense Bytes */ | ||
282 | uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */ | ||
283 | }; | ||
284 | |||
285 | |||
286 | /* Task management constants */ | ||
287 | #define SOFT_TASK_SIMPLE 0x00 | ||
288 | #define SOFT_TASK_HEAD_OF_QUEUE 0x01 | ||
289 | #define SOFT_TASK_ORDERED 0x02 | ||
290 | |||
291 | /* Version zero has the last 32 bits reserved; | ||
292 |  * version one uses them for sg_list_len_bytes. | ||
293 |  */ | ||
294 | struct skd_command_header { | ||
295 | uint64_t sg_list_dma_address; | ||
296 | uint16_t tag; | ||
297 | uint8_t attribute; | ||
298 | uint8_t add_cdb_len; /* In 32 bit words */ | ||
299 | uint32_t sg_list_len_bytes; | ||
300 | }; | ||
301 | |||
302 | struct skd_scsi_request { | ||
303 | struct skd_command_header hdr; | ||
304 | unsigned char cdb[16]; | ||
305 | /* unsigned char _reserved[16]; */ | ||
306 | }; | ||
307 | |||
308 | struct driver_inquiry_data { | ||
309 | uint8_t peripheral_device_type:5; | ||
310 | uint8_t qualifier:3; | ||
311 | uint8_t page_code; | ||
312 | uint16_t page_length; | ||
313 | uint16_t pcie_bus_number; | ||
314 | uint8_t pcie_device_number; | ||
315 | uint8_t pcie_function_number; | ||
316 | uint8_t pcie_link_speed; | ||
317 | uint8_t pcie_link_lanes; | ||
318 | uint16_t pcie_vendor_id; | ||
319 | uint16_t pcie_device_id; | ||
320 | uint16_t pcie_subsystem_vendor_id; | ||
321 | uint16_t pcie_subsystem_device_id; | ||
322 | uint8_t reserved1[2]; | ||
323 | uint8_t reserved2[3]; | ||
324 | uint8_t driver_version_length; | ||
325 | uint8_t driver_version[0x14]; | ||
326 | }; | ||
327 | |||
328 | #pragma pack(pop, s1120_h) | ||
329 | |||
330 | #endif /* SKD_S1120_H */ | ||
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index bf4b9d282c04..6620b73d0490 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -887,6 +887,8 @@ static int dispatch_discard_io(struct xen_blkif *blkif, | |||
887 | unsigned long secure; | 887 | unsigned long secure; |
888 | struct phys_req preq; | 888 | struct phys_req preq; |
889 | 889 | ||
890 | xen_blkif_get(blkif); | ||
891 | |||
890 | preq.sector_number = req->u.discard.sector_number; | 892 | preq.sector_number = req->u.discard.sector_number; |
891 | preq.nr_sects = req->u.discard.nr_sectors; | 893 | preq.nr_sects = req->u.discard.nr_sectors; |
892 | 894 | ||
@@ -899,7 +901,6 @@ static int dispatch_discard_io(struct xen_blkif *blkif, | |||
899 | } | 901 | } |
900 | blkif->st_ds_req++; | 902 | blkif->st_ds_req++; |
901 | 903 | ||
902 | xen_blkif_get(blkif); | ||
903 | secure = (blkif->vbd.discard_secure && | 904 | secure = (blkif->vbd.discard_secure && |
904 | (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? | 905 | (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? |
905 | BLKDEV_DISCARD_SECURE : 0; | 906 | BLKDEV_DISCARD_SECURE : 0; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8d53ed293606..432db1b59b00 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -121,7 +121,8 @@ struct blkfront_info | |||
121 | struct work_struct work; | 121 | struct work_struct work; |
122 | struct gnttab_free_callback callback; | 122 | struct gnttab_free_callback callback; |
123 | struct blk_shadow shadow[BLK_RING_SIZE]; | 123 | struct blk_shadow shadow[BLK_RING_SIZE]; |
124 | struct list_head persistent_gnts; | 124 | struct list_head grants; |
125 | struct list_head indirect_pages; | ||
125 | unsigned int persistent_gnts_c; | 126 | unsigned int persistent_gnts_c; |
126 | unsigned long shadow_free; | 127 | unsigned long shadow_free; |
127 | unsigned int feature_flush; | 128 | unsigned int feature_flush; |
@@ -200,15 +201,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num) | |||
200 | if (!gnt_list_entry) | 201 | if (!gnt_list_entry) |
201 | goto out_of_memory; | 202 | goto out_of_memory; |
202 | 203 | ||
203 | granted_page = alloc_page(GFP_NOIO); | 204 | if (info->feature_persistent) { |
204 | if (!granted_page) { | 205 | granted_page = alloc_page(GFP_NOIO); |
205 | kfree(gnt_list_entry); | 206 | if (!granted_page) { |
206 | goto out_of_memory; | 207 | kfree(gnt_list_entry); |
208 | goto out_of_memory; | ||
209 | } | ||
210 | gnt_list_entry->pfn = page_to_pfn(granted_page); | ||
207 | } | 211 | } |
208 | 212 | ||
209 | gnt_list_entry->pfn = page_to_pfn(granted_page); | ||
210 | gnt_list_entry->gref = GRANT_INVALID_REF; | 213 | gnt_list_entry->gref = GRANT_INVALID_REF; |
211 | list_add(&gnt_list_entry->node, &info->persistent_gnts); | 214 | list_add(&gnt_list_entry->node, &info->grants); |
212 | i++; | 215 | i++; |
213 | } | 216 | } |
214 | 217 | ||
@@ -216,9 +219,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num) | |||
216 | 219 | ||
217 | out_of_memory: | 220 | out_of_memory: |
218 | list_for_each_entry_safe(gnt_list_entry, n, | 221 | list_for_each_entry_safe(gnt_list_entry, n, |
219 | &info->persistent_gnts, node) { | 222 | &info->grants, node) { |
220 | list_del(&gnt_list_entry->node); | 223 | list_del(&gnt_list_entry->node); |
221 | __free_page(pfn_to_page(gnt_list_entry->pfn)); | 224 | if (info->feature_persistent) |
225 | __free_page(pfn_to_page(gnt_list_entry->pfn)); | ||
222 | kfree(gnt_list_entry); | 226 | kfree(gnt_list_entry); |
223 | i--; | 227 | i--; |
224 | } | 228 | } |
@@ -227,13 +231,14 @@ out_of_memory: | |||
227 | } | 231 | } |
228 | 232 | ||
229 | static struct grant *get_grant(grant_ref_t *gref_head, | 233 | static struct grant *get_grant(grant_ref_t *gref_head, |
234 | unsigned long pfn, | ||
230 | struct blkfront_info *info) | 235 | struct blkfront_info *info) |
231 | { | 236 | { |
232 | struct grant *gnt_list_entry; | 237 | struct grant *gnt_list_entry; |
233 | unsigned long buffer_mfn; | 238 | unsigned long buffer_mfn; |
234 | 239 | ||
235 | BUG_ON(list_empty(&info->persistent_gnts)); | 240 | BUG_ON(list_empty(&info->grants)); |
236 | gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant, | 241 | gnt_list_entry = list_first_entry(&info->grants, struct grant, |
237 | node); | 242 | node); |
238 | list_del(&gnt_list_entry->node); | 243 | list_del(&gnt_list_entry->node); |
239 | 244 | ||
@@ -245,6 +250,10 @@ static struct grant *get_grant(grant_ref_t *gref_head, | |||
245 | /* Assign a gref to this page */ | 250 | /* Assign a gref to this page */ |
246 | gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); | 251 | gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); |
247 | BUG_ON(gnt_list_entry->gref == -ENOSPC); | 252 | BUG_ON(gnt_list_entry->gref == -ENOSPC); |
253 | if (!info->feature_persistent) { | ||
254 | BUG_ON(!pfn); | ||
255 | gnt_list_entry->pfn = pfn; | ||
256 | } | ||
248 | buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); | 257 | buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); |
249 | gnttab_grant_foreign_access_ref(gnt_list_entry->gref, | 258 | gnttab_grant_foreign_access_ref(gnt_list_entry->gref, |
250 | info->xbdev->otherend_id, | 259 | info->xbdev->otherend_id, |
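
Taken together with the fill_grant_buffer() hunk above, this changes what a free grant entry owns: with persistent grants the entry keeps its own pre-allocated page, without them the entry is just a slot and get_grant() now receives the pfn of the page the caller actually wants granted. A rough userspace model of that pool; struct pool, pool_fill() and pool_get() are illustrative stand-ins, and the fake pfns replace alloc_page():

    #include <assert.h>
    #include <stdlib.h>

    struct grant {
            unsigned long pfn;      /* 0 while the entry owns no page */
            struct grant *next;
    };

    struct pool {
            struct grant *head;
            int feature_persistent;
    };

    /* fill_grant_buffer(): only a persistent pool pre-owns backing pages. */
    static int pool_fill(struct pool *p, int num)
    {
            for (int i = 0; i < num; i++) {
                    struct grant *g = calloc(1, sizeof(*g));

                    if (!g)
                            return -1;
                    if (p->feature_persistent)
                            g->pfn = 0x1000 + i;    /* stand-in for alloc_page() */
                    g->next = p->head;
                    p->head = g;
            }
            return 0;
    }

    /* get_grant(): a non-persistent pool takes the pfn from the caller. */
    static struct grant *pool_get(struct pool *p, unsigned long pfn)
    {
            struct grant *g = p->head;

            assert(g);
            p->head = g->next;
            if (!p->feature_persistent) {
                    assert(pfn);
                    g->pfn = pfn;
            }
            return g;
    }

    int main(void)
    {
            struct pool p = { .head = NULL, .feature_persistent = 0 };

            if (pool_fill(&p, 4))
                    return 1;
            return pool_get(&p, 0x2000)->pfn == 0x2000 ? 0 : 1;
    }
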
@@ -400,10 +409,13 @@ static int blkif_queue_request(struct request *req) | |||
400 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) | 409 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) |
401 | return 1; | 410 | return 1; |
402 | 411 | ||
403 | max_grefs = info->max_indirect_segments ? | 412 | max_grefs = req->nr_phys_segments; |
404 | info->max_indirect_segments + | 413 | if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST) |
405 | INDIRECT_GREFS(info->max_indirect_segments) : | 414 | /* |
406 | BLKIF_MAX_SEGMENTS_PER_REQUEST; | 415 | * If we are using indirect segments we need to account |
416 | * for the indirect grefs used in the request. | ||
417 | */ | ||
418 | max_grefs += INDIRECT_GREFS(req->nr_phys_segments); | ||
407 | 419 | ||
408 | /* Check if we have enough grants to allocate a requests */ | 420 | /* Check if we have enough grants to allocate a requests */ |
409 | if (info->persistent_gnts_c < max_grefs) { | 421 | if (info->persistent_gnts_c < max_grefs) { |
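
The rewritten max_grefs calculation sizes the grant reservation from the request itself: one gref per data segment, plus one per indirect frame once the segment count exceeds BLKIF_MAX_SEGMENTS_PER_REQUEST. A back-of-the-envelope check, assuming 512 segment slots per indirect frame (PAGE_SIZE divided by an 8-byte segment entry) and the usual limit of 11 direct segments; the real constants come from the blkif headers:

    #include <stdio.h>

    #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
    #define SEGS_PER_INDIRECT_FRAME 512     /* assumed: 4096 / 8 */
    #define INDIRECT_GREFS(n) \
            (((n) + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)

    static unsigned int max_grefs_for(unsigned int nr_phys_segments)
    {
            unsigned int max_grefs = nr_phys_segments;

            if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
                    max_grefs += INDIRECT_GREFS(nr_phys_segments);
            return max_grefs;
    }

    int main(void)
    {
            printf("%u\n", max_grefs_for(8));       /* 8: fits a direct request */
            printf("%u\n", max_grefs_for(64));      /* 65: 64 data grefs + 1 indirect frame */
            return 0;
    }
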
@@ -477,22 +489,34 @@ static int blkif_queue_request(struct request *req) | |||
477 | 489 | ||
478 | if ((ring_req->operation == BLKIF_OP_INDIRECT) && | 490 | if ((ring_req->operation == BLKIF_OP_INDIRECT) && |
479 | (i % SEGS_PER_INDIRECT_FRAME == 0)) { | 491 | (i % SEGS_PER_INDIRECT_FRAME == 0)) { |
492 | unsigned long pfn; | ||
493 | |||
480 | if (segments) | 494 | if (segments) |
481 | kunmap_atomic(segments); | 495 | kunmap_atomic(segments); |
482 | 496 | ||
483 | n = i / SEGS_PER_INDIRECT_FRAME; | 497 | n = i / SEGS_PER_INDIRECT_FRAME; |
484 | gnt_list_entry = get_grant(&gref_head, info); | 498 | if (!info->feature_persistent) { |
499 | struct page *indirect_page; | ||
500 | |||
501 | /* Fetch a pre-allocated page to use for indirect grefs */ | ||
502 | BUG_ON(list_empty(&info->indirect_pages)); | ||
503 | indirect_page = list_first_entry(&info->indirect_pages, | ||
504 | struct page, lru); | ||
505 | list_del(&indirect_page->lru); | ||
506 | pfn = page_to_pfn(indirect_page); | ||
507 | } | ||
508 | gnt_list_entry = get_grant(&gref_head, pfn, info); | ||
485 | info->shadow[id].indirect_grants[n] = gnt_list_entry; | 509 | info->shadow[id].indirect_grants[n] = gnt_list_entry; |
486 | segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); | 510 | segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); |
487 | ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; | 511 | ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; |
488 | } | 512 | } |
489 | 513 | ||
490 | gnt_list_entry = get_grant(&gref_head, info); | 514 | gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); |
491 | ref = gnt_list_entry->gref; | 515 | ref = gnt_list_entry->gref; |
492 | 516 | ||
493 | info->shadow[id].grants_used[i] = gnt_list_entry; | 517 | info->shadow[id].grants_used[i] = gnt_list_entry; |
494 | 518 | ||
495 | if (rq_data_dir(req)) { | 519 | if (rq_data_dir(req) && info->feature_persistent) { |
496 | char *bvec_data; | 520 | char *bvec_data; |
497 | void *shared_data; | 521 | void *shared_data; |
498 | 522 | ||
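
Within the queueing loop, an indirect request opens a new indirect frame every SEGS_PER_INDIRECT_FRAME segments; when persistent grants are off, the page backing that frame is taken from the pre-allocated info->indirect_pages list rather than from the grant entry, and data segments are granted straight out of the bio pages. A small model of how many spare pages one indirect request consumes, under the same 512-slot assumption as above:

    #include <stdio.h>

    #define SEGS_PER_INDIRECT_FRAME 512     /* assumed */

    /* Walk the segments the way the queueing loop does and count the
     * indirect frames (hence spare pages) an indirect request needs. */
    static int indirect_pages_used(int nseg)
    {
            int used = 0;

            for (int i = 0; i < nseg; i++)
                    if (i % SEGS_PER_INDIRECT_FRAME == 0)
                            used++;
            return used;
    }

    int main(void)
    {
            printf("%d\n", indirect_pages_used(512));       /* 1 */
            printf("%d\n", indirect_pages_used(513));       /* 2 */
            return 0;
    }
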
@@ -904,21 +928,36 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
904 | blk_stop_queue(info->rq); | 928 | blk_stop_queue(info->rq); |
905 | 929 | ||
906 | /* Remove all persistent grants */ | 930 | /* Remove all persistent grants */ |
907 | if (!list_empty(&info->persistent_gnts)) { | 931 | if (!list_empty(&info->grants)) { |
908 | list_for_each_entry_safe(persistent_gnt, n, | 932 | list_for_each_entry_safe(persistent_gnt, n, |
909 | &info->persistent_gnts, node) { | 933 | &info->grants, node) { |
910 | list_del(&persistent_gnt->node); | 934 | list_del(&persistent_gnt->node); |
911 | if (persistent_gnt->gref != GRANT_INVALID_REF) { | 935 | if (persistent_gnt->gref != GRANT_INVALID_REF) { |
912 | gnttab_end_foreign_access(persistent_gnt->gref, | 936 | gnttab_end_foreign_access(persistent_gnt->gref, |
913 | 0, 0UL); | 937 | 0, 0UL); |
914 | info->persistent_gnts_c--; | 938 | info->persistent_gnts_c--; |
915 | } | 939 | } |
916 | __free_page(pfn_to_page(persistent_gnt->pfn)); | 940 | if (info->feature_persistent) |
941 | __free_page(pfn_to_page(persistent_gnt->pfn)); | ||
917 | kfree(persistent_gnt); | 942 | kfree(persistent_gnt); |
918 | } | 943 | } |
919 | } | 944 | } |
920 | BUG_ON(info->persistent_gnts_c != 0); | 945 | BUG_ON(info->persistent_gnts_c != 0); |
921 | 946 | ||
947 | /* | ||
948 | * Remove indirect pages, this only happens when using indirect | ||
949 | * descriptors but not persistent grants | ||
950 | */ | ||
951 | if (!list_empty(&info->indirect_pages)) { | ||
952 | struct page *indirect_page, *n; | ||
953 | |||
954 | BUG_ON(info->feature_persistent); | ||
955 | list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { | ||
956 | list_del(&indirect_page->lru); | ||
957 | __free_page(indirect_page); | ||
958 | } | ||
959 | } | ||
960 | |||
922 | for (i = 0; i < BLK_RING_SIZE; i++) { | 961 | for (i = 0; i < BLK_RING_SIZE; i++) { |
923 | /* | 962 | /* |
924 | * Clear persistent grants present in requests already | 963 | * Clear persistent grants present in requests already |
@@ -933,7 +972,8 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
933 | for (j = 0; j < segs; j++) { | 972 | for (j = 0; j < segs; j++) { |
934 | persistent_gnt = info->shadow[i].grants_used[j]; | 973 | persistent_gnt = info->shadow[i].grants_used[j]; |
935 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); | 974 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); |
936 | __free_page(pfn_to_page(persistent_gnt->pfn)); | 975 | if (info->feature_persistent) |
976 | __free_page(pfn_to_page(persistent_gnt->pfn)); | ||
937 | kfree(persistent_gnt); | 977 | kfree(persistent_gnt); |
938 | } | 978 | } |
939 | 979 | ||
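
In blkif_free() the backing page of a grant is now released only when the frontend allocated it, i.e. only in persistent mode; the spare pages used for indirect frames live on info->indirect_pages, which can only be non-empty when persistent grants are off, and are drained in their own loop. A compact sketch of the grant-side teardown, with printf standing in for __free_page():

    #include <stdio.h>
    #include <stdlib.h>

    struct grant {
            unsigned long pfn;
            struct grant *next;
    };

    /* The grant struct is always freed; its page only if the grant owns one. */
    static void free_grants(struct grant *g, int feature_persistent)
    {
            while (g) {
                    struct grant *next = g->next;

                    if (feature_persistent)
                            printf("free page %#lx\n", g->pfn);
                    free(g);
                    g = next;
            }
    }

    int main(void)
    {
            struct grant *g = calloc(1, sizeof(*g));

            if (!g)
                    return 1;
            g->pfn = 0x1234;
            free_grants(g, 1);      /* persistent: the page goes with the grant */
            return 0;
    }
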
@@ -992,7 +1032,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
992 | nseg = s->req.operation == BLKIF_OP_INDIRECT ? | 1032 | nseg = s->req.operation == BLKIF_OP_INDIRECT ? |
993 | s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; | 1033 | s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; |
994 | 1034 | ||
995 | if (bret->operation == BLKIF_OP_READ) { | 1035 | if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { |
996 | /* | 1036 | /* |
997 | * Copy the data received from the backend into the bvec. | 1037 | * Copy the data received from the backend into the bvec. |
998 | * Since bv_offset can be different than 0, and bv_len different | 1038 | * Since bv_offset can be different than 0, and bv_len different |
@@ -1013,13 +1053,51 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
1013 | } | 1053 | } |
1014 | /* Add the persistent grant into the list of free grants */ | 1054 | /* Add the persistent grant into the list of free grants */ |
1015 | for (i = 0; i < nseg; i++) { | 1055 | for (i = 0; i < nseg; i++) { |
1016 | list_add(&s->grants_used[i]->node, &info->persistent_gnts); | 1056 | if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { |
1017 | info->persistent_gnts_c++; | 1057 | /* |
1058 | * If the grant is still mapped by the backend (the | ||
1059 | * backend has chosen to make this grant persistent) | ||
1060 | * we add it at the head of the list, so it will be | ||
1061 | * reused first. | ||
1062 | */ | ||
1063 | if (!info->feature_persistent) | ||
1064 | pr_alert_ratelimited("backend has not unmapped grant: %u\n", | ||
1065 | s->grants_used[i]->gref); | ||
1066 | list_add(&s->grants_used[i]->node, &info->grants); | ||
1067 | info->persistent_gnts_c++; | ||
1068 | } else { | ||
1069 | /* | ||
1070 | * If the grant is not mapped by the backend we end the | ||
1071 | * foreign access and add it to the tail of the list, | ||
1072 | * so it will not be picked again unless we run out of | ||
1073 | * persistent grants. | ||
1074 | */ | ||
1075 | gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); | ||
1076 | s->grants_used[i]->gref = GRANT_INVALID_REF; | ||
1077 | list_add_tail(&s->grants_used[i]->node, &info->grants); | ||
1078 | } | ||
1018 | } | 1079 | } |
1019 | if (s->req.operation == BLKIF_OP_INDIRECT) { | 1080 | if (s->req.operation == BLKIF_OP_INDIRECT) { |
1020 | for (i = 0; i < INDIRECT_GREFS(nseg); i++) { | 1081 | for (i = 0; i < INDIRECT_GREFS(nseg); i++) { |
1021 | list_add(&s->indirect_grants[i]->node, &info->persistent_gnts); | 1082 | if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { |
1022 | info->persistent_gnts_c++; | 1083 | if (!info->feature_persistent) |
1084 | pr_alert_ratelimited("backend has not unmapped grant: %u\n", | ||
1085 | s->indirect_grants[i]->gref); | ||
1086 | list_add(&s->indirect_grants[i]->node, &info->grants); | ||
1087 | info->persistent_gnts_c++; | ||
1088 | } else { | ||
1089 | struct page *indirect_page; | ||
1090 | |||
1091 | gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); | ||
1092 | /* | ||
1093 | * Add the used indirect page back to the list of | ||
1094 | * available pages for indirect grefs. | ||
1095 | */ | ||
1096 | indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); | ||
1097 | list_add(&indirect_page->lru, &info->indirect_pages); | ||
1098 | s->indirect_grants[i]->gref = GRANT_INVALID_REF; | ||
1099 | list_add_tail(&s->indirect_grants[i]->node, &info->grants); | ||
1100 | } | ||
1023 | } | 1101 | } |
1024 | } | 1102 | } |
1025 | } | 1103 | } |
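
The completion path now decides per grant how to recycle it: if gnttab_query_foreign_access() reports the backend still has the grant mapped, it goes back on the head of info->grants so it is reused first; otherwise foreign access is ended, the gref is invalidated, and the entry goes to the tail so it is only picked once the still-mapped ones run out (indirect grefs additionally return their page to info->indirect_pages). A small userspace model of that head/tail policy; the mapped flag stands in for gnttab_query_foreign_access() and ~0u for GRANT_INVALID_REF:

    #include <stdio.h>

    struct grant {
            unsigned int gref;
            int mapped;
            struct grant *prev, *next;
    };

    struct grant_list {
            struct grant *head, *tail;
    };

    static void push_head(struct grant_list *l, struct grant *g)
    {
            g->prev = NULL;
            g->next = l->head;
            if (l->head)
                    l->head->prev = g;
            l->head = g;
            if (!l->tail)
                    l->tail = g;
    }

    static void push_tail(struct grant_list *l, struct grant *g)
    {
            g->next = NULL;
            g->prev = l->tail;
            if (l->tail)
                    l->tail->next = g;
            l->tail = g;
            if (!l->head)
                    l->head = g;
    }

    /* Keep still-mapped grants hot, retire the rest to the back of the list. */
    static void recycle(struct grant_list *free_list, struct grant *g)
    {
            if (g->mapped) {
                    push_head(free_list, g);
            } else {
                    g->gref = ~0u;          /* access already ended */
                    push_tail(free_list, g);
            }
    }

    int main(void)
    {
            struct grant_list free_list = { 0 };
            struct grant a = { .gref = 1, .mapped = 1 };
            struct grant b = { .gref = 2, .mapped = 0 };

            recycle(&free_list, &b);
            recycle(&free_list, &a);
            printf("next to reuse: gref %u\n", free_list.head->gref);       /* 1 */
            return 0;
    }
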
@@ -1313,7 +1391,8 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1313 | spin_lock_init(&info->io_lock); | 1391 | spin_lock_init(&info->io_lock); |
1314 | info->xbdev = dev; | 1392 | info->xbdev = dev; |
1315 | info->vdevice = vdevice; | 1393 | info->vdevice = vdevice; |
1316 | INIT_LIST_HEAD(&info->persistent_gnts); | 1394 | INIT_LIST_HEAD(&info->grants); |
1395 | INIT_LIST_HEAD(&info->indirect_pages); | ||
1317 | info->persistent_gnts_c = 0; | 1396 | info->persistent_gnts_c = 0; |
1318 | info->connected = BLKIF_STATE_DISCONNECTED; | 1397 | info->connected = BLKIF_STATE_DISCONNECTED; |
1319 | INIT_WORK(&info->work, blkif_restart_queue); | 1398 | INIT_WORK(&info->work, blkif_restart_queue); |
@@ -1609,6 +1688,23 @@ static int blkfront_setup_indirect(struct blkfront_info *info) | |||
1609 | if (err) | 1688 | if (err) |
1610 | goto out_of_memory; | 1689 | goto out_of_memory; |
1611 | 1690 | ||
1691 | if (!info->feature_persistent && info->max_indirect_segments) { | ||
1692 | /* | ||
1693 | * We are using indirect descriptors but not persistent | ||
1694 | * grants, we need to allocate a set of pages that can be | ||
1695 | * used for mapping indirect grefs | ||
1696 | */ | ||
1697 | int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE; | ||
1698 | |||
1699 | BUG_ON(!list_empty(&info->indirect_pages)); | ||
1700 | for (i = 0; i < num; i++) { | ||
1701 | struct page *indirect_page = alloc_page(GFP_NOIO); | ||
1702 | if (!indirect_page) | ||
1703 | goto out_of_memory; | ||
1704 | list_add(&indirect_page->lru, &info->indirect_pages); | ||
1705 | } | ||
1706 | } | ||
1707 | |||
1612 | for (i = 0; i < BLK_RING_SIZE; i++) { | 1708 | for (i = 0; i < BLK_RING_SIZE; i++) { |
1613 | info->shadow[i].grants_used = kzalloc( | 1709 | info->shadow[i].grants_used = kzalloc( |
1614 | sizeof(info->shadow[i].grants_used[0]) * segs, | 1710 | sizeof(info->shadow[i].grants_used[0]) * segs, |
@@ -1639,6 +1735,13 @@ out_of_memory: | |||
1639 | kfree(info->shadow[i].indirect_grants); | 1735 | kfree(info->shadow[i].indirect_grants); |
1640 | info->shadow[i].indirect_grants = NULL; | 1736 | info->shadow[i].indirect_grants = NULL; |
1641 | } | 1737 | } |
1738 | if (!list_empty(&info->indirect_pages)) { | ||
1739 | struct page *indirect_page, *n; | ||
1740 | list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { | ||
1741 | list_del(&indirect_page->lru); | ||
1742 | __free_page(indirect_page); | ||
1743 | } | ||
1744 | } | ||
1642 | return -ENOMEM; | 1745 | return -ENOMEM; |
1643 | } | 1746 | } |
1644 | 1747 | ||
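
blkfront_setup_indirect() sizes the spare-page pool for the worst case: every slot in the ring carrying a maximally indirect request, i.e. INDIRECT_GREFS(segs) * BLK_RING_SIZE pages, allocated once at connect time so the hot path never allocates. A quick sizing check under the same 512-slot assumption, and assuming the usual single-page ring of 32 entries (the real value comes from __CONST_RING_SIZE in the ring headers):

    #include <stdio.h>

    #define SEGS_PER_INDIRECT_FRAME 512     /* assumed */
    #define BLK_RING_SIZE 32                /* assumed single-page ring */
    #define INDIRECT_GREFS(n) \
            (((n) + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)

    int main(void)
    {
            unsigned int segs = 256;        /* e.g. a backend advertising 256 indirect segments */
            unsigned int pages = INDIRECT_GREFS(segs) * BLK_RING_SIZE;

            printf("spare indirect pages: %u (%u KiB)\n", pages, pages * 4);
            return 0;
    }
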
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 244f77f844f0..f302efa937ef 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -2979,12 +2979,12 @@ static int dasd_alloc_queue(struct dasd_block *block) | |||
2979 | 2979 | ||
2980 | elevator_exit(block->request_queue->elevator); | 2980 | elevator_exit(block->request_queue->elevator); |
2981 | block->request_queue->elevator = NULL; | 2981 | block->request_queue->elevator = NULL; |
2982 | mutex_lock(&block->request_queue->sysfs_lock); | ||
2982 | rc = elevator_init(block->request_queue, "deadline"); | 2983 | rc = elevator_init(block->request_queue, "deadline"); |
2983 | if (rc) { | 2984 | if (rc) |
2984 | blk_cleanup_queue(block->request_queue); | 2985 | blk_cleanup_queue(block->request_queue); |
2985 | return rc; | 2986 | mutex_unlock(&block->request_queue->sysfs_lock); |
2986 | } | 2987 | return rc; |
2987 | return 0; | ||
2988 | } | 2988 | } |
2989 | 2989 | ||
2990 | /* | 2990 | /* |
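
The dasd hunk takes the request queue's sysfs_lock around elevator_init() and folds the error handling so the single mutex_unlock() runs on both the success and the failure path before returning rc. The shape of the change as a generic lock-around-call pattern; the pthread mutex and the two stubs are stand-ins, not the block layer's API:

    /* Build with: cc -pthread elevator_lock_model.c */
    #include <pthread.h>

    struct queue {
            pthread_mutex_t sysfs_lock;
    };

    static int elevator_init_stub(struct queue *q, const char *name)
    {
            (void)q; (void)name;
            return 0;                       /* pretend initialisation succeeded */
    }

    static void cleanup_queue_stub(struct queue *q) { (void)q; }

    /* One unlock covers both outcomes, as in the patched function. */
    static int alloc_queue(struct queue *q)
    {
            int rc;

            pthread_mutex_lock(&q->sysfs_lock);
            rc = elevator_init_stub(q, "deadline");
            if (rc)
                    cleanup_queue_stub(q);
            pthread_mutex_unlock(&q->sysfs_lock);
            return rc;
    }

    int main(void)
    {
            struct queue q = { .sysfs_lock = PTHREAD_MUTEX_INITIALIZER };

            return alloc_queue(&q);
    }
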