Diffstat (limited to 'drivers/block/skd_main.c')
-rw-r--r-- | drivers/block/skd_main.c | 5432 |
1 files changed, 5432 insertions, 0 deletions
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
new file mode 100644
index 000000000000..9199c93be926
--- /dev/null
+++ b/drivers/block/skd_main.c
@@ -0,0 +1,5432 @@
1 | /* Copyright 2012 STEC, Inc. | ||
2 | * | ||
3 | * This file is licensed under the terms of the 3-clause | ||
4 | * BSD License (http://opensource.org/licenses/BSD-3-Clause) | ||
5 | * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), | ||
6 | * at your option. Both licenses are also available in the LICENSE file | ||
7 | * distributed with this project. This file may not be copied, modified, | ||
8 | * or distributed except in accordance with those terms. | ||
9 | * Gordoni Waidhofer <gwaidhofer@stec-inc.com> | ||
10 | * Initial Driver Design! | ||
11 | * Thomas Swann <tswann@stec-inc.com> | ||
12 | * Interrupt handling. | ||
13 | * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com> | ||
14 | * biomode implementation. | ||
15 | * Akhil Bhansali <abhansali@stec-inc.com> | ||
16 | * Added support for DISCARD / FLUSH and FUA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/blkdev.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/compiler.h> | ||
29 | #include <linux/workqueue.h> | ||
30 | #include <linux/bitops.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/time.h> | ||
33 | #include <linux/hdreg.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/completion.h> | ||
36 | #include <linux/scatterlist.h> | ||
37 | #include <linux/version.h> | ||
38 | #include <linux/err.h> | ||
39 | #include <linux/scatterlist.h> | ||
40 | #include <linux/aer.h> | ||
41 | #include <linux/ctype.h> | ||
42 | #include <linux/wait.h> | ||
43 | #include <linux/uio.h> | ||
44 | #include <scsi/scsi.h> | ||
45 | #include <scsi/sg.h> | ||
46 | #include <linux/io.h> | ||
47 | #include <linux/uaccess.h> | ||
48 | #include <asm/unaligned.h> | ||
49 | |||
50 | #include "skd_s1120.h" | ||
51 | |||
52 | static int skd_dbg_level; | ||
53 | static int skd_isr_comp_limit = 4; | ||
54 | |||
55 | enum { | ||
56 | STEC_LINK_2_5GTS = 0, | ||
57 | STEC_LINK_5GTS = 1, | ||
58 | STEC_LINK_8GTS = 2, | ||
59 | STEC_LINK_UNKNOWN = 0xFF | ||
60 | }; | ||
61 | |||
62 | enum { | ||
63 | SKD_FLUSH_INITIALIZER, | ||
64 | SKD_FLUSH_ZERO_SIZE_FIRST, | ||
65 | SKD_FLUSH_DATA_SECOND, | ||
66 | }; | ||
67 | |||
68 | #define SKD_ASSERT(expr) \ | ||
69 | do { \ | ||
70 | if (unlikely(!(expr))) { \ | ||
71 | pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ | ||
72 | # expr, __FILE__, __func__, __LINE__); \ | ||
73 | } \ | ||
74 | } while (0) | ||
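/*
 * Note that SKD_ASSERT() only logs the failed expression; unlike BUG_ON()
 * it does not stop execution, so code that follows a failed assertion
 * still runs.
 */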
75 | |||
76 | #define DRV_NAME "skd" | ||
77 | #define DRV_VERSION "2.2.1" | ||
78 | #define DRV_BUILD_ID "0260" | ||
79 | #define PFX DRV_NAME ": " | ||
80 | #define DRV_BIN_VERSION 0x100 | ||
81 | #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID | ||
82 | |||
83 | MODULE_AUTHOR("bug-reports: support@stec-inc.com"); | ||
84 | MODULE_LICENSE("Dual BSD/GPL"); | ||
85 | |||
86 | MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")"); | ||
87 | MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); | ||
88 | |||
89 | #define PCI_VENDOR_ID_STEC 0x1B39 | ||
90 | #define PCI_DEVICE_ID_S1120 0x0001 | ||
91 | |||
92 | #define SKD_FUA_NV (1 << 1) | ||
93 | #define SKD_MINORS_PER_DEVICE 16 | ||
94 | |||
95 | #define SKD_MAX_QUEUE_DEPTH 200u | ||
96 | |||
97 | #define SKD_PAUSE_TIMEOUT (5 * 1000) | ||
98 | |||
99 | #define SKD_N_FITMSG_BYTES (512u) | ||
100 | |||
101 | #define SKD_N_SPECIAL_CONTEXT 32u | ||
102 | #define SKD_N_SPECIAL_FITMSG_BYTES (128u) | ||
103 | |||
104 | /* SG elements are 32 bytes, so we can make this 4096 and still be under the | ||
105 | * 128KB limit. That allows 4096*4K = 16M xfer size | ||
106 | */ | ||
107 | #define SKD_N_SG_PER_REQ_DEFAULT 256u | ||
108 | #define SKD_N_SG_PER_SPECIAL 256u | ||
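/*
 * Worked example of the sizing comment above: at 32 bytes per SG element,
 * 4096 elements take 4096 * 32 = 128 KiB of descriptor space and, with one
 * 4 KiB page per element, cover a 4096 * 4 KiB = 16 MiB transfer.  The
 * defaults of 256 elements per request correspondingly cover 256 * 4 KiB
 * = 1 MiB per request under the same one-page-per-element assumption.
 */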
109 | |||
110 | #define SKD_N_COMPLETION_ENTRY 256u | ||
111 | #define SKD_N_READ_CAP_BYTES (8u) | ||
112 | |||
113 | #define SKD_N_INTERNAL_BYTES (512u) | ||
114 | |||
115 | /* 5 bits of uniquifier, 0xF800 */ | ||
116 | #define SKD_ID_INCR (0x400) | ||
117 | #define SKD_ID_TABLE_MASK (3u << 8u) | ||
118 | #define SKD_ID_RW_REQUEST (0u << 8u) | ||
119 | #define SKD_ID_INTERNAL (1u << 8u) | ||
120 | #define SKD_ID_SPECIAL_REQUEST (2u << 8u) | ||
121 | #define SKD_ID_FIT_MSG (3u << 8u) | ||
122 | #define SKD_ID_SLOT_MASK 0x00FFu | ||
123 | #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu | ||
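/*
 * Example of how the pieces above combine: a read/write request in slot 5
 * starts out as SKD_ID_RW_REQUEST | 5 = 0x0005.  Each reuse adds
 * SKD_ID_INCR (0x0005 -> 0x0405 -> 0x0805 ...), so the high bits act as a
 * generation counter, while id & SKD_ID_SLOT_AND_TABLE_MASK still yields
 * the owning table and slot (0x0805 & 0x03FF = 0x0005).
 */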
124 | |||
125 | #define SKD_N_TIMEOUT_SLOT 4u | ||
126 | #define SKD_TIMEOUT_SLOT_MASK 3u | ||
127 | |||
128 | #define SKD_N_MAX_SECTORS 2048u | ||
129 | |||
130 | #define SKD_MAX_RETRIES 2u | ||
131 | |||
132 | #define SKD_TIMER_SECONDS(seconds) (seconds) | ||
133 | #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) | ||
134 | |||
135 | #define INQ_STD_NBYTES 36 | ||
136 | #define SKD_DISCARD_CDB_LENGTH 24 | ||
137 | |||
138 | enum skd_drvr_state { | ||
139 | SKD_DRVR_STATE_LOAD, | ||
140 | SKD_DRVR_STATE_IDLE, | ||
141 | SKD_DRVR_STATE_BUSY, | ||
142 | SKD_DRVR_STATE_STARTING, | ||
143 | SKD_DRVR_STATE_ONLINE, | ||
144 | SKD_DRVR_STATE_PAUSING, | ||
145 | SKD_DRVR_STATE_PAUSED, | ||
146 | SKD_DRVR_STATE_DRAINING_TIMEOUT, | ||
147 | SKD_DRVR_STATE_RESTARTING, | ||
148 | SKD_DRVR_STATE_RESUMING, | ||
149 | SKD_DRVR_STATE_STOPPING, | ||
150 | SKD_DRVR_STATE_FAULT, | ||
151 | SKD_DRVR_STATE_DISAPPEARED, | ||
152 | SKD_DRVR_STATE_PROTOCOL_MISMATCH, | ||
153 | SKD_DRVR_STATE_BUSY_ERASE, | ||
154 | SKD_DRVR_STATE_BUSY_SANITIZE, | ||
155 | SKD_DRVR_STATE_BUSY_IMMINENT, | ||
156 | SKD_DRVR_STATE_WAIT_BOOT, | ||
157 | SKD_DRVR_STATE_SYNCING, | ||
158 | }; | ||
159 | |||
160 | #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) | ||
161 | #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) | ||
162 | #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) | ||
163 | #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u) | ||
164 | #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) | ||
165 | #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) | ||
166 | #define SKD_START_WAIT_SECONDS 90u | ||
167 | |||
168 | enum skd_req_state { | ||
169 | SKD_REQ_STATE_IDLE, | ||
170 | SKD_REQ_STATE_SETUP, | ||
171 | SKD_REQ_STATE_BUSY, | ||
172 | SKD_REQ_STATE_COMPLETED, | ||
173 | SKD_REQ_STATE_TIMEOUT, | ||
174 | SKD_REQ_STATE_ABORTED, | ||
175 | }; | ||
176 | |||
177 | enum skd_fit_msg_state { | ||
178 | SKD_MSG_STATE_IDLE, | ||
179 | SKD_MSG_STATE_BUSY, | ||
180 | }; | ||
181 | |||
182 | enum skd_check_status_action { | ||
183 | SKD_CHECK_STATUS_REPORT_GOOD, | ||
184 | SKD_CHECK_STATUS_REPORT_SMART_ALERT, | ||
185 | SKD_CHECK_STATUS_REQUEUE_REQUEST, | ||
186 | SKD_CHECK_STATUS_REPORT_ERROR, | ||
187 | SKD_CHECK_STATUS_BUSY_IMMINENT, | ||
188 | }; | ||
189 | |||
190 | struct skd_fitmsg_context { | ||
191 | enum skd_fit_msg_state state; | ||
192 | |||
193 | struct skd_fitmsg_context *next; | ||
194 | |||
195 | u32 id; | ||
196 | u16 outstanding; | ||
197 | |||
198 | u32 length; | ||
199 | u32 offset; | ||
200 | |||
201 | u8 *msg_buf; | ||
202 | dma_addr_t mb_dma_address; | ||
203 | }; | ||
204 | |||
205 | struct skd_request_context { | ||
206 | enum skd_req_state state; | ||
207 | |||
208 | struct skd_request_context *next; | ||
209 | |||
210 | u16 id; | ||
211 | u32 fitmsg_id; | ||
212 | |||
213 | struct request *req; | ||
214 | u8 flush_cmd; | ||
215 | u8 discard_page; | ||
216 | |||
217 | u32 timeout_stamp; | ||
218 | u8 sg_data_dir; | ||
219 | struct scatterlist *sg; | ||
220 | u32 n_sg; | ||
221 | u32 sg_byte_count; | ||
222 | |||
223 | struct fit_sg_descriptor *sksg_list; | ||
224 | dma_addr_t sksg_dma_address; | ||
225 | |||
226 | struct fit_completion_entry_v1 completion; | ||
227 | |||
228 | struct fit_comp_error_info err_info; | ||
229 | |||
230 | }; | ||
231 | #define SKD_DATA_DIR_HOST_TO_CARD 1 | ||
232 | #define SKD_DATA_DIR_CARD_TO_HOST 2 | ||
233 | #define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */ | ||
234 | |||
235 | struct skd_special_context { | ||
236 | struct skd_request_context req; | ||
237 | |||
238 | u8 orphaned; | ||
239 | |||
240 | void *data_buf; | ||
241 | dma_addr_t db_dma_address; | ||
242 | |||
243 | u8 *msg_buf; | ||
244 | dma_addr_t mb_dma_address; | ||
245 | }; | ||
246 | |||
247 | struct skd_sg_io { | ||
248 | fmode_t mode; | ||
249 | void __user *argp; | ||
250 | |||
251 | struct sg_io_hdr sg; | ||
252 | |||
253 | u8 cdb[16]; | ||
254 | |||
255 | u32 dxfer_len; | ||
256 | u32 iovcnt; | ||
257 | struct sg_iovec *iov; | ||
258 | struct sg_iovec no_iov_iov; | ||
259 | |||
260 | struct skd_special_context *skspcl; | ||
261 | }; | ||
262 | |||
263 | typedef enum skd_irq_type { | ||
264 | SKD_IRQ_LEGACY, | ||
265 | SKD_IRQ_MSI, | ||
266 | SKD_IRQ_MSIX | ||
267 | } skd_irq_type_t; | ||
268 | |||
269 | #define SKD_MAX_BARS 2 | ||
270 | |||
271 | struct skd_device { | ||
272 | volatile void __iomem *mem_map[SKD_MAX_BARS]; | ||
273 | resource_size_t mem_phys[SKD_MAX_BARS]; | ||
274 | u32 mem_size[SKD_MAX_BARS]; | ||
275 | |||
276 | skd_irq_type_t irq_type; | ||
277 | u32 msix_count; | ||
278 | struct skd_msix_entry *msix_entries; | ||
279 | |||
280 | struct pci_dev *pdev; | ||
281 | int pcie_error_reporting_is_enabled; | ||
282 | |||
283 | spinlock_t lock; | ||
284 | struct gendisk *disk; | ||
285 | struct request_queue *queue; | ||
286 | struct device *class_dev; | ||
287 | int gendisk_on; | ||
288 | int sync_done; | ||
289 | |||
290 | atomic_t device_count; | ||
291 | u32 devno; | ||
292 | u32 major; | ||
293 | char name[32]; | ||
294 | char isr_name[30]; | ||
295 | |||
296 | enum skd_drvr_state state; | ||
297 | u32 drive_state; | ||
298 | |||
299 | u32 in_flight; | ||
300 | u32 cur_max_queue_depth; | ||
301 | u32 queue_low_water_mark; | ||
302 | u32 dev_max_queue_depth; | ||
303 | |||
304 | u32 num_fitmsg_context; | ||
305 | u32 num_req_context; | ||
306 | |||
307 | u32 timeout_slot[SKD_N_TIMEOUT_SLOT]; | ||
308 | u32 timeout_stamp; | ||
309 | struct skd_fitmsg_context *skmsg_free_list; | ||
310 | struct skd_fitmsg_context *skmsg_table; | ||
311 | |||
312 | struct skd_request_context *skreq_free_list; | ||
313 | struct skd_request_context *skreq_table; | ||
314 | |||
315 | struct skd_special_context *skspcl_free_list; | ||
316 | struct skd_special_context *skspcl_table; | ||
317 | |||
318 | struct skd_special_context internal_skspcl; | ||
319 | u32 read_cap_blocksize; | ||
320 | u32 read_cap_last_lba; | ||
321 | int read_cap_is_valid; | ||
322 | int inquiry_is_valid; | ||
323 | u8 inq_serial_num[13]; /*12 chars plus null term */ | ||
324 | u8 id_str[80]; /* holds a composite name (pci + sernum) */ | ||
325 | |||
326 | u8 skcomp_cycle; | ||
327 | u32 skcomp_ix; | ||
328 | struct fit_completion_entry_v1 *skcomp_table; | ||
329 | struct fit_comp_error_info *skerr_table; | ||
330 | dma_addr_t cq_dma_address; | ||
331 | |||
332 | wait_queue_head_t waitq; | ||
333 | |||
334 | struct timer_list timer; | ||
335 | u32 timer_countdown; | ||
336 | u32 timer_substate; | ||
337 | |||
338 | int n_special; | ||
339 | int sgs_per_request; | ||
340 | u32 last_mtd; | ||
341 | |||
342 | u32 proto_ver; | ||
343 | |||
344 | int dbg_level; | ||
345 | u32 connect_time_stamp; | ||
346 | int connect_retries; | ||
347 | #define SKD_MAX_CONNECT_RETRIES 16 | ||
348 | u32 drive_jiffies; | ||
349 | |||
350 | u32 timo_slot; | ||
351 | |||
352 | |||
353 | struct work_struct completion_worker; | ||
354 | }; | ||
355 | |||
356 | #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF) | ||
357 | #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF) | ||
358 | #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF) | ||
359 | |||
360 | static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) | ||
361 | { | ||
362 | u32 val; | ||
363 | |||
364 | if (likely(skdev->dbg_level < 2)) | ||
365 | return readl(skdev->mem_map[1] + offset); | ||
366 | else { | ||
367 | barrier(); | ||
368 | val = readl(skdev->mem_map[1] + offset); | ||
369 | barrier(); | ||
370 | pr_debug("%s:%s:%d offset %x = %x\n", | ||
371 | skdev->name, __func__, __LINE__, offset, val); | ||
372 | return val; | ||
373 | } | ||
374 | |||
375 | } | ||
376 | |||
377 | static inline void skd_reg_write32(struct skd_device *skdev, u32 val, | ||
378 | u32 offset) | ||
379 | { | ||
380 | if (likely(skdev->dbg_level < 2)) { | ||
381 | writel(val, skdev->mem_map[1] + offset); | ||
382 | barrier(); | ||
383 | } else { | ||
384 | barrier(); | ||
385 | writel(val, skdev->mem_map[1] + offset); | ||
386 | barrier(); | ||
387 | pr_debug("%s:%s:%d offset %x = %x\n", | ||
388 | skdev->name, __func__, __LINE__, offset, val); | ||
389 | } | ||
390 | } | ||
391 | |||
392 | static inline void skd_reg_write64(struct skd_device *skdev, u64 val, | ||
393 | u32 offset) | ||
394 | { | ||
395 | if (likely(skdev->dbg_level < 2)) { | ||
396 | writeq(val, skdev->mem_map[1] + offset); | ||
397 | barrier(); | ||
398 | } else { | ||
399 | barrier(); | ||
400 | writeq(val, skdev->mem_map[1] + offset); | ||
401 | barrier(); | ||
402 | pr_debug("%s:%s:%d offset %x = %016llx\n", | ||
403 | skdev->name, __func__, __LINE__, offset, val); | ||
404 | } | ||
405 | } | ||
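/*
 * These accessors wrap the register traffic to BAR 1.  For example, the
 * timer tick below polls the firmware state with
 * SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK, and raising
 * dbg_level to 2 or more makes every register access show up in the log.
 */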
406 | |||
407 | |||
408 | #define SKD_IRQ_DEFAULT SKD_IRQ_MSI | ||
409 | static int skd_isr_type = SKD_IRQ_DEFAULT; | ||
410 | |||
411 | module_param(skd_isr_type, int, 0444); | ||
412 | MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability." | ||
413 | " (0==legacy, 1==MSI, 2==MSI-X, default==1)"); | ||
414 | |||
415 | #define SKD_MAX_REQ_PER_MSG_DEFAULT 1 | ||
416 | static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; | ||
417 | |||
418 | module_param(skd_max_req_per_msg, int, 0444); | ||
419 | MODULE_PARM_DESC(skd_max_req_per_msg, | ||
420 | "Maximum SCSI requests packed in a single message." | ||
421 | " (1-14, default==1)"); | ||
422 | |||
423 | #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 | ||
424 | #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" | ||
425 | static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; | ||
426 | |||
427 | module_param(skd_max_queue_depth, int, 0444); | ||
428 | MODULE_PARM_DESC(skd_max_queue_depth, | ||
429 | "Maximum SCSI requests issued to s1120." | ||
430 | " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")"); | ||
431 | |||
432 | static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; | ||
433 | module_param(skd_sgs_per_request, int, 0444); | ||
434 | MODULE_PARM_DESC(skd_sgs_per_request, | ||
435 | "Maximum SG elements per block request." | ||
436 | " (1-4096, default==256)"); | ||
437 | |||
438 | static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; | ||
439 | module_param(skd_max_pass_thru, int, 0444); | ||
440 | MODULE_PARM_DESC(skd_max_pass_thru, | ||
441 | "Maximum SCSI pass-thru at a time." " (1-50, default==32)"); | ||
442 | |||
443 | module_param(skd_dbg_level, int, 0444); | ||
444 | MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); | ||
445 | |||
446 | module_param(skd_isr_comp_limit, int, 0444); | ||
447 | MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4"); | ||
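/*
 * All of the module parameters above are 0444 (read-only via sysfs), so
 * they are chosen at load time.  A plausible invocation using only the
 * parameters documented here would be:
 *
 *   modprobe skd skd_isr_type=2 skd_max_queue_depth=128 skd_max_req_per_msg=4
 *
 * which selects MSI-X interrupts and raises the queue depth within the
 * documented 1-200 range.
 */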
448 | |||
449 | /* Major device number dynamically assigned. */ | ||
450 | static u32 skd_major; | ||
451 | |||
452 | static void skd_destruct(struct skd_device *skdev); | ||
453 | static const struct block_device_operations skd_blockdev_ops; | ||
454 | static void skd_send_fitmsg(struct skd_device *skdev, | ||
455 | struct skd_fitmsg_context *skmsg); | ||
456 | static void skd_send_special_fitmsg(struct skd_device *skdev, | ||
457 | struct skd_special_context *skspcl); | ||
458 | static void skd_request_fn(struct request_queue *rq); | ||
459 | static void skd_end_request(struct skd_device *skdev, | ||
460 | struct skd_request_context *skreq, int error); | ||
461 | static int skd_preop_sg_list(struct skd_device *skdev, | ||
462 | struct skd_request_context *skreq); | ||
463 | static void skd_postop_sg_list(struct skd_device *skdev, | ||
464 | struct skd_request_context *skreq); | ||
465 | |||
466 | static void skd_restart_device(struct skd_device *skdev); | ||
467 | static int skd_quiesce_dev(struct skd_device *skdev); | ||
468 | static int skd_unquiesce_dev(struct skd_device *skdev); | ||
469 | static void skd_release_special(struct skd_device *skdev, | ||
470 | struct skd_special_context *skspcl); | ||
471 | static void skd_disable_interrupts(struct skd_device *skdev); | ||
472 | static void skd_isr_fwstate(struct skd_device *skdev); | ||
473 | static void skd_recover_requests(struct skd_device *skdev, int requeue); | ||
474 | static void skd_soft_reset(struct skd_device *skdev); | ||
475 | |||
476 | static const char *skd_name(struct skd_device *skdev); | ||
477 | const char *skd_drive_state_to_str(int state); | ||
478 | const char *skd_skdev_state_to_str(enum skd_drvr_state state); | ||
479 | static void skd_log_skdev(struct skd_device *skdev, const char *event); | ||
480 | static void skd_log_skmsg(struct skd_device *skdev, | ||
481 | struct skd_fitmsg_context *skmsg, const char *event); | ||
482 | static void skd_log_skreq(struct skd_device *skdev, | ||
483 | struct skd_request_context *skreq, const char *event); | ||
484 | |||
485 | /* | ||
486 | ***************************************************************************** | ||
487 | * READ/WRITE REQUESTS | ||
488 | ***************************************************************************** | ||
489 | */ | ||
490 | static void skd_fail_all_pending(struct skd_device *skdev) | ||
491 | { | ||
492 | struct request_queue *q = skdev->queue; | ||
493 | struct request *req; | ||
494 | |||
495 | for (;; ) { | ||
496 | req = blk_peek_request(q); | ||
497 | if (req == NULL) | ||
498 | break; | ||
499 | blk_start_request(req); | ||
500 | __blk_end_request_all(req, -EIO); | ||
501 | } | ||
502 | } | ||
503 | |||
504 | static void | ||
505 | skd_prep_rw_cdb(struct skd_scsi_request *scsi_req, | ||
506 | int data_dir, unsigned lba, | ||
507 | unsigned count) | ||
508 | { | ||
509 | if (data_dir == READ) | ||
510 | scsi_req->cdb[0] = 0x28; | ||
511 | else | ||
512 | scsi_req->cdb[0] = 0x2a; | ||
513 | |||
514 | scsi_req->cdb[1] = 0; | ||
515 | scsi_req->cdb[2] = (lba & 0xff000000) >> 24; | ||
516 | scsi_req->cdb[3] = (lba & 0xff0000) >> 16; | ||
517 | scsi_req->cdb[4] = (lba & 0xff00) >> 8; | ||
518 | scsi_req->cdb[5] = (lba & 0xff); | ||
519 | scsi_req->cdb[6] = 0; | ||
520 | scsi_req->cdb[7] = (count & 0xff00) >> 8; | ||
521 | scsi_req->cdb[8] = count & 0xff; | ||
522 | scsi_req->cdb[9] = 0; | ||
523 | } | ||
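/*
 * The bytes above form a SCSI READ(10)/WRITE(10) CDB: opcode 0x28 or 0x2a,
 * a big-endian 32-bit LBA in bytes 2-5 and a big-endian 16-bit transfer
 * length in bytes 7-8.  For example, lba = 0x12345678 and count = 8 for a
 * write produce:
 *
 *   cdb[] = { 0x2a, 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x08, 0x00 }
 */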
524 | |||
525 | static void | ||
526 | skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req, | ||
527 | struct skd_request_context *skreq) | ||
528 | { | ||
529 | skreq->flush_cmd = 1; | ||
530 | |||
531 | scsi_req->cdb[0] = 0x35; | ||
532 | scsi_req->cdb[1] = 0; | ||
533 | scsi_req->cdb[2] = 0; | ||
534 | scsi_req->cdb[3] = 0; | ||
535 | scsi_req->cdb[4] = 0; | ||
536 | scsi_req->cdb[5] = 0; | ||
537 | scsi_req->cdb[6] = 0; | ||
538 | scsi_req->cdb[7] = 0; | ||
539 | scsi_req->cdb[8] = 0; | ||
540 | scsi_req->cdb[9] = 0; | ||
541 | } | ||
542 | |||
543 | static void | ||
544 | skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, | ||
545 | struct skd_request_context *skreq, | ||
546 | struct page *page, | ||
547 | u32 lba, u32 count) | ||
548 | { | ||
549 | char *buf; | ||
550 | unsigned long len; | ||
551 | struct request *req; | ||
552 | |||
553 | buf = page_address(page); | ||
554 | len = SKD_DISCARD_CDB_LENGTH; | ||
555 | |||
556 | scsi_req->cdb[0] = UNMAP; | ||
557 | scsi_req->cdb[8] = len; | ||
558 | |||
559 | put_unaligned_be16(6 + 16, &buf[0]); | ||
560 | put_unaligned_be16(16, &buf[2]); | ||
561 | put_unaligned_be64(lba, &buf[8]); | ||
562 | put_unaligned_be32(count, &buf[16]); | ||
563 | |||
564 | req = skreq->req; | ||
565 | blk_add_request_payload(req, page, len); | ||
566 | req->buffer = buf; | ||
567 | } | ||
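/*
 * The payload built above follows the SCSI UNMAP parameter list layout:
 * bytes 0-1 carry the data length (6 + 16), bytes 2-3 the block descriptor
 * data length (16), and the single descriptor holds a 64-bit starting LBA
 * at offset 8 and a 32-bit block count at offset 16.  The page is attached
 * to the request as its payload and freed again in skd_end_request().
 */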
568 | |||
569 | static void skd_request_fn_not_online(struct request_queue *q); | ||
570 | |||
571 | static void skd_request_fn(struct request_queue *q) | ||
572 | { | ||
573 | struct skd_device *skdev = q->queuedata; | ||
574 | struct skd_fitmsg_context *skmsg = NULL; | ||
575 | struct fit_msg_hdr *fmh = NULL; | ||
576 | struct skd_request_context *skreq; | ||
577 | struct request *req = NULL; | ||
578 | struct skd_scsi_request *scsi_req; | ||
579 | struct page *page; | ||
580 | unsigned long io_flags; | ||
581 | int error; | ||
582 | u32 lba; | ||
583 | u32 count; | ||
584 | int data_dir; | ||
585 | u32 be_lba; | ||
586 | u32 be_count; | ||
587 | u64 be_dmaa; | ||
588 | u64 cmdctxt; | ||
589 | u32 timo_slot; | ||
590 | void *cmd_ptr; | ||
591 | int flush, fua; | ||
592 | |||
593 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | ||
594 | skd_request_fn_not_online(q); | ||
595 | return; | ||
596 | } | ||
597 | |||
598 | if (blk_queue_stopped(skdev->queue)) { | ||
599 | if (skdev->skmsg_free_list == NULL || | ||
600 | skdev->skreq_free_list == NULL || | ||
601 | skdev->in_flight >= skdev->queue_low_water_mark) | ||
602 | /* There is still some kind of shortage */ | ||
603 | return; | ||
604 | |||
605 | queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue); | ||
606 | } | ||
607 | |||
608 | /* | ||
609 | * Stop conditions: | ||
610 | * - There are no more native requests | ||
611 | * - There are already the maximum number of requests in progress | ||
612 | * - There are no more skd_request_context entries | ||
613 | * - There are no more FIT msg buffers | ||
614 | */ | ||
615 | for (;; ) { | ||
616 | |||
617 | flush = fua = 0; | ||
618 | |||
619 | req = blk_peek_request(q); | ||
620 | |||
621 | /* Are there any native requests to start? */ | ||
622 | if (req == NULL) | ||
623 | break; | ||
624 | |||
625 | lba = (u32)blk_rq_pos(req); | ||
626 | count = blk_rq_sectors(req); | ||
627 | data_dir = rq_data_dir(req); | ||
628 | io_flags = req->cmd_flags; | ||
629 | |||
630 | if (io_flags & REQ_FLUSH) | ||
631 | flush++; | ||
632 | |||
633 | if (io_flags & REQ_FUA) | ||
634 | fua++; | ||
635 | |||
636 | pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) " | ||
637 | "count=%u(0x%x) dir=%d\n", | ||
638 | skdev->name, __func__, __LINE__, | ||
639 | req, lba, lba, count, count, data_dir); | ||
640 | |||
641 | /* At this point we know there is a request */ | ||
642 | |||
643 | /* Are too many requests already in progress? */ | ||
644 | if (skdev->in_flight >= skdev->cur_max_queue_depth) { | ||
645 | pr_debug("%s:%s:%d qdepth %d, limit %d\n", | ||
646 | skdev->name, __func__, __LINE__, | ||
647 | skdev->in_flight, skdev->cur_max_queue_depth); | ||
648 | break; | ||
649 | } | ||
650 | |||
651 | /* Is a skd_request_context available? */ | ||
652 | skreq = skdev->skreq_free_list; | ||
653 | if (skreq == NULL) { | ||
654 | pr_debug("%s:%s:%d Out of req=%p\n", | ||
655 | skdev->name, __func__, __LINE__, q); | ||
656 | break; | ||
657 | } | ||
658 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); | ||
659 | SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0); | ||
660 | |||
661 | /* Now we check to see if we can get a fit msg */ | ||
662 | if (skmsg == NULL) { | ||
663 | if (skdev->skmsg_free_list == NULL) { | ||
664 | pr_debug("%s:%s:%d Out of msg\n", | ||
665 | skdev->name, __func__, __LINE__); | ||
666 | break; | ||
667 | } | ||
668 | } | ||
669 | |||
670 | skreq->flush_cmd = 0; | ||
671 | skreq->n_sg = 0; | ||
672 | skreq->sg_byte_count = 0; | ||
673 | skreq->discard_page = 0; | ||
674 | |||
675 | /* | ||
676 | * OK to now dequeue request from q. | ||
677 | * | ||
678 | * At this point we are committed to either start or reject | ||
679 | * the native request. Note that skd_request_context is | ||
680 | * available but is still at the head of the free list. | ||
681 | */ | ||
682 | blk_start_request(req); | ||
683 | skreq->req = req; | ||
684 | skreq->fitmsg_id = 0; | ||
685 | |||
686 | /* Either a FIT msg is in progress or we have to start one. */ | ||
687 | if (skmsg == NULL) { | ||
688 | /* Are there any FIT msg buffers available? */ | ||
689 | skmsg = skdev->skmsg_free_list; | ||
690 | if (skmsg == NULL) { | ||
691 | pr_debug("%s:%s:%d Out of msg skdev=%p\n", | ||
692 | skdev->name, __func__, __LINE__, | ||
693 | skdev); | ||
694 | break; | ||
695 | } | ||
696 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); | ||
697 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0); | ||
698 | |||
699 | skdev->skmsg_free_list = skmsg->next; | ||
700 | |||
701 | skmsg->state = SKD_MSG_STATE_BUSY; | ||
702 | skmsg->id += SKD_ID_INCR; | ||
703 | |||
704 | /* Initialize the FIT msg header */ | ||
705 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | ||
706 | memset(fmh, 0, sizeof(*fmh)); | ||
707 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | ||
708 | skmsg->length = sizeof(*fmh); | ||
709 | } | ||
710 | |||
711 | skreq->fitmsg_id = skmsg->id; | ||
712 | |||
713 | /* | ||
714 | * Note that a FIT msg may have just been started | ||
715 | * but contains no SoFIT requests yet. | ||
716 | */ | ||
717 | |||
718 | /* | ||
719 | * Transcode the request, checking as we go. The outcome of | ||
720 | * the transcoding is represented by the error variable. | ||
721 | */ | ||
722 | cmd_ptr = &skmsg->msg_buf[skmsg->length]; | ||
723 | memset(cmd_ptr, 0, 32); | ||
724 | |||
725 | be_lba = cpu_to_be32(lba); | ||
726 | be_count = cpu_to_be32(count); | ||
727 | be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address); | ||
728 | cmdctxt = skreq->id + SKD_ID_INCR; | ||
729 | |||
730 | scsi_req = cmd_ptr; | ||
731 | scsi_req->hdr.tag = cmdctxt; | ||
732 | scsi_req->hdr.sg_list_dma_address = be_dmaa; | ||
733 | |||
734 | if (data_dir == READ) | ||
735 | skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; | ||
736 | else | ||
737 | skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; | ||
738 | |||
739 | if (io_flags & REQ_DISCARD) { | ||
740 | page = alloc_page(GFP_ATOMIC | __GFP_ZERO); | ||
741 | if (!page) { | ||
742 | pr_err("request_fn:Page allocation failed.\n"); | ||
743 | skd_end_request(skdev, skreq, -ENOMEM); | ||
744 | break; | ||
745 | } | ||
746 | skreq->discard_page = 1; | ||
747 | skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); | ||
748 | |||
749 | } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { | ||
750 | skd_prep_zerosize_flush_cdb(scsi_req, skreq); | ||
751 | SKD_ASSERT(skreq->flush_cmd == 1); | ||
752 | |||
753 | } else { | ||
754 | skd_prep_rw_cdb(scsi_req, data_dir, lba, count); | ||
755 | } | ||
756 | |||
757 | if (fua) | ||
758 | scsi_req->cdb[1] |= SKD_FUA_NV; | ||
759 | |||
760 | if (!req->bio) | ||
761 | goto skip_sg; | ||
762 | |||
763 | error = skd_preop_sg_list(skdev, skreq); | ||
764 | |||
765 | if (error != 0) { | ||
766 | /* | ||
767 | * Complete the native request with error. | ||
768 | * Note that the request context is still at the | ||
769 | * head of the free list, and that the SoFIT request | ||
770 | * was encoded into the FIT msg buffer but the FIT | ||
771 | * msg length has not been updated. In short, the only | ||
772 | * resource still held that might go unused is the FIT | ||
773 | * msg, which could end up empty. | ||
774 | */ | ||
775 | pr_debug("%s:%s:%d error Out\n", | ||
776 | skdev->name, __func__, __LINE__); | ||
777 | skd_end_request(skdev, skreq, error); | ||
778 | continue; | ||
779 | } | ||
780 | |||
781 | skip_sg: | ||
782 | scsi_req->hdr.sg_list_len_bytes = | ||
783 | cpu_to_be32(skreq->sg_byte_count); | ||
784 | |||
785 | /* Complete resource allocations. */ | ||
786 | skdev->skreq_free_list = skreq->next; | ||
787 | skreq->state = SKD_REQ_STATE_BUSY; | ||
788 | skreq->id += SKD_ID_INCR; | ||
789 | |||
790 | skmsg->length += sizeof(struct skd_scsi_request); | ||
791 | fmh->num_protocol_cmds_coalesced++; | ||
792 | |||
793 | /* | ||
794 | * Update the active request counts. | ||
795 | * Capture the timeout timestamp. | ||
796 | */ | ||
797 | skreq->timeout_stamp = skdev->timeout_stamp; | ||
798 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | ||
799 | skdev->timeout_slot[timo_slot]++; | ||
800 | skdev->in_flight++; | ||
801 | pr_debug("%s:%s:%d req=0x%x busy=%d\n", | ||
802 | skdev->name, __func__, __LINE__, | ||
803 | skreq->id, skdev->in_flight); | ||
804 | |||
805 | /* | ||
806 | * If the FIT msg buffer is full send it. | ||
807 | */ | ||
808 | if (skmsg->length >= SKD_N_FITMSG_BYTES || | ||
809 | fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { | ||
810 | skd_send_fitmsg(skdev, skmsg); | ||
811 | skmsg = NULL; | ||
812 | fmh = NULL; | ||
813 | } | ||
814 | } | ||
815 | |||
816 | /* | ||
817 | * Is a FIT msg in progress? If it is empty put the buffer back | ||
818 | * on the free list. If it is non-empty send what we got. | ||
819 | * This minimizes latency when there are fewer requests than | ||
820 | * what fits in a FIT msg. | ||
821 | */ | ||
822 | if (skmsg != NULL) { | ||
823 | /* Bigger than just a FIT msg header? */ | ||
824 | if (skmsg->length > sizeof(struct fit_msg_hdr)) { | ||
825 | pr_debug("%s:%s:%d sending msg=%p, len %d\n", | ||
826 | skdev->name, __func__, __LINE__, | ||
827 | skmsg, skmsg->length); | ||
828 | skd_send_fitmsg(skdev, skmsg); | ||
829 | } else { | ||
830 | /* | ||
831 | * The FIT msg is empty. It means we got started | ||
832 | * on the msg, but the requests were rejected. | ||
833 | */ | ||
834 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
835 | skmsg->id += SKD_ID_INCR; | ||
836 | skmsg->next = skdev->skmsg_free_list; | ||
837 | skdev->skmsg_free_list = skmsg; | ||
838 | } | ||
839 | skmsg = NULL; | ||
840 | fmh = NULL; | ||
841 | } | ||
842 | |||
843 | /* | ||
844 | * If req is non-NULL it means there is something to do but | ||
845 | * we are out of a resource. | ||
846 | */ | ||
847 | if (req) | ||
848 | blk_stop_queue(skdev->queue); | ||
849 | } | ||
850 | |||
851 | static void skd_end_request(struct skd_device *skdev, | ||
852 | struct skd_request_context *skreq, int error) | ||
853 | { | ||
854 | struct request *req = skreq->req; | ||
855 | unsigned int io_flags = req->cmd_flags; | ||
856 | |||
857 | if ((io_flags & REQ_DISCARD) && | ||
858 | (skreq->discard_page == 1)) { | ||
859 | pr_debug("%s:%s:%d, free the page!", | ||
860 | skdev->name, __func__, __LINE__); | ||
861 | free_page((unsigned long)req->buffer); | ||
862 | req->buffer = NULL; | ||
863 | } | ||
864 | |||
865 | if (unlikely(error)) { | ||
866 | struct request *req = skreq->req; | ||
867 | char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; | ||
868 | u32 lba = (u32)blk_rq_pos(req); | ||
869 | u32 count = blk_rq_sectors(req); | ||
870 | |||
871 | pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", | ||
872 | skd_name(skdev), cmd, lba, count, skreq->id); | ||
873 | } else | ||
874 | pr_debug("%s:%s:%d id=0x%x error=%d\n", | ||
875 | skdev->name, __func__, __LINE__, skreq->id, error); | ||
876 | |||
877 | __blk_end_request_all(skreq->req, error); | ||
878 | } | ||
879 | |||
880 | static int skd_preop_sg_list(struct skd_device *skdev, | ||
881 | struct skd_request_context *skreq) | ||
882 | { | ||
883 | struct request *req = skreq->req; | ||
884 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | ||
885 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | ||
886 | struct scatterlist *sg = &skreq->sg[0]; | ||
887 | int n_sg; | ||
888 | int i; | ||
889 | |||
890 | skreq->sg_byte_count = 0; | ||
891 | |||
892 | /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || | ||
893 | skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */ | ||
894 | |||
895 | n_sg = blk_rq_map_sg(skdev->queue, req, sg); | ||
896 | if (n_sg <= 0) | ||
897 | return -EINVAL; | ||
898 | |||
899 | /* | ||
900 | * Map scatterlist to PCI bus addresses. | ||
901 | * Note PCI might change the number of entries. | ||
902 | */ | ||
903 | n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); | ||
904 | if (n_sg <= 0) | ||
905 | return -EINVAL; | ||
906 | |||
907 | SKD_ASSERT(n_sg <= skdev->sgs_per_request); | ||
908 | |||
909 | skreq->n_sg = n_sg; | ||
910 | |||
911 | for (i = 0; i < n_sg; i++) { | ||
912 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | ||
913 | u32 cnt = sg_dma_len(&sg[i]); | ||
914 | uint64_t dma_addr = sg_dma_address(&sg[i]); | ||
915 | |||
916 | sgd->control = FIT_SGD_CONTROL_NOT_LAST; | ||
917 | sgd->byte_count = cnt; | ||
918 | skreq->sg_byte_count += cnt; | ||
919 | sgd->host_side_addr = dma_addr; | ||
920 | sgd->dev_side_addr = 0; | ||
921 | } | ||
922 | |||
923 | skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; | ||
924 | skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; | ||
925 | |||
926 | if (unlikely(skdev->dbg_level > 1)) { | ||
927 | pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", | ||
928 | skdev->name, __func__, __LINE__, | ||
929 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | ||
930 | for (i = 0; i < n_sg; i++) { | ||
931 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | ||
932 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " | ||
933 | "addr=0x%llx next=0x%llx\n", | ||
934 | skdev->name, __func__, __LINE__, | ||
935 | i, sgd->byte_count, sgd->control, | ||
936 | sgd->host_side_addr, sgd->next_desc_ptr); | ||
937 | } | ||
938 | } | ||
939 | |||
940 | return 0; | ||
941 | } | ||
942 | |||
943 | static void skd_postop_sg_list(struct skd_device *skdev, | ||
944 | struct skd_request_context *skreq) | ||
945 | { | ||
946 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | ||
947 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | ||
948 | |||
949 | /* | ||
950 | * restore the next ptr for next IO request so we | ||
951 | * don't have to set it every time. | ||
952 | */ | ||
953 | skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = | ||
954 | skreq->sksg_dma_address + | ||
955 | ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); | ||
956 | pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir); | ||
957 | } | ||
958 | |||
959 | static void skd_request_fn_not_online(struct request_queue *q) | ||
960 | { | ||
961 | struct skd_device *skdev = q->queuedata; | ||
962 | int error; | ||
963 | |||
964 | SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); | ||
965 | |||
966 | skd_log_skdev(skdev, "req_not_online"); | ||
967 | switch (skdev->state) { | ||
968 | case SKD_DRVR_STATE_PAUSING: | ||
969 | case SKD_DRVR_STATE_PAUSED: | ||
970 | case SKD_DRVR_STATE_STARTING: | ||
971 | case SKD_DRVR_STATE_RESTARTING: | ||
972 | case SKD_DRVR_STATE_WAIT_BOOT: | ||
973 | /* In case of starting, we haven't started the queue, | ||
974 | * so we can't get here... but requests are | ||
975 | * possibly hanging out waiting for us because we | ||
976 | * reported the dev/skd0 already. They'll wait | ||
977 | * forever if connect doesn't complete. | ||
978 | * What to do??? delay dev/skd0 ?? | ||
979 | */ | ||
980 | case SKD_DRVR_STATE_BUSY: | ||
981 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
982 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
983 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | ||
984 | return; | ||
985 | |||
986 | case SKD_DRVR_STATE_BUSY_SANITIZE: | ||
987 | case SKD_DRVR_STATE_STOPPING: | ||
988 | case SKD_DRVR_STATE_SYNCING: | ||
989 | case SKD_DRVR_STATE_FAULT: | ||
990 | case SKD_DRVR_STATE_DISAPPEARED: | ||
991 | default: | ||
992 | error = -EIO; | ||
993 | break; | ||
994 | } | ||
995 | |||
996 | /* If we get here, terminate all pending block requests | ||
997 | * with EIO and any scsi pass thru with appropriate sense | ||
998 | */ | ||
999 | |||
1000 | skd_fail_all_pending(skdev); | ||
1001 | } | ||
1002 | |||
1003 | /* | ||
1004 | ***************************************************************************** | ||
1005 | * TIMER | ||
1006 | ***************************************************************************** | ||
1007 | */ | ||
1008 | |||
1009 | static void skd_timer_tick_not_online(struct skd_device *skdev); | ||
1010 | |||
1011 | static void skd_timer_tick(ulong arg) | ||
1012 | { | ||
1013 | struct skd_device *skdev = (struct skd_device *)arg; | ||
1014 | |||
1015 | u32 timo_slot; | ||
1016 | u32 overdue_timestamp; | ||
1017 | unsigned long reqflags; | ||
1018 | u32 state; | ||
1019 | |||
1020 | if (skdev->state == SKD_DRVR_STATE_FAULT) | ||
1021 | /* The driver has declared fault, and we want it to | ||
1022 | * stay that way until driver is reloaded. | ||
1023 | */ | ||
1024 | return; | ||
1025 | |||
1026 | spin_lock_irqsave(&skdev->lock, reqflags); | ||
1027 | |||
1028 | state = SKD_READL(skdev, FIT_STATUS); | ||
1029 | state &= FIT_SR_DRIVE_STATE_MASK; | ||
1030 | if (state != skdev->drive_state) | ||
1031 | skd_isr_fwstate(skdev); | ||
1032 | |||
1033 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | ||
1034 | skd_timer_tick_not_online(skdev); | ||
1035 | goto timer_func_out; | ||
1036 | } | ||
1037 | skdev->timeout_stamp++; | ||
1038 | timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | ||
1039 | |||
1040 | /* | ||
1041 | * All requests that happened during the previous use of | ||
1042 | * this slot should be done by now. The previous use of this | ||
1043 | * slot was SKD_N_TIMEOUT_SLOT ticks of the one-second timer ago. | ||
1044 | */ | ||
1045 | if (skdev->timeout_slot[timo_slot] == 0) | ||
1046 | goto timer_func_out; | ||
1047 | |||
1048 | /* Something is overdue */ | ||
1049 | overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT; | ||
1050 | |||
1051 | pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n", | ||
1052 | skdev->name, __func__, __LINE__, | ||
1053 | skdev->timeout_slot[timo_slot], skdev->in_flight); | ||
1054 | pr_err("(%s): Overdue IOs (%d), busy %d\n", | ||
1055 | skd_name(skdev), skdev->timeout_slot[timo_slot], | ||
1056 | skdev->in_flight); | ||
1057 | |||
1058 | skdev->timer_countdown = SKD_DRAINING_TIMO; | ||
1059 | skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; | ||
1060 | skdev->timo_slot = timo_slot; | ||
1061 | blk_stop_queue(skdev->queue); | ||
1062 | |||
1063 | timer_func_out: | ||
1064 | mod_timer(&skdev->timer, (jiffies + HZ)); | ||
1065 | |||
1066 | spin_unlock_irqrestore(&skdev->lock, reqflags); | ||
1067 | } | ||
1068 | |||
1069 | static void skd_timer_tick_not_online(struct skd_device *skdev) | ||
1070 | { | ||
1071 | switch (skdev->state) { | ||
1072 | case SKD_DRVR_STATE_IDLE: | ||
1073 | case SKD_DRVR_STATE_LOAD: | ||
1074 | break; | ||
1075 | case SKD_DRVR_STATE_BUSY_SANITIZE: | ||
1076 | pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n", | ||
1077 | skdev->name, __func__, __LINE__, | ||
1078 | skdev->drive_state, skdev->state); | ||
1079 | /* If we've been in sanitize for 3 seconds, we figure we're not | ||
1080 | * going to get any more completions, so recover requests now | ||
1081 | */ | ||
1082 | if (skdev->timer_countdown > 0) { | ||
1083 | skdev->timer_countdown--; | ||
1084 | return; | ||
1085 | } | ||
1086 | skd_recover_requests(skdev, 0); | ||
1087 | break; | ||
1088 | |||
1089 | case SKD_DRVR_STATE_BUSY: | ||
1090 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
1091 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
1092 | pr_debug("%s:%s:%d busy[%x], countdown=%d\n", | ||
1093 | skdev->name, __func__, __LINE__, | ||
1094 | skdev->state, skdev->timer_countdown); | ||
1095 | if (skdev->timer_countdown > 0) { | ||
1096 | skdev->timer_countdown--; | ||
1097 | return; | ||
1098 | } | ||
1099 | pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.", | ||
1100 | skdev->name, __func__, __LINE__, | ||
1101 | skdev->state, skdev->timer_countdown); | ||
1102 | skd_restart_device(skdev); | ||
1103 | break; | ||
1104 | |||
1105 | case SKD_DRVR_STATE_WAIT_BOOT: | ||
1106 | case SKD_DRVR_STATE_STARTING: | ||
1107 | if (skdev->timer_countdown > 0) { | ||
1108 | skdev->timer_countdown--; | ||
1109 | return; | ||
1110 | } | ||
1111 | /* For now, we fault the drive. Could attempt resets to | ||
1112 | * recover at some point. */ | ||
1113 | skdev->state = SKD_DRVR_STATE_FAULT; | ||
1114 | |||
1115 | pr_err("(%s): DriveFault Connect Timeout (%x)\n", | ||
1116 | skd_name(skdev), skdev->drive_state); | ||
1117 | |||
1118 | /*start the queue so we can respond with error to requests */ | ||
1119 | /* wakeup anyone waiting for startup complete */ | ||
1120 | blk_start_queue(skdev->queue); | ||
1121 | skdev->gendisk_on = -1; | ||
1122 | wake_up_interruptible(&skdev->waitq); | ||
1123 | break; | ||
1124 | |||
1125 | case SKD_DRVR_STATE_ONLINE: | ||
1126 | /* shouldn't get here. */ | ||
1127 | break; | ||
1128 | |||
1129 | case SKD_DRVR_STATE_PAUSING: | ||
1130 | case SKD_DRVR_STATE_PAUSED: | ||
1131 | break; | ||
1132 | |||
1133 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | ||
1134 | pr_debug("%s:%s:%d " | ||
1135 | "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", | ||
1136 | skdev->name, __func__, __LINE__, | ||
1137 | skdev->timo_slot, | ||
1138 | skdev->timer_countdown, | ||
1139 | skdev->in_flight, | ||
1140 | skdev->timeout_slot[skdev->timo_slot]); | ||
1141 | /* if the slot has cleared we can let the I/O continue */ | ||
1142 | if (skdev->timeout_slot[skdev->timo_slot] == 0) { | ||
1143 | pr_debug("%s:%s:%d Slot drained, starting queue.\n", | ||
1144 | skdev->name, __func__, __LINE__); | ||
1145 | skdev->state = SKD_DRVR_STATE_ONLINE; | ||
1146 | blk_start_queue(skdev->queue); | ||
1147 | return; | ||
1148 | } | ||
1149 | if (skdev->timer_countdown > 0) { | ||
1150 | skdev->timer_countdown--; | ||
1151 | return; | ||
1152 | } | ||
1153 | skd_restart_device(skdev); | ||
1154 | break; | ||
1155 | |||
1156 | case SKD_DRVR_STATE_RESTARTING: | ||
1157 | if (skdev->timer_countdown > 0) { | ||
1158 | skdev->timer_countdown--; | ||
1159 | return; | ||
1160 | } | ||
1161 | /* For now, we fault the drive. Could attempt resets to | ||
1162 | * recover at some point. */ | ||
1163 | skdev->state = SKD_DRVR_STATE_FAULT; | ||
1164 | pr_err("(%s): DriveFault Reconnect Timeout (%x)\n", | ||
1165 | skd_name(skdev), skdev->drive_state); | ||
1166 | |||
1167 | /* | ||
1168 | * Recovering does two things: | ||
1169 | * 1. completes IO with error | ||
1170 | * 2. reclaims dma resources | ||
1171 | * When is it safe to recover requests? | ||
1172 | * - if the drive state is faulted | ||
1173 | * - if the state is still soft reset after our timeout | ||
1174 | * - if the drive registers are dead (state = FF) | ||
1175 | * If it is "unsafe", we still need to recover, so we will | ||
1176 | * disable pci bus mastering and disable our interrupts. | ||
1177 | */ | ||
1178 | |||
1179 | if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || | ||
1180 | (skdev->drive_state == FIT_SR_DRIVE_FAULT) || | ||
1181 | (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) | ||
1182 | /* It never came out of soft reset. Try to | ||
1183 | * recover the requests and then let them | ||
1184 | * fail. This is to mitigate hung processes. */ | ||
1185 | skd_recover_requests(skdev, 0); | ||
1186 | else { | ||
1187 | pr_err("(%s): Disable BusMaster (%x)\n", | ||
1188 | skd_name(skdev), skdev->drive_state); | ||
1189 | pci_disable_device(skdev->pdev); | ||
1190 | skd_disable_interrupts(skdev); | ||
1191 | skd_recover_requests(skdev, 0); | ||
1192 | } | ||
1193 | |||
1194 | /*start the queue so we can respond with error to requests */ | ||
1195 | /* wakeup anyone waiting for startup complete */ | ||
1196 | blk_start_queue(skdev->queue); | ||
1197 | skdev->gendisk_on = -1; | ||
1198 | wake_up_interruptible(&skdev->waitq); | ||
1199 | break; | ||
1200 | |||
1201 | case SKD_DRVR_STATE_RESUMING: | ||
1202 | case SKD_DRVR_STATE_STOPPING: | ||
1203 | case SKD_DRVR_STATE_SYNCING: | ||
1204 | case SKD_DRVR_STATE_FAULT: | ||
1205 | case SKD_DRVR_STATE_DISAPPEARED: | ||
1206 | default: | ||
1207 | break; | ||
1208 | } | ||
1209 | } | ||
1210 | |||
1211 | static int skd_start_timer(struct skd_device *skdev) | ||
1212 | { | ||
1213 | int rc; | ||
1214 | |||
1215 | init_timer(&skdev->timer); | ||
1216 | setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev); | ||
1217 | |||
1218 | rc = mod_timer(&skdev->timer, (jiffies + HZ)); | ||
1219 | if (rc) | ||
1220 | pr_err("%s: failed to start timer %d\n", | ||
1221 | __func__, rc); | ||
1222 | return rc; | ||
1223 | } | ||
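/*
 * skd_timer_tick() re-arms itself with mod_timer(&skdev->timer, jiffies + HZ),
 * so once started here the timer fires roughly once per second until
 * skd_kill_timer() deletes it.
 */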
1224 | |||
1225 | static void skd_kill_timer(struct skd_device *skdev) | ||
1226 | { | ||
1227 | del_timer_sync(&skdev->timer); | ||
1228 | } | ||
1229 | |||
1230 | /* | ||
1231 | ***************************************************************************** | ||
1232 | * IOCTL | ||
1233 | ***************************************************************************** | ||
1234 | */ | ||
1235 | static int skd_ioctl_sg_io(struct skd_device *skdev, | ||
1236 | fmode_t mode, void __user *argp); | ||
1237 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | ||
1238 | struct skd_sg_io *sksgio); | ||
1239 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | ||
1240 | struct skd_sg_io *sksgio); | ||
1241 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | ||
1242 | struct skd_sg_io *sksgio); | ||
1243 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | ||
1244 | struct skd_sg_io *sksgio, int dxfer_dir); | ||
1245 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | ||
1246 | struct skd_sg_io *sksgio); | ||
1247 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio); | ||
1248 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | ||
1249 | struct skd_sg_io *sksgio); | ||
1250 | static int skd_sg_io_put_status(struct skd_device *skdev, | ||
1251 | struct skd_sg_io *sksgio); | ||
1252 | |||
1253 | static void skd_complete_special(struct skd_device *skdev, | ||
1254 | volatile struct fit_completion_entry_v1 | ||
1255 | *skcomp, | ||
1256 | volatile struct fit_comp_error_info *skerr, | ||
1257 | struct skd_special_context *skspcl); | ||
1258 | |||
1259 | static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, | ||
1260 | uint cmd_in, ulong arg) | ||
1261 | { | ||
1262 | int rc = 0; | ||
1263 | struct gendisk *disk = bdev->bd_disk; | ||
1264 | struct skd_device *skdev = disk->private_data; | ||
1265 | void __user *p = (void *)arg; | ||
1266 | |||
1267 | pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", | ||
1268 | skdev->name, __func__, __LINE__, | ||
1269 | disk->disk_name, current->comm, mode, cmd_in, arg); | ||
1270 | |||
1271 | if (!capable(CAP_SYS_ADMIN)) | ||
1272 | return -EPERM; | ||
1273 | |||
1274 | switch (cmd_in) { | ||
1275 | case SG_SET_TIMEOUT: | ||
1276 | case SG_GET_TIMEOUT: | ||
1277 | case SG_GET_VERSION_NUM: | ||
1278 | rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p); | ||
1279 | break; | ||
1280 | case SG_IO: | ||
1281 | rc = skd_ioctl_sg_io(skdev, mode, p); | ||
1282 | break; | ||
1283 | |||
1284 | default: | ||
1285 | rc = -ENOTTY; | ||
1286 | break; | ||
1287 | } | ||
1288 | |||
1289 | pr_debug("%s:%s:%d %s: completion rc %d\n", | ||
1290 | skdev->name, __func__, __LINE__, disk->disk_name, rc); | ||
1291 | return rc; | ||
1292 | } | ||
1293 | |||
1294 | static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, | ||
1295 | void __user *argp) | ||
1296 | { | ||
1297 | int rc; | ||
1298 | struct skd_sg_io sksgio; | ||
1299 | |||
1300 | memset(&sksgio, 0, sizeof(sksgio)); | ||
1301 | sksgio.mode = mode; | ||
1302 | sksgio.argp = argp; | ||
1303 | sksgio.iov = &sksgio.no_iov_iov; | ||
1304 | |||
1305 | switch (skdev->state) { | ||
1306 | case SKD_DRVR_STATE_ONLINE: | ||
1307 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
1308 | break; | ||
1309 | |||
1310 | default: | ||
1311 | pr_debug("%s:%s:%d drive not online\n", | ||
1312 | skdev->name, __func__, __LINE__); | ||
1313 | rc = -ENXIO; | ||
1314 | goto out; | ||
1315 | } | ||
1316 | |||
1317 | rc = skd_sg_io_get_and_check_args(skdev, &sksgio); | ||
1318 | if (rc) | ||
1319 | goto out; | ||
1320 | |||
1321 | rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); | ||
1322 | if (rc) | ||
1323 | goto out; | ||
1324 | |||
1325 | rc = skd_sg_io_prep_buffering(skdev, &sksgio); | ||
1326 | if (rc) | ||
1327 | goto out; | ||
1328 | |||
1329 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); | ||
1330 | if (rc) | ||
1331 | goto out; | ||
1332 | |||
1333 | rc = skd_sg_io_send_fitmsg(skdev, &sksgio); | ||
1334 | if (rc) | ||
1335 | goto out; | ||
1336 | |||
1337 | rc = skd_sg_io_await(skdev, &sksgio); | ||
1338 | if (rc) | ||
1339 | goto out; | ||
1340 | |||
1341 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); | ||
1342 | if (rc) | ||
1343 | goto out; | ||
1344 | |||
1345 | rc = skd_sg_io_put_status(skdev, &sksgio); | ||
1346 | if (rc) | ||
1347 | goto out; | ||
1348 | |||
1349 | rc = 0; | ||
1350 | |||
1351 | out: | ||
1352 | skd_sg_io_release_skspcl(skdev, &sksgio); | ||
1353 | |||
1354 | if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) | ||
1355 | kfree(sksgio.iov); | ||
1356 | return rc; | ||
1357 | } | ||
1358 | |||
1359 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | ||
1360 | struct skd_sg_io *sksgio) | ||
1361 | { | ||
1362 | struct sg_io_hdr *sgp = &sksgio->sg; | ||
1363 | int i, acc; | ||
1364 | |||
1365 | if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { | ||
1366 | pr_debug("%s:%s:%d access sg failed %p\n", | ||
1367 | skdev->name, __func__, __LINE__, sksgio->argp); | ||
1368 | return -EFAULT; | ||
1369 | } | ||
1370 | |||
1371 | if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { | ||
1372 | pr_debug("%s:%s:%d copy_from_user sg failed %p\n", | ||
1373 | skdev->name, __func__, __LINE__, sksgio->argp); | ||
1374 | return -EFAULT; | ||
1375 | } | ||
1376 | |||
1377 | if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { | ||
1378 | pr_debug("%s:%s:%d interface_id invalid 0x%x\n", | ||
1379 | skdev->name, __func__, __LINE__, sgp->interface_id); | ||
1380 | return -EINVAL; | ||
1381 | } | ||
1382 | |||
1383 | if (sgp->cmd_len > sizeof(sksgio->cdb)) { | ||
1384 | pr_debug("%s:%s:%d cmd_len invalid %d\n", | ||
1385 | skdev->name, __func__, __LINE__, sgp->cmd_len); | ||
1386 | return -EINVAL; | ||
1387 | } | ||
1388 | |||
1389 | if (sgp->iovec_count > 256) { | ||
1390 | pr_debug("%s:%s:%d iovec_count invalid %d\n", | ||
1391 | skdev->name, __func__, __LINE__, sgp->iovec_count); | ||
1392 | return -EINVAL; | ||
1393 | } | ||
1394 | |||
1395 | if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { | ||
1396 | pr_debug("%s:%s:%d dxfer_len invalid %d\n", | ||
1397 | skdev->name, __func__, __LINE__, sgp->dxfer_len); | ||
1398 | return -EINVAL; | ||
1399 | } | ||
1400 | |||
1401 | switch (sgp->dxfer_direction) { | ||
1402 | case SG_DXFER_NONE: | ||
1403 | acc = -1; | ||
1404 | break; | ||
1405 | |||
1406 | case SG_DXFER_TO_DEV: | ||
1407 | acc = VERIFY_READ; | ||
1408 | break; | ||
1409 | |||
1410 | case SG_DXFER_FROM_DEV: | ||
1411 | case SG_DXFER_TO_FROM_DEV: | ||
1412 | acc = VERIFY_WRITE; | ||
1413 | break; | ||
1414 | |||
1415 | default: | ||
1416 | pr_debug("%s:%s:%d dxfer_dir invalid %d\n", | ||
1417 | skdev->name, __func__, __LINE__, sgp->dxfer_direction); | ||
1418 | return -EINVAL; | ||
1419 | } | ||
1420 | |||
1421 | if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { | ||
1422 | pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n", | ||
1423 | skdev->name, __func__, __LINE__, sgp->cmdp); | ||
1424 | return -EFAULT; | ||
1425 | } | ||
1426 | |||
1427 | if (sgp->mx_sb_len != 0) { | ||
1428 | if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) { | ||
1429 | pr_debug("%s:%s:%d access sbp failed %p\n", | ||
1430 | skdev->name, __func__, __LINE__, sgp->sbp); | ||
1431 | return -EFAULT; | ||
1432 | } | ||
1433 | } | ||
1434 | |||
1435 | if (sgp->iovec_count == 0) { | ||
1436 | sksgio->iov[0].iov_base = sgp->dxferp; | ||
1437 | sksgio->iov[0].iov_len = sgp->dxfer_len; | ||
1438 | sksgio->iovcnt = 1; | ||
1439 | sksgio->dxfer_len = sgp->dxfer_len; | ||
1440 | } else { | ||
1441 | struct sg_iovec *iov; | ||
1442 | uint nbytes = sizeof(*iov) * sgp->iovec_count; | ||
1443 | size_t iov_data_len; | ||
1444 | |||
1445 | iov = kmalloc(nbytes, GFP_KERNEL); | ||
1446 | if (iov == NULL) { | ||
1447 | pr_debug("%s:%s:%d alloc iovec failed %d\n", | ||
1448 | skdev->name, __func__, __LINE__, | ||
1449 | sgp->iovec_count); | ||
1450 | return -ENOMEM; | ||
1451 | } | ||
1452 | sksgio->iov = iov; | ||
1453 | sksgio->iovcnt = sgp->iovec_count; | ||
1454 | |||
1455 | if (copy_from_user(iov, sgp->dxferp, nbytes)) { | ||
1456 | pr_debug("%s:%s:%d copy_from_user iovec failed %p\n", | ||
1457 | skdev->name, __func__, __LINE__, sgp->dxferp); | ||
1458 | return -EFAULT; | ||
1459 | } | ||
1460 | |||
1461 | /* | ||
1462 | * Sum up the vecs, making sure they don't overflow | ||
1463 | */ | ||
1464 | iov_data_len = 0; | ||
1465 | for (i = 0; i < sgp->iovec_count; i++) { | ||
1466 | if (iov_data_len + iov[i].iov_len < iov_data_len) | ||
1467 | return -EINVAL; | ||
1468 | iov_data_len += iov[i].iov_len; | ||
1469 | } | ||
1470 | |||
1471 | /* SG_IO howto says that the shorter of the two wins */ | ||
1472 | if (sgp->dxfer_len < iov_data_len) { | ||
1473 | sksgio->iovcnt = iov_shorten((struct iovec *)iov, | ||
1474 | sgp->iovec_count, | ||
1475 | sgp->dxfer_len); | ||
1476 | sksgio->dxfer_len = sgp->dxfer_len; | ||
1477 | } else | ||
1478 | sksgio->dxfer_len = iov_data_len; | ||
1479 | } | ||
1480 | |||
1481 | if (sgp->dxfer_direction != SG_DXFER_NONE) { | ||
1482 | struct sg_iovec *iov = sksgio->iov; | ||
1483 | for (i = 0; i < sksgio->iovcnt; i++, iov++) { | ||
1484 | if (!access_ok(acc, iov->iov_base, iov->iov_len)) { | ||
1485 | pr_debug("%s:%s:%d access data failed %p/%d\n", | ||
1486 | skdev->name, __func__, __LINE__, | ||
1487 | iov->iov_base, (int)iov->iov_len); | ||
1488 | return -EFAULT; | ||
1489 | } | ||
1490 | } | ||
1491 | } | ||
1492 | |||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
1496 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | ||
1497 | struct skd_sg_io *sksgio) | ||
1498 | { | ||
1499 | struct skd_special_context *skspcl = NULL; | ||
1500 | int rc; | ||
1501 | |||
1502 | for (;;) { | ||
1503 | ulong flags; | ||
1504 | |||
1505 | spin_lock_irqsave(&skdev->lock, flags); | ||
1506 | skspcl = skdev->skspcl_free_list; | ||
1507 | if (skspcl != NULL) { | ||
1508 | skdev->skspcl_free_list = | ||
1509 | (struct skd_special_context *)skspcl->req.next; | ||
1510 | skspcl->req.id += SKD_ID_INCR; | ||
1511 | skspcl->req.state = SKD_REQ_STATE_SETUP; | ||
1512 | skspcl->orphaned = 0; | ||
1513 | skspcl->req.n_sg = 0; | ||
1514 | } | ||
1515 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
1516 | |||
1517 | if (skspcl != NULL) { | ||
1518 | rc = 0; | ||
1519 | break; | ||
1520 | } | ||
1521 | |||
1522 | pr_debug("%s:%s:%d blocking\n", | ||
1523 | skdev->name, __func__, __LINE__); | ||
1524 | |||
1525 | rc = wait_event_interruptible_timeout( | ||
1526 | skdev->waitq, | ||
1527 | (skdev->skspcl_free_list != NULL), | ||
1528 | msecs_to_jiffies(sksgio->sg.timeout)); | ||
1529 | |||
1530 | pr_debug("%s:%s:%d unblocking, rc=%d\n", | ||
1531 | skdev->name, __func__, __LINE__, rc); | ||
1532 | |||
1533 | if (rc <= 0) { | ||
1534 | if (rc == 0) | ||
1535 | rc = -ETIMEDOUT; | ||
1536 | else | ||
1537 | rc = -EINTR; | ||
1538 | break; | ||
1539 | } | ||
1540 | /* | ||
1541 | * If we get here rc > 0 meaning the timeout to | ||
1542 | * wait_event_interruptible_timeout() had time left, hence the | ||
1543 | * sought event -- non-empty free list -- happened. | ||
1544 | * Retry the allocation. | ||
1545 | */ | ||
1546 | } | ||
1547 | sksgio->skspcl = skspcl; | ||
1548 | |||
1549 | return rc; | ||
1550 | } | ||
1551 | |||
1552 | static int skd_skreq_prep_buffering(struct skd_device *skdev, | ||
1553 | struct skd_request_context *skreq, | ||
1554 | u32 dxfer_len) | ||
1555 | { | ||
1556 | u32 resid = dxfer_len; | ||
1557 | |||
1558 | /* | ||
1559 | * The DMA engine must have aligned addresses and byte counts. | ||
1560 | */ | ||
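/* The "(-resid) & 3" below rounds resid up to the next multiple of four;
 * e.g. a 10-byte transfer is padded to 12 bytes. */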
1561 | resid += (-resid) & 3; | ||
1562 | skreq->sg_byte_count = resid; | ||
1563 | |||
1564 | skreq->n_sg = 0; | ||
1565 | |||
1566 | while (resid > 0) { | ||
1567 | u32 nbytes = PAGE_SIZE; | ||
1568 | u32 ix = skreq->n_sg; | ||
1569 | struct scatterlist *sg = &skreq->sg[ix]; | ||
1570 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | ||
1571 | struct page *page; | ||
1572 | |||
1573 | if (nbytes > resid) | ||
1574 | nbytes = resid; | ||
1575 | |||
1576 | page = alloc_page(GFP_KERNEL); | ||
1577 | if (page == NULL) | ||
1578 | return -ENOMEM; | ||
1579 | |||
1580 | sg_set_page(sg, page, nbytes, 0); | ||
1581 | |||
1582 | /* TODO: This should be going through a pci_???() | ||
1583 | * routine to do proper mapping. */ | ||
1584 | sksg->control = FIT_SGD_CONTROL_NOT_LAST; | ||
1585 | sksg->byte_count = nbytes; | ||
1586 | |||
1587 | sksg->host_side_addr = sg_phys(sg); | ||
1588 | |||
1589 | sksg->dev_side_addr = 0; | ||
1590 | sksg->next_desc_ptr = skreq->sksg_dma_address + | ||
1591 | (ix + 1) * sizeof(*sksg); | ||
1592 | |||
1593 | skreq->n_sg++; | ||
1594 | resid -= nbytes; | ||
1595 | } | ||
1596 | |||
1597 | if (skreq->n_sg > 0) { | ||
1598 | u32 ix = skreq->n_sg - 1; | ||
1599 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | ||
1600 | |||
1601 | sksg->control = FIT_SGD_CONTROL_LAST; | ||
1602 | sksg->next_desc_ptr = 0; | ||
1603 | } | ||
1604 | |||
1605 | if (unlikely(skdev->dbg_level > 1)) { | ||
1606 | u32 i; | ||
1607 | |||
1608 | pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", | ||
1609 | skdev->name, __func__, __LINE__, | ||
1610 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | ||
1611 | for (i = 0; i < skreq->n_sg; i++) { | ||
1612 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | ||
1613 | |||
1614 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " | ||
1615 | "addr=0x%llx next=0x%llx\n", | ||
1616 | skdev->name, __func__, __LINE__, | ||
1617 | i, sgd->byte_count, sgd->control, | ||
1618 | sgd->host_side_addr, sgd->next_desc_ptr); | ||
1619 | } | ||
1620 | } | ||
1621 | |||
1622 | return 0; | ||
1623 | } | ||
1624 | |||
1625 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | ||
1626 | struct skd_sg_io *sksgio) | ||
1627 | { | ||
1628 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1629 | struct skd_request_context *skreq = &skspcl->req; | ||
1630 | u32 dxfer_len = sksgio->dxfer_len; | ||
1631 | int rc; | ||
1632 | |||
1633 | rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len); | ||
1634 | /* | ||
1635 | * Eventually, errors or not, skd_release_special() is called | ||
1636 | * to recover allocations including partial allocations. | ||
1637 | */ | ||
1638 | return rc; | ||
1639 | } | ||
1640 | |||
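| /* | ||
| * Copy data between the caller's iovec list and the per-page bounce | ||
| * buffers attached to the special request. When the requested | ||
| * direction does not match the transfer, the copy is skipped unless | ||
| * this is the write half of a TO_FROM_DEV transfer. | ||
| */ | ||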
1641 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | ||
1642 | struct skd_sg_io *sksgio, int dxfer_dir) | ||
1643 | { | ||
1644 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1645 | u32 iov_ix = 0; | ||
1646 | struct sg_iovec curiov; | ||
1647 | u32 sksg_ix = 0; | ||
1648 | u8 *bufp = NULL; | ||
1649 | u32 buf_len = 0; | ||
1650 | u32 resid = sksgio->dxfer_len; | ||
1651 | int rc; | ||
1652 | |||
1653 | curiov.iov_len = 0; | ||
1654 | curiov.iov_base = NULL; | ||
1655 | |||
1656 | if (dxfer_dir != sksgio->sg.dxfer_direction) { | ||
1657 | if (dxfer_dir != SG_DXFER_TO_DEV || | ||
1658 | sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV) | ||
1659 | return 0; | ||
1660 | } | ||
1661 | |||
1662 | while (resid > 0) { | ||
1663 | u32 nbytes = PAGE_SIZE; | ||
1664 | |||
1665 | if (curiov.iov_len == 0) { | ||
1666 | curiov = sksgio->iov[iov_ix++]; | ||
1667 | continue; | ||
1668 | } | ||
1669 | |||
1670 | if (buf_len == 0) { | ||
1671 | struct page *page; | ||
1672 | page = sg_page(&skspcl->req.sg[sksg_ix++]); | ||
1673 | bufp = page_address(page); | ||
1674 | buf_len = PAGE_SIZE; | ||
1675 | } | ||
1676 | |||
1677 | nbytes = min_t(u32, nbytes, resid); | ||
1678 | nbytes = min_t(u32, nbytes, curiov.iov_len); | ||
1679 | nbytes = min_t(u32, nbytes, buf_len); | ||
1680 | |||
1681 | if (dxfer_dir == SG_DXFER_TO_DEV) | ||
1682 | rc = __copy_from_user(bufp, curiov.iov_base, nbytes); | ||
1683 | else | ||
1684 | rc = __copy_to_user(curiov.iov_base, bufp, nbytes); | ||
1685 | |||
1686 | if (rc) | ||
1687 | return -EFAULT; | ||
1688 | |||
1689 | resid -= nbytes; | ||
1690 | curiov.iov_len -= nbytes; | ||
1691 | curiov.iov_base += nbytes; | ||
1692 | buf_len -= nbytes; | ||
1693 | } | ||
1694 | |||
1695 | return 0; | ||
1696 | } | ||
1697 | |||
1698 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | ||
1699 | struct skd_sg_io *sksgio) | ||
1700 | { | ||
1701 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1702 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | ||
1703 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | ||
1704 | |||
1705 | memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES); | ||
1706 | |||
1707 | /* Initialize the FIT msg header */ | ||
1708 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | ||
1709 | fmh->num_protocol_cmds_coalesced = 1; | ||
1710 | |||
1711 | /* Initialize the SCSI request */ | ||
1712 | if (sksgio->sg.dxfer_direction != SG_DXFER_NONE) | ||
1713 | scsi_req->hdr.sg_list_dma_address = | ||
1714 | cpu_to_be64(skspcl->req.sksg_dma_address); | ||
1715 | scsi_req->hdr.tag = skspcl->req.id; | ||
1716 | scsi_req->hdr.sg_list_len_bytes = | ||
1717 | cpu_to_be32(skspcl->req.sg_byte_count); | ||
1718 | memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb)); | ||
1719 | |||
1720 | skspcl->req.state = SKD_REQ_STATE_BUSY; | ||
1721 | skd_send_special_fitmsg(skdev, skspcl); | ||
1722 | |||
1723 | return 0; | ||
1724 | } | ||
1725 | |||
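| /* | ||
| * Wait for the special request to leave the BUSY state or for the | ||
| * SG_IO timeout to expire. An aborted request is completed with | ||
| * fabricated CHECK CONDITION sense data; a request that is still | ||
| * busy after a timeout or signal is orphaned so that its eventual | ||
| * completion can dispose of it. | ||
| */ | ||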
1726 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio) | ||
1727 | { | ||
1728 | unsigned long flags; | ||
1729 | int rc; | ||
1730 | |||
1731 | rc = wait_event_interruptible_timeout(skdev->waitq, | ||
1732 | (sksgio->skspcl->req.state != | ||
1733 | SKD_REQ_STATE_BUSY), | ||
1734 | msecs_to_jiffies(sksgio->sg. | ||
1735 | timeout)); | ||
1736 | |||
1737 | spin_lock_irqsave(&skdev->lock, flags); | ||
1738 | |||
1739 | if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) { | ||
1740 | pr_debug("%s:%s:%d skspcl %p aborted\n", | ||
1741 | skdev->name, __func__, __LINE__, sksgio->skspcl); | ||
1742 | |||
1743 | /* Build check cond, sense and let command finish. */ | ||
1744 | /* For a timeout, we must fabricate completion and sense | ||
1745 | * data to complete the command */ | ||
1746 | sksgio->skspcl->req.completion.status = | ||
1747 | SAM_STAT_CHECK_CONDITION; | ||
1748 | |||
1749 | memset(&sksgio->skspcl->req.err_info, 0, | ||
1750 | sizeof(sksgio->skspcl->req.err_info)); | ||
1751 | sksgio->skspcl->req.err_info.type = 0x70; | ||
1752 | sksgio->skspcl->req.err_info.key = ABORTED_COMMAND; | ||
1753 | sksgio->skspcl->req.err_info.code = 0x44; | ||
1754 | sksgio->skspcl->req.err_info.qual = 0; | ||
1755 | rc = 0; | ||
1756 | } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY) | ||
1757 | /* No longer on the adapter. We finish. */ | ||
1758 | rc = 0; | ||
1759 | else { | ||
1760 | /* Something's gone wrong. Still busy. Timeout or | ||
1761 | * user interrupted (control-C). Mark as an orphan | ||
1762 | * so it will be disposed when completed. */ | ||
1763 | sksgio->skspcl->orphaned = 1; | ||
1764 | sksgio->skspcl = NULL; | ||
1765 | if (rc == 0) { | ||
1766 | pr_debug("%s:%s:%d timed out %p (%u ms)\n", | ||
1767 | skdev->name, __func__, __LINE__, | ||
1768 | sksgio, sksgio->sg.timeout); | ||
1769 | rc = -ETIMEDOUT; | ||
1770 | } else { | ||
1771 | pr_debug("%s:%s:%d cntlc %p\n", | ||
1772 | skdev->name, __func__, __LINE__, sksgio); | ||
1773 | rc = -EINTR; | ||
1774 | } | ||
1775 | } | ||
1776 | |||
1777 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
1778 | |||
1779 | return rc; | ||
1780 | } | ||
1781 | |||
1782 | static int skd_sg_io_put_status(struct skd_device *skdev, | ||
1783 | struct skd_sg_io *sksgio) | ||
1784 | { | ||
1785 | struct sg_io_hdr *sgp = &sksgio->sg; | ||
1786 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1787 | int resid = 0; | ||
1788 | |||
1789 | u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes); | ||
1790 | |||
1791 | sgp->status = skspcl->req.completion.status; | ||
1792 | resid = sksgio->dxfer_len - nb; | ||
1793 | |||
1794 | sgp->masked_status = sgp->status & STATUS_MASK; | ||
1795 | sgp->msg_status = 0; | ||
1796 | sgp->host_status = 0; | ||
1797 | sgp->driver_status = 0; | ||
1798 | sgp->resid = resid; | ||
1799 | if (sgp->masked_status || sgp->host_status || sgp->driver_status) | ||
1800 | sgp->info |= SG_INFO_CHECK; | ||
1801 | |||
1802 | pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n", | ||
1803 | skdev->name, __func__, __LINE__, | ||
1804 | sgp->status, sgp->masked_status, sgp->resid); | ||
1805 | |||
1806 | if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) { | ||
1807 | if (sgp->mx_sb_len > 0) { | ||
1808 | struct fit_comp_error_info *ei = &skspcl->req.err_info; | ||
1809 | u32 nbytes = sizeof(*ei); | ||
1810 | |||
1811 | nbytes = min_t(u32, nbytes, sgp->mx_sb_len); | ||
1812 | |||
1813 | sgp->sb_len_wr = nbytes; | ||
1814 | |||
1815 | if (__copy_to_user(sgp->sbp, ei, nbytes)) { | ||
1816 | pr_debug("%s:%s:%d copy_to_user sense failed %p\n", | ||
1817 | skdev->name, __func__, __LINE__, | ||
1818 | sgp->sbp); | ||
1819 | return -EFAULT; | ||
1820 | } | ||
1821 | } | ||
1822 | } | ||
1823 | |||
1824 | if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) { | ||
1825 | pr_debug("%s:%s:%d copy_to_user sg failed %p\n", | ||
1826 | skdev->name, __func__, __LINE__, sksgio->argp); | ||
1827 | return -EFAULT; | ||
1828 | } | ||
1829 | |||
1830 | return 0; | ||
1831 | } | ||
1832 | |||
1833 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | ||
1834 | struct skd_sg_io *sksgio) | ||
1835 | { | ||
1836 | struct skd_special_context *skspcl = sksgio->skspcl; | ||
1837 | |||
1838 | if (skspcl != NULL) { | ||
1839 | ulong flags; | ||
1840 | |||
1841 | sksgio->skspcl = NULL; | ||
1842 | |||
1843 | spin_lock_irqsave(&skdev->lock, flags); | ||
1844 | skd_release_special(skdev, skspcl); | ||
1845 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
1846 | } | ||
1847 | |||
1848 | return 0; | ||
1849 | } | ||
1850 | |||
1851 | /* | ||
1852 | ***************************************************************************** | ||
1853 | * INTERNAL REQUESTS -- generated by driver itself | ||
1854 | ***************************************************************************** | ||
1855 | */ | ||
1856 | |||
1857 | static int skd_format_internal_skspcl(struct skd_device *skdev) | ||
1858 | { | ||
1859 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | ||
1860 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | ||
1861 | struct fit_msg_hdr *fmh; | ||
1862 | uint64_t dma_address; | ||
1863 | struct skd_scsi_request *scsi; | ||
1864 | |||
1865 | fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0]; | ||
1866 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | ||
1867 | fmh->num_protocol_cmds_coalesced = 1; | ||
1868 | |||
1869 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | ||
1870 | memset(scsi, 0, sizeof(*scsi)); | ||
1871 | dma_address = skspcl->req.sksg_dma_address; | ||
1872 | scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); | ||
1873 | sgd->control = FIT_SGD_CONTROL_LAST; | ||
1874 | sgd->byte_count = 0; | ||
1875 | sgd->host_side_addr = skspcl->db_dma_address; | ||
1876 | sgd->dev_side_addr = 0; | ||
1877 | sgd->next_desc_ptr = 0LL; | ||
1878 | |||
1879 | return 1; | ||
1880 | } | ||
1881 | |||
1882 | #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES | ||
1883 | |||
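| /* | ||
| * Issue one step of the driver's internal bring-up sequence on the | ||
| * dedicated internal special context. The opcode selects which CDB | ||
| * is built (TEST_UNIT_READY, WRITE/READ_BUFFER, READ_CAPACITY, | ||
| * INQUIRY or SYNCHRONIZE_CACHE); if a refresh is already in flight | ||
| * this is a no-op. | ||
| */ | ||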
1884 | static void skd_send_internal_skspcl(struct skd_device *skdev, | ||
1885 | struct skd_special_context *skspcl, | ||
1886 | u8 opcode) | ||
1887 | { | ||
1888 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | ||
1889 | struct skd_scsi_request *scsi; | ||
1890 | unsigned char *buf = skspcl->data_buf; | ||
1891 | int i; | ||
1892 | |||
1893 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) | ||
1894 | /* | ||
1895 | * A refresh is already in progress. | ||
1896 | * Just wait for it to finish. | ||
1897 | */ | ||
1898 | return; | ||
1899 | |||
1900 | SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0); | ||
1901 | skspcl->req.state = SKD_REQ_STATE_BUSY; | ||
1902 | skspcl->req.id += SKD_ID_INCR; | ||
1903 | |||
1904 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | ||
1905 | scsi->hdr.tag = skspcl->req.id; | ||
1906 | |||
1907 | memset(scsi->cdb, 0, sizeof(scsi->cdb)); | ||
1908 | |||
1909 | switch (opcode) { | ||
1910 | case TEST_UNIT_READY: | ||
1911 | scsi->cdb[0] = TEST_UNIT_READY; | ||
1912 | sgd->byte_count = 0; | ||
1913 | scsi->hdr.sg_list_len_bytes = 0; | ||
1914 | break; | ||
1915 | |||
1916 | case READ_CAPACITY: | ||
1917 | scsi->cdb[0] = READ_CAPACITY; | ||
1918 | sgd->byte_count = SKD_N_READ_CAP_BYTES; | ||
1919 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1920 | break; | ||
1921 | |||
1922 | case INQUIRY: | ||
1923 | scsi->cdb[0] = INQUIRY; | ||
1924 | scsi->cdb[1] = 0x01; /* evpd */ | ||
1925 | scsi->cdb[2] = 0x80; /* serial number page */ | ||
1926 | scsi->cdb[4] = 0x10; | ||
1927 | sgd->byte_count = 16; | ||
1928 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1929 | break; | ||
1930 | |||
1931 | case SYNCHRONIZE_CACHE: | ||
1932 | scsi->cdb[0] = SYNCHRONIZE_CACHE; | ||
1933 | sgd->byte_count = 0; | ||
1934 | scsi->hdr.sg_list_len_bytes = 0; | ||
1935 | break; | ||
1936 | |||
1937 | case WRITE_BUFFER: | ||
1938 | scsi->cdb[0] = WRITE_BUFFER; | ||
1939 | scsi->cdb[1] = 0x02; | ||
1940 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | ||
1941 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | ||
1942 | sgd->byte_count = WR_BUF_SIZE; | ||
1943 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1944 | /* fill incrementing byte pattern */ | ||
1945 | for (i = 0; i < sgd->byte_count; i++) | ||
1946 | buf[i] = i & 0xFF; | ||
1947 | break; | ||
1948 | |||
1949 | case READ_BUFFER: | ||
1950 | scsi->cdb[0] = READ_BUFFER; | ||
1951 | scsi->cdb[1] = 0x02; | ||
1952 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | ||
1953 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | ||
1954 | sgd->byte_count = WR_BUF_SIZE; | ||
1955 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | ||
1956 | memset(skspcl->data_buf, 0, sgd->byte_count); | ||
1957 | break; | ||
1958 | |||
1959 | default: | ||
1960 | SKD_ASSERT("Don't know what to send"); | ||
1961 | return; | ||
1962 | |||
1963 | } | ||
1964 | skd_send_special_fitmsg(skdev, skspcl); | ||
1965 | } | ||
1966 | |||
1967 | static void skd_refresh_device_data(struct skd_device *skdev) | ||
1968 | { | ||
1969 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | ||
1970 | |||
1971 | skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); | ||
1972 | } | ||
1973 | |||
1974 | static int skd_chk_read_buf(struct skd_device *skdev, | ||
1975 | struct skd_special_context *skspcl) | ||
1976 | { | ||
1977 | unsigned char *buf = skspcl->data_buf; | ||
1978 | int i; | ||
1979 | |||
1980 | /* check for incrementing byte pattern */ | ||
1981 | for (i = 0; i < WR_BUF_SIZE; i++) | ||
1982 | if (buf[i] != (i & 0xFF)) | ||
1983 | return 1; | ||
1984 | |||
1985 | return 0; | ||
1986 | } | ||
1987 | |||
1988 | static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, | ||
1989 | u8 code, u8 qual, u8 fruc) | ||
1990 | { | ||
1991 | /* If the check condition is of special interest, log a message */ | ||
1992 | if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) | ||
1993 | && (code == 0x04) && (qual == 0x06)) { | ||
1994 | pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/" | ||
1995 | "ascq/fruc %02x/%02x/%02x/%02x\n", | ||
1996 | skd_name(skdev), key, code, qual, fruc); | ||
1997 | } | ||
1998 | } | ||
1999 | |||
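| /* | ||
| * Completion handler for internal requests. Each completed CDB | ||
| * advances the bring-up sequence: TEST_UNIT_READY -> WRITE_BUFFER -> | ||
| * READ_BUFFER -> READ_CAPACITY -> INQUIRY, retrying or soft | ||
| * resetting on failure. | ||
| */ | ||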
2000 | static void skd_complete_internal(struct skd_device *skdev, | ||
2001 | volatile struct fit_completion_entry_v1 | ||
2002 | *skcomp, | ||
2003 | volatile struct fit_comp_error_info *skerr, | ||
2004 | struct skd_special_context *skspcl) | ||
2005 | { | ||
2006 | u8 *buf = skspcl->data_buf; | ||
2007 | u8 status; | ||
2008 | int i; | ||
2009 | struct skd_scsi_request *scsi = | ||
2010 | (struct skd_scsi_request *)&skspcl->msg_buf[64]; | ||
2011 | |||
2012 | SKD_ASSERT(skspcl == &skdev->internal_skspcl); | ||
2013 | |||
2014 | pr_debug("%s:%s:%d complete internal %x\n", | ||
2015 | skdev->name, __func__, __LINE__, scsi->cdb[0]); | ||
2016 | |||
2017 | skspcl->req.completion = *skcomp; | ||
2018 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
2019 | skspcl->req.id += SKD_ID_INCR; | ||
2020 | |||
2021 | status = skspcl->req.completion.status; | ||
2022 | |||
2023 | skd_log_check_status(skdev, status, skerr->key, skerr->code, | ||
2024 | skerr->qual, skerr->fruc); | ||
2025 | |||
2026 | switch (scsi->cdb[0]) { | ||
2027 | case TEST_UNIT_READY: | ||
2028 | if (status == SAM_STAT_GOOD) | ||
2029 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | ||
2030 | else if ((status == SAM_STAT_CHECK_CONDITION) && | ||
2031 | (skerr->key == MEDIUM_ERROR)) | ||
2032 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | ||
2033 | else { | ||
2034 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | ||
2035 | pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n", | ||
2036 | skdev->name, __func__, __LINE__, | ||
2037 | skdev->state); | ||
2038 | return; | ||
2039 | } | ||
2040 | pr_debug("%s:%s:%d **** TUR failed, retry skerr\n", | ||
2041 | skdev->name, __func__, __LINE__); | ||
2042 | skd_send_internal_skspcl(skdev, skspcl, 0x00); | ||
2043 | } | ||
2044 | break; | ||
2045 | |||
2046 | case WRITE_BUFFER: | ||
2047 | if (status == SAM_STAT_GOOD) | ||
2048 | skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); | ||
2049 | else { | ||
2050 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | ||
2051 | pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n", | ||
2052 | skdev->name, __func__, __LINE__, | ||
2053 | skdev->state); | ||
2054 | return; | ||
2055 | } | ||
2056 | pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n", | ||
2057 | skdev->name, __func__, __LINE__); | ||
2058 | skd_send_internal_skspcl(skdev, skspcl, 0x00); | ||
2059 | } | ||
2060 | break; | ||
2061 | |||
2062 | case READ_BUFFER: | ||
2063 | if (status == SAM_STAT_GOOD) { | ||
2064 | if (skd_chk_read_buf(skdev, skspcl) == 0) | ||
2065 | skd_send_internal_skspcl(skdev, skspcl, | ||
2066 | READ_CAPACITY); | ||
2067 | else { | ||
2068 | pr_err( | ||
2069 | "(%s):*** W/R Buffer mismatch %d ***\n", | ||
2070 | skd_name(skdev), skdev->connect_retries); | ||
2071 | if (skdev->connect_retries < | ||
2072 | SKD_MAX_CONNECT_RETRIES) { | ||
2073 | skdev->connect_retries++; | ||
2074 | skd_soft_reset(skdev); | ||
2075 | } else { | ||
2076 | pr_err( | ||
2077 | "(%s): W/R Buffer Connect Error\n", | ||
2078 | skd_name(skdev)); | ||
2079 | return; | ||
2080 | } | ||
2081 | } | ||
2082 | |||
2083 | } else { | ||
2084 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | ||
2085 | pr_debug("%s:%s:%d " | ||
2086 | "read buffer failed, don't send anymore state 0x%x\n", | ||
2087 | skdev->name, __func__, __LINE__, | ||
2088 | skdev->state); | ||
2089 | return; | ||
2090 | } | ||
2091 | pr_debug("%s:%s:%d " | ||
2092 | "**** read buffer failed, retry skerr\n", | ||
2093 | skdev->name, __func__, __LINE__); | ||
2094 | skd_send_internal_skspcl(skdev, skspcl, 0x00); | ||
2095 | } | ||
2096 | break; | ||
2097 | |||
2098 | case READ_CAPACITY: | ||
2099 | skdev->read_cap_is_valid = 0; | ||
2100 | if (status == SAM_STAT_GOOD) { | ||
2101 | skdev->read_cap_last_lba = | ||
2102 | (buf[0] << 24) | (buf[1] << 16) | | ||
2103 | (buf[2] << 8) | buf[3]; | ||
2104 | skdev->read_cap_blocksize = | ||
2105 | (buf[4] << 24) | (buf[5] << 16) | | ||
2106 | (buf[6] << 8) | buf[7]; | ||
2107 | |||
2108 | pr_debug("%s:%s:%d last lba %d, bs %d\n", | ||
2109 | skdev->name, __func__, __LINE__, | ||
2110 | skdev->read_cap_last_lba, | ||
2111 | skdev->read_cap_blocksize); | ||
2112 | |||
2113 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | ||
2114 | |||
2115 | skdev->read_cap_is_valid = 1; | ||
2116 | |||
2117 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); | ||
2118 | } else if ((status == SAM_STAT_CHECK_CONDITION) && | ||
2119 | (skerr->key == MEDIUM_ERROR)) { | ||
2120 | skdev->read_cap_last_lba = ~0; | ||
2121 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | ||
2122 | pr_debug("%s:%s:%d " | ||
2123 | "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n", | ||
2124 | skdev->name, __func__, __LINE__); | ||
2125 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); | ||
2126 | } else { | ||
2127 | pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n", | ||
2128 | skdev->name, __func__, __LINE__); | ||
2129 | skd_send_internal_skspcl(skdev, skspcl, | ||
2130 | TEST_UNIT_READY); | ||
2131 | } | ||
2132 | break; | ||
2133 | |||
2134 | case INQUIRY: | ||
2135 | skdev->inquiry_is_valid = 0; | ||
2136 | if (status == SAM_STAT_GOOD) { | ||
2137 | skdev->inquiry_is_valid = 1; | ||
2138 | |||
2139 | for (i = 0; i < 12; i++) | ||
2140 | skdev->inq_serial_num[i] = buf[i + 4]; | ||
2141 | skdev->inq_serial_num[12] = 0; | ||
2142 | } | ||
2143 | |||
2144 | if (skd_unquiesce_dev(skdev) < 0) | ||
2145 | pr_debug("%s:%s:%d **** failed to ONLINE device\n", | ||
2146 | skdev->name, __func__, __LINE__); | ||
2147 | /* connection is complete */ | ||
2148 | skdev->connect_retries = 0; | ||
2149 | break; | ||
2150 | |||
2151 | case SYNCHRONIZE_CACHE: | ||
2152 | if (status == SAM_STAT_GOOD) | ||
2153 | skdev->sync_done = 1; | ||
2154 | else | ||
2155 | skdev->sync_done = -1; | ||
2156 | wake_up_interruptible(&skdev->waitq); | ||
2157 | break; | ||
2158 | |||
2159 | default: | ||
2160 | SKD_ASSERT("we didn't send this"); | ||
2161 | } | ||
2162 | } | ||
2163 | |||
2164 | /* | ||
2165 | ***************************************************************************** | ||
2166 | * FIT MESSAGES | ||
2167 | ***************************************************************************** | ||
2168 | */ | ||
2169 | |||
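| /* | ||
| * Hand a coalesced FIT message to the device: encode the message | ||
| * size into the low bits of its DMA address and write the result to | ||
| * the FIT_Q_COMMAND doorbell. | ||
| */ | ||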
2170 | static void skd_send_fitmsg(struct skd_device *skdev, | ||
2171 | struct skd_fitmsg_context *skmsg) | ||
2172 | { | ||
2173 | u64 qcmd; | ||
2174 | struct fit_msg_hdr *fmh; | ||
2175 | |||
2176 | pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n", | ||
2177 | skdev->name, __func__, __LINE__, | ||
2178 | skmsg->mb_dma_address, skdev->in_flight); | ||
2179 | pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n", | ||
2180 | skdev->name, __func__, __LINE__, | ||
2181 | skmsg->msg_buf, skmsg->offset); | ||
2182 | |||
2183 | qcmd = skmsg->mb_dma_address; | ||
2184 | qcmd |= FIT_QCMD_QID_NORMAL; | ||
2185 | |||
2186 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | ||
2187 | skmsg->outstanding = fmh->num_protocol_cmds_coalesced; | ||
2188 | |||
2189 | if (unlikely(skdev->dbg_level > 1)) { | ||
2190 | u8 *bp = (u8 *)skmsg->msg_buf; | ||
2191 | int i; | ||
2192 | for (i = 0; i < skmsg->length; i += 8) { | ||
2193 | pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x " | ||
2194 | "%02x %02x %02x %02x\n", | ||
2195 | skdev->name, __func__, __LINE__, | ||
2196 | i, bp[i + 0], bp[i + 1], bp[i + 2], | ||
2197 | bp[i + 3], bp[i + 4], bp[i + 5], | ||
2198 | bp[i + 6], bp[i + 7]); | ||
2199 | if (i == 0) | ||
2200 | i = 64 - 8; | ||
2201 | } | ||
2202 | } | ||
2203 | |||
2204 | if (skmsg->length > 256) | ||
2205 | qcmd |= FIT_QCMD_MSGSIZE_512; | ||
2206 | else if (skmsg->length > 128) | ||
2207 | qcmd |= FIT_QCMD_MSGSIZE_256; | ||
2208 | else if (skmsg->length > 64) | ||
2209 | qcmd |= FIT_QCMD_MSGSIZE_128; | ||
2210 | else | ||
2211 | /* | ||
2212 | * This makes no sense because the FIT msg header is | ||
2213 | * 64 bytes. If the msg is only 64 bytes long it has | ||
2214 | * no payload. | ||
2215 | */ | ||
2216 | qcmd |= FIT_QCMD_MSGSIZE_64; | ||
2217 | |||
2218 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | ||
2219 | |||
2220 | } | ||
2221 | |||
2222 | static void skd_send_special_fitmsg(struct skd_device *skdev, | ||
2223 | struct skd_special_context *skspcl) | ||
2224 | { | ||
2225 | u64 qcmd; | ||
2226 | |||
2227 | if (unlikely(skdev->dbg_level > 1)) { | ||
2228 | u8 *bp = (u8 *)skspcl->msg_buf; | ||
2229 | int i; | ||
2230 | |||
2231 | for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { | ||
2232 | pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x " | ||
2233 | "%02x %02x %02x %02x\n", | ||
2234 | skdev->name, __func__, __LINE__, i, | ||
2235 | bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3], | ||
2236 | bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]); | ||
2237 | if (i == 0) | ||
2238 | i = 64 - 8; | ||
2239 | } | ||
2240 | |||
2241 | pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", | ||
2242 | skdev->name, __func__, __LINE__, | ||
2243 | skspcl, skspcl->req.id, skspcl->req.sksg_list, | ||
2244 | skspcl->req.sksg_dma_address); | ||
2245 | for (i = 0; i < skspcl->req.n_sg; i++) { | ||
2246 | struct fit_sg_descriptor *sgd = | ||
2247 | &skspcl->req.sksg_list[i]; | ||
2248 | |||
2249 | pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " | ||
2250 | "addr=0x%llx next=0x%llx\n", | ||
2251 | skdev->name, __func__, __LINE__, | ||
2252 | i, sgd->byte_count, sgd->control, | ||
2253 | sgd->host_side_addr, sgd->next_desc_ptr); | ||
2254 | } | ||
2255 | } | ||
2256 | |||
2257 | /* | ||
2258 | * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr | ||
2259 | * and one 64-byte SSDI command. | ||
2260 | */ | ||
2261 | qcmd = skspcl->mb_dma_address; | ||
2262 | qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; | ||
2263 | |||
2264 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | ||
2265 | } | ||
2266 | |||
2267 | /* | ||
2268 | ***************************************************************************** | ||
2269 | * COMPLETION QUEUE | ||
2270 | ***************************************************************************** | ||
2271 | */ | ||
2272 | |||
2273 | static void skd_complete_other(struct skd_device *skdev, | ||
2274 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2275 | volatile struct fit_comp_error_info *skerr); | ||
2276 | |||
2277 | struct sns_info { | ||
2278 | u8 type; | ||
2279 | u8 stat; | ||
2280 | u8 key; | ||
2281 | u8 asc; | ||
2282 | u8 ascq; | ||
2283 | u8 mask; | ||
2284 | enum skd_check_status_action action; | ||
2285 | }; | ||
2286 | |||
2287 | static struct sns_info skd_chkstat_table[] = { | ||
2288 | /* Good */ | ||
2289 | { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, | ||
2290 | SKD_CHECK_STATUS_REPORT_GOOD }, | ||
2291 | |||
2292 | /* Smart alerts */ | ||
2293 | { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ | ||
2294 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | ||
2295 | { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ | ||
2296 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | ||
2297 | { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ | ||
2298 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | ||
2299 | |||
2300 | /* Retry (with limits) */ | ||
2301 | { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ | ||
2302 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2303 | { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ | ||
2304 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2305 | { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ | ||
2306 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2307 | { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ | ||
2308 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | ||
2309 | |||
2310 | /* Busy (or about to be) */ | ||
2311 | { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ | ||
2312 | SKD_CHECK_STATUS_BUSY_IMMINENT }, | ||
2313 | }; | ||
2314 | |||
2315 | /* | ||
2316 | * Look up status and sense data to decide how to handle the error | ||
2317 | * from the device. | ||
2318 | * The mask says which fields must match, e.g. mask=0x18 means check | ||
2319 | * type and stat and ignore key, asc, and ascq. | ||
2320 | */ | ||
2321 | |||
2322 | static enum skd_check_status_action | ||
2323 | skd_check_status(struct skd_device *skdev, | ||
2324 | u8 cmp_status, volatile struct fit_comp_error_info *skerr) | ||
2325 | { | ||
2326 | int i, n; | ||
2327 | |||
2328 | pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", | ||
2329 | skd_name(skdev), skerr->key, skerr->code, skerr->qual, | ||
2330 | skerr->fruc); | ||
2331 | |||
2332 | pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", | ||
2333 | skdev->name, __func__, __LINE__, skerr->type, cmp_status, | ||
2334 | skerr->key, skerr->code, skerr->qual, skerr->fruc); | ||
2335 | |||
2336 | /* Does the info match an entry in the table? */ | ||
2337 | n = ARRAY_SIZE(skd_chkstat_table); | ||
2338 | for (i = 0; i < n; i++) { | ||
2339 | struct sns_info *sns = &skd_chkstat_table[i]; | ||
2340 | |||
2341 | if (sns->mask & 0x10) | ||
2342 | if (skerr->type != sns->type) | ||
2343 | continue; | ||
2344 | |||
2345 | if (sns->mask & 0x08) | ||
2346 | if (cmp_status != sns->stat) | ||
2347 | continue; | ||
2348 | |||
2349 | if (sns->mask & 0x04) | ||
2350 | if (skerr->key != sns->key) | ||
2351 | continue; | ||
2352 | |||
2353 | if (sns->mask & 0x02) | ||
2354 | if (skerr->code != sns->asc) | ||
2355 | continue; | ||
2356 | |||
2357 | if (sns->mask & 0x01) | ||
2358 | if (skerr->qual != sns->ascq) | ||
2359 | continue; | ||
2360 | |||
2361 | if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { | ||
2362 | pr_err("(%s): SMART Alert: sense key/asc/ascq " | ||
2363 | "%02x/%02x/%02x\n", | ||
2364 | skd_name(skdev), skerr->key, | ||
2365 | skerr->code, skerr->qual); | ||
2366 | } | ||
2367 | return sns->action; | ||
2368 | } | ||
2369 | |||
2370 | /* No other match, so nonzero status means error, | ||
2371 | * zero status means good | ||
2372 | */ | ||
2373 | if (cmp_status) { | ||
2374 | pr_debug("%s:%s:%d status check: error\n", | ||
2375 | skdev->name, __func__, __LINE__); | ||
2376 | return SKD_CHECK_STATUS_REPORT_ERROR; | ||
2377 | } | ||
2378 | |||
2379 | pr_debug("%s:%s:%d status check good default\n", | ||
2380 | skdev->name, __func__, __LINE__); | ||
2381 | return SKD_CHECK_STATUS_REPORT_GOOD; | ||
2382 | } | ||
2383 | |||
2384 | static void skd_resolve_req_exception(struct skd_device *skdev, | ||
2385 | struct skd_request_context *skreq) | ||
2386 | { | ||
2387 | u8 cmp_status = skreq->completion.status; | ||
2388 | |||
2389 | switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { | ||
2390 | case SKD_CHECK_STATUS_REPORT_GOOD: | ||
2391 | case SKD_CHECK_STATUS_REPORT_SMART_ALERT: | ||
2392 | skd_end_request(skdev, skreq, 0); | ||
2393 | break; | ||
2394 | |||
2395 | case SKD_CHECK_STATUS_BUSY_IMMINENT: | ||
2396 | skd_log_skreq(skdev, skreq, "retry(busy)"); | ||
2397 | blk_requeue_request(skdev->queue, skreq->req); | ||
2398 | pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); | ||
2399 | skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; | ||
2400 | skdev->timer_countdown = SKD_TIMER_MINUTES(20); | ||
2401 | skd_quiesce_dev(skdev); | ||
2402 | break; | ||
2403 | |||
2404 | case SKD_CHECK_STATUS_REQUEUE_REQUEST: | ||
2405 | if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) { | ||
2406 | skd_log_skreq(skdev, skreq, "retry"); | ||
2407 | blk_requeue_request(skdev->queue, skreq->req); | ||
2408 | break; | ||
2409 | } | ||
2410 | /* fall through to report error */ | ||
2411 | |||
2412 | case SKD_CHECK_STATUS_REPORT_ERROR: | ||
2413 | default: | ||
2414 | skd_end_request(skdev, skreq, -EIO); | ||
2415 | break; | ||
2416 | } | ||
2417 | } | ||
2418 | |||
2419 | /* assume spinlock is already held */ | ||
2420 | static void skd_release_skreq(struct skd_device *skdev, | ||
2421 | struct skd_request_context *skreq) | ||
2422 | { | ||
2423 | u32 msg_slot; | ||
2424 | struct skd_fitmsg_context *skmsg; | ||
2425 | |||
2426 | u32 timo_slot; | ||
2427 | |||
2428 | /* | ||
2429 | * Reclaim the FIT msg buffer if this is | ||
2430 | * the first of the requests it carried to | ||
2431 | * be completed. The FIT msg buffer used to | ||
2432 | * send this request cannot be reused until | ||
2433 | * we are sure the s1120 card has copied | ||
2434 | * it to its memory. The FIT msg might have | ||
2435 | * contained several requests. As soon as | ||
2436 | * any of them are completed we know that | ||
2437 | * the entire FIT msg was transferred. | ||
2438 | * Only the first completed request will | ||
2439 | * match the FIT msg buffer id. The FIT | ||
2440 | * msg buffer id is immediately updated. | ||
2441 | * When subsequent requests complete the FIT | ||
2442 | * msg buffer id won't match, so we know | ||
2443 | * quite cheaply that it is already done. | ||
2444 | */ | ||
2445 | msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; | ||
2446 | SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); | ||
2447 | |||
2448 | skmsg = &skdev->skmsg_table[msg_slot]; | ||
2449 | if (skmsg->id == skreq->fitmsg_id) { | ||
2450 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); | ||
2451 | SKD_ASSERT(skmsg->outstanding > 0); | ||
2452 | skmsg->outstanding--; | ||
2453 | if (skmsg->outstanding == 0) { | ||
2454 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
2455 | skmsg->id += SKD_ID_INCR; | ||
2456 | skmsg->next = skdev->skmsg_free_list; | ||
2457 | skdev->skmsg_free_list = skmsg; | ||
2458 | } | ||
2459 | } | ||
2460 | |||
2461 | /* | ||
2462 | * Decrease the number of active requests. | ||
2463 | * Also decrements the count in the timeout slot. | ||
2464 | */ | ||
2465 | SKD_ASSERT(skdev->in_flight > 0); | ||
2466 | skdev->in_flight -= 1; | ||
2467 | |||
2468 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | ||
2469 | SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); | ||
2470 | skdev->timeout_slot[timo_slot] -= 1; | ||
2471 | |||
2472 | /* | ||
2473 | * Reset backpointer | ||
2474 | */ | ||
2475 | skreq->req = NULL; | ||
2476 | |||
2477 | /* | ||
2478 | * Reclaim the skd_request_context | ||
2479 | */ | ||
2480 | skreq->state = SKD_REQ_STATE_IDLE; | ||
2481 | skreq->id += SKD_ID_INCR; | ||
2482 | skreq->next = skdev->skreq_free_list; | ||
2483 | skdev->skreq_free_list = skreq; | ||
2484 | } | ||
2485 | |||
2486 | #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA | ||
2487 | |||
2488 | static void skd_do_inq_page_00(struct skd_device *skdev, | ||
2489 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2490 | volatile struct fit_comp_error_info *skerr, | ||
2491 | uint8_t *cdb, uint8_t *buf) | ||
2492 | { | ||
2493 | uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; | ||
2494 | |||
2495 | /* Caller requested "supported pages". The driver needs to insert | ||
2496 | * its page. | ||
2497 | */ | ||
2498 | pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n", | ||
2499 | skdev->name, __func__, __LINE__); | ||
2500 | |||
2501 | /* If the device rejected the request because the CDB was | ||
2502 | * improperly formed, then just leave. | ||
2503 | */ | ||
2504 | if (skcomp->status == SAM_STAT_CHECK_CONDITION && | ||
2505 | skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) | ||
2506 | return; | ||
2507 | |||
2508 | /* Get the amount of space the caller allocated */ | ||
2509 | max_bytes = (cdb[3] << 8) | cdb[4]; | ||
2510 | |||
2511 | /* Get the number of pages actually returned by the device */ | ||
2512 | drive_pages = (buf[2] << 8) | buf[3]; | ||
2513 | drive_bytes = drive_pages + 4; | ||
2514 | new_size = drive_pages + 1; | ||
2515 | |||
2516 | /* Supported pages must be in numerical order, so find where | ||
2517 | * the driver page needs to be inserted into the list of | ||
2518 | * pages returned by the device. | ||
2519 | */ | ||
2520 | for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { | ||
2521 | if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) | ||
2522 | return; /* Device using this page code. abort */ | ||
2523 | else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) | ||
2524 | break; | ||
2525 | } | ||
2526 | |||
2527 | if (insert_pt < max_bytes) { | ||
2528 | uint16_t u; | ||
2529 | |||
2530 | /* Shift everything up one byte to make room. */ | ||
2531 | for (u = new_size + 3; u > insert_pt; u--) | ||
2532 | buf[u] = buf[u - 1]; | ||
2533 | buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; | ||
2534 | |||
2535 | /* Increment num_returned_bytes by 1 for the inserted page, | ||
2536 | * preserving SCSI (big endian) byte order. | ||
2537 | */ | ||
2538 | skcomp->num_returned_bytes = | ||
2539 | cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1); | ||
2540 | } | ||
2541 | |||
2542 | /* update page length field to reflect the driver's page too */ | ||
2543 | buf[2] = (uint8_t)((new_size >> 8) & 0xFF); | ||
2544 | buf[3] = (uint8_t)((new_size >> 0) & 0xFF); | ||
2545 | } | ||
2546 | |||
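| /* | ||
| * Read the PCIe link status register to report the negotiated link | ||
| * speed and width for the driver INQUIRY page. Unrecognized values | ||
| * are reported as STEC_LINK_UNKNOWN / 0xFF. | ||
| */ | ||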
2547 | static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) | ||
2548 | { | ||
2549 | int pcie_reg; | ||
2550 | u16 pci_bus_speed; | ||
2551 | u8 pci_lanes; | ||
2552 | |||
2553 | pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
2554 | if (pcie_reg) { | ||
2555 | u16 linksta; | ||
2556 | pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); | ||
2557 | |||
2558 | pci_bus_speed = linksta & 0xF; | ||
2559 | pci_lanes = (linksta & 0x3F0) >> 4; | ||
2560 | } else { | ||
2561 | *speed = STEC_LINK_UNKNOWN; | ||
2562 | *width = 0xFF; | ||
2563 | return; | ||
2564 | } | ||
2565 | |||
2566 | switch (pci_bus_speed) { | ||
2567 | case 1: | ||
2568 | *speed = STEC_LINK_2_5GTS; | ||
2569 | break; | ||
2570 | case 2: | ||
2571 | *speed = STEC_LINK_5GTS; | ||
2572 | break; | ||
2573 | case 3: | ||
2574 | *speed = STEC_LINK_8GTS; | ||
2575 | break; | ||
2576 | default: | ||
2577 | *speed = STEC_LINK_UNKNOWN; | ||
2578 | break; | ||
2579 | } | ||
2580 | |||
2581 | if (pci_lanes <= 0x20) | ||
2582 | *width = pci_lanes; | ||
2583 | else | ||
2584 | *width = 0xFF; | ||
2585 | } | ||
2586 | |||
2587 | static void skd_do_inq_page_da(struct skd_device *skdev, | ||
2588 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2589 | volatile struct fit_comp_error_info *skerr, | ||
2590 | uint8_t *cdb, uint8_t *buf) | ||
2591 | { | ||
2592 | struct pci_dev *pdev = skdev->pdev; | ||
2593 | unsigned max_bytes; | ||
2594 | struct driver_inquiry_data inq; | ||
2595 | u16 val; | ||
2596 | |||
2597 | pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n", | ||
2598 | skdev->name, __func__, __LINE__); | ||
2599 | |||
2600 | memset(&inq, 0, sizeof(inq)); | ||
2601 | |||
2602 | inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; | ||
2603 | |||
2604 | skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes); | ||
2605 | inq.pcie_bus_number = cpu_to_be16(pdev->bus->number); | ||
2606 | inq.pcie_device_number = PCI_SLOT(pdev->devfn); | ||
2607 | inq.pcie_function_number = PCI_FUNC(pdev->devfn); | ||
2608 | |||
2609 | pci_read_config_word(pdev, PCI_VENDOR_ID, &val); | ||
2610 | inq.pcie_vendor_id = cpu_to_be16(val); | ||
2611 | |||
2612 | pci_read_config_word(pdev, PCI_DEVICE_ID, &val); | ||
2613 | inq.pcie_device_id = cpu_to_be16(val); | ||
2614 | |||
2615 | pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val); | ||
2616 | inq.pcie_subsystem_vendor_id = cpu_to_be16(val); | ||
2617 | |||
2618 | pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val); | ||
2619 | inq.pcie_subsystem_device_id = cpu_to_be16(val); | ||
2620 | |||
2621 | /* Driver version, fixed length, padded with spaces on the right */ | ||
2622 | inq.driver_version_length = sizeof(inq.driver_version); | ||
2623 | memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); | ||
2624 | memcpy(inq.driver_version, DRV_VER_COMPL, | ||
2625 | min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); | ||
2626 | |||
2627 | inq.page_length = cpu_to_be16((sizeof(inq) - 4)); | ||
2628 | |||
2629 | /* Clear the error set by the device */ | ||
2630 | skcomp->status = SAM_STAT_GOOD; | ||
2631 | memset((void *)skerr, 0, sizeof(*skerr)); | ||
2632 | |||
2633 | /* copy response into output buffer */ | ||
2634 | max_bytes = (cdb[3] << 8) | cdb[4]; | ||
2635 | memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); | ||
2636 | |||
2637 | skcomp->num_returned_bytes = | ||
2638 | cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq))); | ||
2639 | } | ||
2640 | |||
2641 | static void skd_do_driver_inq(struct skd_device *skdev, | ||
2642 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2643 | volatile struct fit_comp_error_info *skerr, | ||
2644 | uint8_t *cdb, uint8_t *buf) | ||
2645 | { | ||
2646 | if (!buf) | ||
2647 | return; | ||
2648 | else if (cdb[0] != INQUIRY) | ||
2649 | return; /* Not an INQUIRY */ | ||
2650 | else if ((cdb[1] & 1) == 0) | ||
2651 | return; /* EVPD not set */ | ||
2652 | else if (cdb[2] == 0) | ||
2653 | /* Need to add driver's page to supported pages list */ | ||
2654 | skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); | ||
2655 | else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE) | ||
2656 | /* Caller requested driver's page */ | ||
2657 | skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); | ||
2658 | } | ||
2659 | |||
2660 | static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) | ||
2661 | { | ||
2662 | if (!sg) | ||
2663 | return NULL; | ||
2664 | if (!sg_page(sg)) | ||
2665 | return NULL; | ||
2666 | return sg_virt(sg); | ||
2667 | } | ||
2668 | |||
2669 | static void skd_process_scsi_inq(struct skd_device *skdev, | ||
2670 | volatile struct fit_completion_entry_v1 | ||
2671 | *skcomp, | ||
2672 | volatile struct fit_comp_error_info *skerr, | ||
2673 | struct skd_special_context *skspcl) | ||
2674 | { | ||
2675 | uint8_t *buf; | ||
2676 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | ||
2677 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | ||
2678 | |||
2679 | dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, | ||
2680 | skspcl->req.sg_data_dir); | ||
2681 | buf = skd_sg_1st_page_ptr(skspcl->req.sg); | ||
2682 | |||
2683 | if (buf) | ||
2684 | skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); | ||
2685 | } | ||
2686 | |||
2687 | |||
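| /* | ||
| * Drain the completion ring: process entries whose cycle bit matches | ||
| * the expected value, match each tag back to its request and either | ||
| * complete it or hand it to the exception/other-completion paths. | ||
| * A non-zero limit bounds the number of entries handled per call. | ||
| */ | ||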
2688 | static int skd_isr_completion_posted(struct skd_device *skdev, | ||
2689 | int limit, int *enqueued) | ||
2690 | { | ||
2691 | volatile struct fit_completion_entry_v1 *skcmp = NULL; | ||
2692 | volatile struct fit_comp_error_info *skerr; | ||
2693 | u16 req_id; | ||
2694 | u32 req_slot; | ||
2695 | struct skd_request_context *skreq; | ||
2696 | u16 cmp_cntxt = 0; | ||
2697 | u8 cmp_status = 0; | ||
2698 | u8 cmp_cycle = 0; | ||
2699 | u32 cmp_bytes = 0; | ||
2700 | int rc = 0; | ||
2701 | int processed = 0; | ||
2702 | |||
2703 | for (;; ) { | ||
2704 | SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); | ||
2705 | |||
2706 | skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; | ||
2707 | cmp_cycle = skcmp->cycle; | ||
2708 | cmp_cntxt = skcmp->tag; | ||
2709 | cmp_status = skcmp->status; | ||
2710 | cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); | ||
2711 | |||
2712 | skerr = &skdev->skerr_table[skdev->skcomp_ix]; | ||
2713 | |||
2714 | pr_debug("%s:%s:%d " | ||
2715 | "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " | ||
2716 | "busy=%d rbytes=0x%x proto=%d\n", | ||
2717 | skdev->name, __func__, __LINE__, skdev->skcomp_cycle, | ||
2718 | skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, | ||
2719 | skdev->in_flight, cmp_bytes, skdev->proto_ver); | ||
2720 | |||
2721 | if (cmp_cycle != skdev->skcomp_cycle) { | ||
2722 | pr_debug("%s:%s:%d end of completions\n", | ||
2723 | skdev->name, __func__, __LINE__); | ||
2724 | break; | ||
2725 | } | ||
2726 | /* | ||
2727 | * Update the completion queue head index and possibly | ||
2728 | * the completion cycle count. 8-bit wrap-around. | ||
2729 | */ | ||
2730 | skdev->skcomp_ix++; | ||
2731 | if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { | ||
2732 | skdev->skcomp_ix = 0; | ||
2733 | skdev->skcomp_cycle++; | ||
2734 | } | ||
2735 | |||
2736 | /* | ||
2737 | * The command context is a unique 32-bit ID. The low order | ||
2738 | * bits help locate the request. The request is usually a | ||
2739 | * r/w request (see skd_start() above) or a special request. | ||
2740 | */ | ||
2741 | req_id = cmp_cntxt; | ||
2742 | req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; | ||
2743 | |||
2744 | /* Is this other than a r/w request? */ | ||
2745 | if (req_slot >= skdev->num_req_context) { | ||
2746 | /* | ||
2747 | * This is not a completion for a r/w request. | ||
2748 | */ | ||
2749 | skd_complete_other(skdev, skcmp, skerr); | ||
2750 | continue; | ||
2751 | } | ||
2752 | |||
2753 | skreq = &skdev->skreq_table[req_slot]; | ||
2754 | |||
2755 | /* | ||
2756 | * Make sure the request ID for the slot matches. | ||
2757 | */ | ||
2758 | if (skreq->id != req_id) { | ||
2759 | pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n", | ||
2760 | skdev->name, __func__, __LINE__, | ||
2761 | req_id, skreq->id); | ||
2762 | { | ||
2763 | u16 new_id = cmp_cntxt; | ||
2764 | pr_err("(%s): Completion mismatch " | ||
2765 | "comp_id=0x%04x skreq=0x%04x new=0x%04x\n", | ||
2766 | skd_name(skdev), req_id, | ||
2767 | skreq->id, new_id); | ||
2768 | |||
2769 | continue; | ||
2770 | } | ||
2771 | } | ||
2772 | |||
2773 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); | ||
2774 | |||
2775 | if (skreq->state == SKD_REQ_STATE_ABORTED) { | ||
2776 | pr_debug("%s:%s:%d reclaim req %p id=%04x\n", | ||
2777 | skdev->name, __func__, __LINE__, | ||
2778 | skreq, skreq->id); | ||
2779 | /* a previously timed out command can | ||
2780 | * now be cleaned up */ | ||
2781 | skd_release_skreq(skdev, skreq); | ||
2782 | continue; | ||
2783 | } | ||
2784 | |||
2785 | skreq->completion = *skcmp; | ||
2786 | if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { | ||
2787 | skreq->err_info = *skerr; | ||
2788 | skd_log_check_status(skdev, cmp_status, skerr->key, | ||
2789 | skerr->code, skerr->qual, | ||
2790 | skerr->fruc); | ||
2791 | } | ||
2792 | /* Release DMA resources for the request. */ | ||
2793 | if (skreq->n_sg > 0) | ||
2794 | skd_postop_sg_list(skdev, skreq); | ||
2795 | |||
2796 | if (!skreq->req) { | ||
2797 | pr_debug("%s:%s:%d NULL backptr skdreq %p, " | ||
2798 | "req=0x%x req_id=0x%x\n", | ||
2799 | skdev->name, __func__, __LINE__, | ||
2800 | skreq, skreq->id, req_id); | ||
2801 | } else { | ||
2802 | /* | ||
2803 | * Capture the outcome and post it back to the | ||
2804 | * native request. | ||
2805 | */ | ||
2806 | if (likely(cmp_status == SAM_STAT_GOOD)) | ||
2807 | skd_end_request(skdev, skreq, 0); | ||
2808 | else | ||
2809 | skd_resolve_req_exception(skdev, skreq); | ||
2810 | } | ||
2811 | |||
2812 | /* | ||
2813 | * Release the skreq, its FIT msg (if one), timeout slot, | ||
2814 | * and queue depth. | ||
2815 | */ | ||
2816 | skd_release_skreq(skdev, skreq); | ||
2817 | |||
2818 | /* skd_isr_comp_limit equal zero means no limit */ | ||
2819 | if (limit) { | ||
2820 | if (++processed >= limit) { | ||
2821 | rc = 1; | ||
2822 | break; | ||
2823 | } | ||
2824 | } | ||
2825 | } | ||
2826 | |||
2827 | if ((skdev->state == SKD_DRVR_STATE_PAUSING) | ||
2828 | && (skdev->in_flight) == 0) { | ||
2829 | skdev->state = SKD_DRVR_STATE_PAUSED; | ||
2830 | wake_up_interruptible(&skdev->waitq); | ||
2831 | } | ||
2832 | |||
2833 | return rc; | ||
2834 | } | ||
2835 | |||
2836 | static void skd_complete_other(struct skd_device *skdev, | ||
2837 | volatile struct fit_completion_entry_v1 *skcomp, | ||
2838 | volatile struct fit_comp_error_info *skerr) | ||
2839 | { | ||
2840 | u32 req_id = 0; | ||
2841 | u32 req_table; | ||
2842 | u32 req_slot; | ||
2843 | struct skd_special_context *skspcl; | ||
2844 | |||
2845 | req_id = skcomp->tag; | ||
2846 | req_table = req_id & SKD_ID_TABLE_MASK; | ||
2847 | req_slot = req_id & SKD_ID_SLOT_MASK; | ||
2848 | |||
2849 | pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n", | ||
2850 | skdev->name, __func__, __LINE__, | ||
2851 | req_table, req_id, req_slot); | ||
2852 | |||
2853 | /* | ||
2854 | * Based on the request id, determine how to dispatch this completion. | ||
2855 | * This switch/case finds the good cases and forwards the | ||
2856 | * completion entry. Errors are reported below the switch. | ||
2857 | */ | ||
2858 | switch (req_table) { | ||
2859 | case SKD_ID_RW_REQUEST: | ||
2860 | /* | ||
2861 | * The caller, skd_completion_posted_isr() above, | ||
2862 | * handles r/w requests. The only way we get here | ||
2863 | * is if the req_slot is out of bounds. | ||
2864 | */ | ||
2865 | break; | ||
2866 | |||
2867 | case SKD_ID_SPECIAL_REQUEST: | ||
2868 | /* | ||
2869 | * Make sure the req_slot is in bounds and that the id | ||
2870 | * matches. | ||
2871 | */ | ||
2872 | if (req_slot < skdev->n_special) { | ||
2873 | skspcl = &skdev->skspcl_table[req_slot]; | ||
2874 | if (skspcl->req.id == req_id && | ||
2875 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | ||
2876 | skd_complete_special(skdev, | ||
2877 | skcomp, skerr, skspcl); | ||
2878 | return; | ||
2879 | } | ||
2880 | } | ||
2881 | break; | ||
2882 | |||
2883 | case SKD_ID_INTERNAL: | ||
2884 | if (req_slot == 0) { | ||
2885 | skspcl = &skdev->internal_skspcl; | ||
2886 | if (skspcl->req.id == req_id && | ||
2887 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | ||
2888 | skd_complete_internal(skdev, | ||
2889 | skcomp, skerr, skspcl); | ||
2890 | return; | ||
2891 | } | ||
2892 | } | ||
2893 | break; | ||
2894 | |||
2895 | case SKD_ID_FIT_MSG: | ||
2896 | /* | ||
2897 | * These ids should never appear in a completion record. | ||
2898 | */ | ||
2899 | break; | ||
2900 | |||
2901 | default: | ||
2902 | /* | ||
2903 | * These ids should never appear anywhere. | ||
2904 | */ | ||
2905 | break; | ||
2906 | } | ||
2907 | |||
2908 | /* | ||
2909 | * If we get here it is a bad or stale id. | ||
2910 | */ | ||
2911 | } | ||
2912 | |||
2913 | static void skd_complete_special(struct skd_device *skdev, | ||
2914 | volatile struct fit_completion_entry_v1 | ||
2915 | *skcomp, | ||
2916 | volatile struct fit_comp_error_info *skerr, | ||
2917 | struct skd_special_context *skspcl) | ||
2918 | { | ||
2919 | pr_debug("%s:%s:%d completing special request %p\n", | ||
2920 | skdev->name, __func__, __LINE__, skspcl); | ||
2921 | if (skspcl->orphaned) { | ||
2922 | /* Discard orphaned request */ | ||
2923 | /* ?: Can this release directly or does it need | ||
2924 | * to use a worker? */ | ||
2925 | pr_debug("%s:%s:%d release orphaned %p\n", | ||
2926 | skdev->name, __func__, __LINE__, skspcl); | ||
2927 | skd_release_special(skdev, skspcl); | ||
2928 | return; | ||
2929 | } | ||
2930 | |||
2931 | skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); | ||
2932 | |||
2933 | skspcl->req.state = SKD_REQ_STATE_COMPLETED; | ||
2934 | skspcl->req.completion = *skcomp; | ||
2935 | skspcl->req.err_info = *skerr; | ||
2936 | |||
2937 | skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, | ||
2938 | skerr->code, skerr->qual, skerr->fruc); | ||
2939 | |||
2940 | wake_up_interruptible(&skdev->waitq); | ||
2941 | } | ||
2942 | |||
2943 | /* assume spinlock is already held */ | ||
2944 | static void skd_release_special(struct skd_device *skdev, | ||
2945 | struct skd_special_context *skspcl) | ||
2946 | { | ||
2947 | int i, was_depleted; | ||
2948 | |||
2949 | for (i = 0; i < skspcl->req.n_sg; i++) { | ||
2950 | struct page *page = sg_page(&skspcl->req.sg[i]); | ||
2951 | __free_page(page); | ||
2952 | } | ||
2953 | |||
2954 | was_depleted = (skdev->skspcl_free_list == NULL); | ||
2955 | |||
2956 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
2957 | skspcl->req.id += SKD_ID_INCR; | ||
2958 | skspcl->req.next = | ||
2959 | (struct skd_request_context *)skdev->skspcl_free_list; | ||
2960 | skdev->skspcl_free_list = (struct skd_special_context *)skspcl; | ||
2961 | |||
2962 | if (was_depleted) { | ||
2963 | pr_debug("%s:%s:%d skspcl was depleted\n", | ||
2964 | skdev->name, __func__, __LINE__); | ||
2965 | /* Free list was depleted. There might be waiters. */ | ||
2966 | wake_up_interruptible(&skdev->waitq); | ||
2967 | } | ||
2968 | } | ||
2969 | |||
2970 | static void skd_reset_skcomp(struct skd_device *skdev) | ||
2971 | { | ||
2972 | u32 nbytes; | ||
2973 | struct fit_completion_entry_v1 *skcomp; | ||
2974 | |||
2975 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | ||
2976 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | ||
2977 | |||
2978 | memset(skdev->skcomp_table, 0, nbytes); | ||
2979 | |||
2980 | skdev->skcomp_ix = 0; | ||
2981 | skdev->skcomp_cycle = 1; | ||
2982 | } | ||
2983 | |||
2984 | /* | ||
2985 | ***************************************************************************** | ||
2986 | * INTERRUPTS | ||
2987 | ***************************************************************************** | ||
2988 | */ | ||
2989 | static void skd_completion_worker(struct work_struct *work) | ||
2990 | { | ||
2991 | struct skd_device *skdev = | ||
2992 | container_of(work, struct skd_device, completion_worker); | ||
2993 | unsigned long flags; | ||
2994 | int flush_enqueued = 0; | ||
2995 | |||
2996 | spin_lock_irqsave(&skdev->lock, flags); | ||
2997 | |||
2998 | /* | ||
2999 | * Pass in limit=0, which means no limit: | ||
3000 | * process everything in the completion queue. | ||
3001 | */ | ||
3002 | skd_isr_completion_posted(skdev, 0, &flush_enqueued); | ||
3003 | skd_request_fn(skdev->queue); | ||
3004 | |||
3005 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3006 | } | ||
3007 | |||
3008 | static void skd_isr_msg_from_dev(struct skd_device *skdev); | ||
3009 | |||
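| /* | ||
| * Interrupt handler: acknowledge and dispatch every pending | ||
| * interrupt source. When the drive is online, completion processing | ||
| * is deferred to the completion worker. | ||
| */ | ||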
3010 | static irqreturn_t | ||
3011 | skd_isr(int irq, void *ptr) | ||
3012 | { | ||
3013 | struct skd_device *skdev; | ||
3014 | u32 intstat; | ||
3015 | u32 ack; | ||
3016 | int rc = 0; | ||
3017 | int deferred = 0; | ||
3018 | int flush_enqueued = 0; | ||
3019 | |||
3020 | skdev = (struct skd_device *)ptr; | ||
3021 | spin_lock(&skdev->lock); | ||
3022 | |||
3023 | for (;; ) { | ||
3024 | intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); | ||
3025 | |||
3026 | ack = FIT_INT_DEF_MASK; | ||
3027 | ack &= intstat; | ||
3028 | |||
3029 | pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n", | ||
3030 | skdev->name, __func__, __LINE__, intstat, ack); | ||
3031 | |||
3032 | /* As long as an interrupt is pending on the device, keep | ||
3033 | * looping. When none remain, get out; if we never did any | ||
3034 | * processing and the drive is online, defer to the completion worker. | ||
3035 | */ | ||
3036 | if (ack == 0) { | ||
3037 | /* No interrupts on device, but run the completion | ||
3038 | * processor anyway? | ||
3039 | */ | ||
3040 | if (rc == 0) | ||
3041 | if (likely (skdev->state | ||
3042 | == SKD_DRVR_STATE_ONLINE)) | ||
3043 | deferred = 1; | ||
3044 | break; | ||
3045 | } | ||
3046 | |||
3047 | rc = IRQ_HANDLED; | ||
3048 | |||
3049 | SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); | ||
3050 | |||
3051 | if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && | ||
3052 | (skdev->state != SKD_DRVR_STATE_STOPPING))) { | ||
3053 | if (intstat & FIT_ISH_COMPLETION_POSTED) { | ||
3054 | /* | ||
3055 | * If we have already deferred completion | ||
3056 | * processing, don't bother running it again | ||
3057 | */ | ||
3058 | if (deferred == 0) | ||
3059 | deferred = | ||
3060 | skd_isr_completion_posted(skdev, | ||
3061 | skd_isr_comp_limit, &flush_enqueued); | ||
3062 | } | ||
3063 | |||
3064 | if (intstat & FIT_ISH_FW_STATE_CHANGE) { | ||
3065 | skd_isr_fwstate(skdev); | ||
3066 | if (skdev->state == SKD_DRVR_STATE_FAULT || | ||
3067 | skdev->state == | ||
3068 | SKD_DRVR_STATE_DISAPPEARED) { | ||
3069 | spin_unlock(&skdev->lock); | ||
3070 | return rc; | ||
3071 | } | ||
3072 | } | ||
3073 | |||
3074 | if (intstat & FIT_ISH_MSG_FROM_DEV) | ||
3075 | skd_isr_msg_from_dev(skdev); | ||
3076 | } | ||
3077 | } | ||
3078 | |||
3079 | if (unlikely(flush_enqueued)) | ||
3080 | skd_request_fn(skdev->queue); | ||
3081 | |||
3082 | if (deferred) | ||
3083 | schedule_work(&skdev->completion_worker); | ||
3084 | else if (!flush_enqueued) | ||
3085 | skd_request_fn(skdev->queue); | ||
3086 | |||
3087 | spin_unlock(&skdev->lock); | ||
3088 | |||
3089 | return rc; | ||
3090 | } | ||
3091 | |||
3092 | static void skd_drive_fault(struct skd_device *skdev) | ||
3093 | { | ||
3094 | skdev->state = SKD_DRVR_STATE_FAULT; | ||
3095 | pr_err("(%s): Drive FAULT\n", skd_name(skdev)); | ||
3096 | } | ||
3097 | |||
3098 | static void skd_drive_disappeared(struct skd_device *skdev) | ||
3099 | { | ||
3100 | skdev->state = SKD_DRVR_STATE_DISAPPEARED; | ||
3101 | pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); | ||
3102 | } | ||
3103 | |||
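| /* | ||
| * Handle a firmware state-change interrupt: read the drive state | ||
| * from FIT_STATUS and advance the driver state machine accordingly. | ||
| */ | ||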
3104 | static void skd_isr_fwstate(struct skd_device *skdev) | ||
3105 | { | ||
3106 | u32 sense; | ||
3107 | u32 state; | ||
3108 | u32 mtd; | ||
3109 | int prev_driver_state = skdev->state; | ||
3110 | |||
3111 | sense = SKD_READL(skdev, FIT_STATUS); | ||
3112 | state = sense & FIT_SR_DRIVE_STATE_MASK; | ||
3113 | |||
3114 | pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", | ||
3115 | skd_name(skdev), | ||
3116 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | ||
3117 | skd_drive_state_to_str(state), state); | ||
3118 | |||
3119 | skdev->drive_state = state; | ||
3120 | |||
3121 | switch (skdev->drive_state) { | ||
3122 | case FIT_SR_DRIVE_INIT: | ||
3123 | if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { | ||
3124 | skd_disable_interrupts(skdev); | ||
3125 | break; | ||
3126 | } | ||
3127 | if (skdev->state == SKD_DRVR_STATE_RESTARTING) | ||
3128 | skd_recover_requests(skdev, 0); | ||
3129 | if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { | ||
3130 | skdev->timer_countdown = SKD_STARTING_TIMO; | ||
3131 | skdev->state = SKD_DRVR_STATE_STARTING; | ||
3132 | skd_soft_reset(skdev); | ||
3133 | break; | ||
3134 | } | ||
3135 | mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); | ||
3136 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3137 | skdev->last_mtd = mtd; | ||
3138 | break; | ||
3139 | |||
3140 | case FIT_SR_DRIVE_ONLINE: | ||
3141 | skdev->cur_max_queue_depth = skd_max_queue_depth; | ||
3142 | if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) | ||
3143 | skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; | ||
3144 | |||
3145 | skdev->queue_low_water_mark = | ||
3146 | skdev->cur_max_queue_depth * 2 / 3 + 1; | ||
3147 | if (skdev->queue_low_water_mark < 1) | ||
3148 | skdev->queue_low_water_mark = 1; | ||
3149 | pr_info( | ||
3150 | "(%s): Queue depth limit=%d dev=%d lowat=%d\n", | ||
3151 | skd_name(skdev), | ||
3152 | skdev->cur_max_queue_depth, | ||
3153 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); | ||
3154 | |||
3155 | skd_refresh_device_data(skdev); | ||
3156 | break; | ||
3157 | |||
3158 | case FIT_SR_DRIVE_BUSY: | ||
3159 | skdev->state = SKD_DRVR_STATE_BUSY; | ||
3160 | skdev->timer_countdown = SKD_BUSY_TIMO; | ||
3161 | skd_quiesce_dev(skdev); | ||
3162 | break; | ||
3163 | case FIT_SR_DRIVE_BUSY_SANITIZE: | ||
3164 | /* Set a timer for 3 seconds; we'll abort any unfinished | ||
3165 | * commands after it expires. | ||
3166 | */ | ||
3167 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | ||
3168 | skdev->timer_countdown = SKD_TIMER_SECONDS(3); | ||
3169 | blk_start_queue(skdev->queue); | ||
3170 | break; | ||
3171 | case FIT_SR_DRIVE_BUSY_ERASE: | ||
3172 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | ||
3173 | skdev->timer_countdown = SKD_BUSY_TIMO; | ||
3174 | break; | ||
3175 | case FIT_SR_DRIVE_OFFLINE: | ||
3176 | skdev->state = SKD_DRVR_STATE_IDLE; | ||
3177 | break; | ||
3178 | case FIT_SR_DRIVE_SOFT_RESET: | ||
3179 | switch (skdev->state) { | ||
3180 | case SKD_DRVR_STATE_STARTING: | ||
3181 | case SKD_DRVR_STATE_RESTARTING: | ||
3182 | /* Expected by a caller of skd_soft_reset() */ | ||
3183 | break; | ||
3184 | default: | ||
3185 | skdev->state = SKD_DRVR_STATE_RESTARTING; | ||
3186 | break; | ||
3187 | } | ||
3188 | break; | ||
3189 | case FIT_SR_DRIVE_FW_BOOTING: | ||
3190 | pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n", | ||
3191 | skdev->name, __func__, __LINE__, skdev->name); | ||
3192 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; | ||
3193 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | ||
3194 | break; | ||
3195 | |||
3196 | case FIT_SR_DRIVE_DEGRADED: | ||
3197 | case FIT_SR_PCIE_LINK_DOWN: | ||
3198 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | ||
3199 | break; | ||
3200 | |||
3201 | case FIT_SR_DRIVE_FAULT: | ||
3202 | skd_drive_fault(skdev); | ||
3203 | skd_recover_requests(skdev, 0); | ||
3204 | blk_start_queue(skdev->queue); | ||
3205 | break; | ||
3206 | |||
3207 | /* PCIe bus returned all Fs? */ | ||
3208 | case 0xFF: | ||
3209 | pr_info("(%s): state=0x%x sense=0x%x\n", | ||
3210 | skd_name(skdev), state, sense); | ||
3211 | skd_drive_disappeared(skdev); | ||
3212 | skd_recover_requests(skdev, 0); | ||
3213 | blk_start_queue(skdev->queue); | ||
3214 | break; | ||
3215 | default: | ||
3216 | /* | ||
3217 | * Unknown FW state. Wait for a state we recognize. | ||
3218 | */ | ||
3219 | break; | ||
3220 | } | ||
3221 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | ||
3222 | skd_name(skdev), | ||
3223 | skd_skdev_state_to_str(prev_driver_state), prev_driver_state, | ||
3224 | skd_skdev_state_to_str(skdev->state), skdev->state); | ||
3225 | } | ||
3226 | |||
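/*
 * Walk every request, FIT message and special context: release DMA
 * resources, re-queue or fail in-flight block requests (honoring
 * SKD_MAX_RETRIES via req->special), and rebuild the free lists so the
 * driver starts from a clean slate after a reset, fault or disappearance.
 */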
3227 | static void skd_recover_requests(struct skd_device *skdev, int requeue) | ||
3228 | { | ||
3229 | int i; | ||
3230 | |||
3231 | for (i = 0; i < skdev->num_req_context; i++) { | ||
3232 | struct skd_request_context *skreq = &skdev->skreq_table[i]; | ||
3233 | |||
3234 | if (skreq->state == SKD_REQ_STATE_BUSY) { | ||
3235 | skd_log_skreq(skdev, skreq, "recover"); | ||
3236 | |||
3237 | SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); | ||
3238 | SKD_ASSERT(skreq->req != NULL); | ||
3239 | |||
3240 | /* Release DMA resources for the request. */ | ||
3241 | if (skreq->n_sg > 0) | ||
3242 | skd_postop_sg_list(skdev, skreq); | ||
3243 | |||
3244 | if (requeue && | ||
3245 | (unsigned long) ++skreq->req->special < | ||
3246 | SKD_MAX_RETRIES) | ||
3247 | blk_requeue_request(skdev->queue, skreq->req); | ||
3248 | else | ||
3249 | skd_end_request(skdev, skreq, -EIO); | ||
3250 | |||
3251 | skreq->req = NULL; | ||
3252 | |||
3253 | skreq->state = SKD_REQ_STATE_IDLE; | ||
3254 | skreq->id += SKD_ID_INCR; | ||
3255 | } | ||
3256 | if (i > 0) | ||
3257 | skreq[-1].next = skreq; | ||
3258 | skreq->next = NULL; | ||
3259 | } | ||
3260 | skdev->skreq_free_list = skdev->skreq_table; | ||
3261 | |||
3262 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | ||
3263 | struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; | ||
3264 | |||
3265 | if (skmsg->state == SKD_MSG_STATE_BUSY) { | ||
3266 | skd_log_skmsg(skdev, skmsg, "salvaged"); | ||
3267 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); | ||
3268 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
3269 | skmsg->id += SKD_ID_INCR; | ||
3270 | } | ||
3271 | if (i > 0) | ||
3272 | skmsg[-1].next = skmsg; | ||
3273 | skmsg->next = NULL; | ||
3274 | } | ||
3275 | skdev->skmsg_free_list = skdev->skmsg_table; | ||
3276 | |||
3277 | for (i = 0; i < skdev->n_special; i++) { | ||
3278 | struct skd_special_context *skspcl = &skdev->skspcl_table[i]; | ||
3279 | |||
3280 | /* If orphaned, reclaim it because it has already been reported | ||
3281 | * to the process as an error (it was just waiting for | ||
3282 | * a completion that didn't come, and now it will never come). | ||
3283 | * If busy, change to a state that will cause it to error | ||
3284 | * out in the wait routine and let it do the normal | ||
3285 | * reporting and reclaiming. | ||
3286 | */ | ||
3287 | if (skspcl->req.state == SKD_REQ_STATE_BUSY) { | ||
3288 | if (skspcl->orphaned) { | ||
3289 | pr_debug("%s:%s:%d orphaned %p\n", | ||
3290 | skdev->name, __func__, __LINE__, | ||
3291 | skspcl); | ||
3292 | skd_release_special(skdev, skspcl); | ||
3293 | } else { | ||
3294 | pr_debug("%s:%s:%d not orphaned %p\n", | ||
3295 | skdev->name, __func__, __LINE__, | ||
3296 | skspcl); | ||
3297 | skspcl->req.state = SKD_REQ_STATE_ABORTED; | ||
3298 | } | ||
3299 | } | ||
3300 | } | ||
3301 | skdev->skspcl_free_list = skdev->skspcl_table; | ||
3302 | |||
3303 | for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) | ||
3304 | skdev->timeout_slot[i] = 0; | ||
3305 | |||
3306 | skdev->in_flight = 0; | ||
3307 | } | ||
3308 | |||
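/*
 * Handle a message-from-device interrupt.  During bring-up this advances
 * the FIT handshake one step per acknowledgement: protocol version,
 * command/completion queue depths, completion queue address, host ID and
 * time-stamp exchange, ending with FIT_MTD_ARM_QUEUE, after which the
 * drive is expected to report ONLINE.
 */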
3309 | static void skd_isr_msg_from_dev(struct skd_device *skdev) | ||
3310 | { | ||
3311 | u32 mfd; | ||
3312 | u32 mtd; | ||
3313 | u32 data; | ||
3314 | |||
3315 | mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | ||
3316 | |||
3317 | pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n", | ||
3318 | skdev->name, __func__, __LINE__, mfd, skdev->last_mtd); | ||
3319 | |||
3320 | /* ignore any mtd that is an ack for something we didn't send */ | ||
3321 | if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) | ||
3322 | return; | ||
3323 | |||
3324 | switch (FIT_MXD_TYPE(mfd)) { | ||
3325 | case FIT_MTD_FITFW_INIT: | ||
3326 | skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); | ||
3327 | |||
3328 | if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { | ||
3329 | pr_err("(%s): protocol mismatch\n", | ||
3330 | skdev->name); | ||
3331 | pr_err("(%s): got=%d support=%d\n", | ||
3332 | skdev->name, skdev->proto_ver, | ||
3333 | FIT_PROTOCOL_VERSION_1); | ||
3334 | pr_err("(%s): please upgrade driver\n", | ||
3335 | skdev->name); | ||
3336 | skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; | ||
3337 | skd_soft_reset(skdev); | ||
3338 | break; | ||
3339 | } | ||
3340 | mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); | ||
3341 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3342 | skdev->last_mtd = mtd; | ||
3343 | break; | ||
3344 | |||
3345 | case FIT_MTD_GET_CMDQ_DEPTH: | ||
3346 | skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); | ||
3347 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, | ||
3348 | SKD_N_COMPLETION_ENTRY); | ||
3349 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3350 | skdev->last_mtd = mtd; | ||
3351 | break; | ||
3352 | |||
3353 | case FIT_MTD_SET_COMPQ_DEPTH: | ||
3354 | SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); | ||
3355 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); | ||
3356 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3357 | skdev->last_mtd = mtd; | ||
3358 | break; | ||
3359 | |||
3360 | case FIT_MTD_SET_COMPQ_ADDR: | ||
3361 | skd_reset_skcomp(skdev); | ||
3362 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); | ||
3363 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3364 | skdev->last_mtd = mtd; | ||
3365 | break; | ||
3366 | |||
3367 | case FIT_MTD_CMD_LOG_HOST_ID: | ||
3368 | skdev->connect_time_stamp = get_seconds(); | ||
3369 | data = skdev->connect_time_stamp & 0xFFFF; | ||
3370 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); | ||
3371 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3372 | skdev->last_mtd = mtd; | ||
3373 | break; | ||
3374 | |||
3375 | case FIT_MTD_CMD_LOG_TIME_STAMP_LO: | ||
3376 | skdev->drive_jiffies = FIT_MXD_DATA(mfd); | ||
3377 | data = (skdev->connect_time_stamp >> 16) & 0xFFFF; | ||
3378 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); | ||
3379 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3380 | skdev->last_mtd = mtd; | ||
3381 | break; | ||
3382 | |||
3383 | case FIT_MTD_CMD_LOG_TIME_STAMP_HI: | ||
3384 | skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); | ||
3385 | mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); | ||
3386 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | ||
3387 | skdev->last_mtd = mtd; | ||
3388 | |||
3389 | pr_err("(%s): Time sync driver=0x%x device=0x%x\n", | ||
3390 | skd_name(skdev), | ||
3391 | skdev->connect_time_stamp, skdev->drive_jiffies); | ||
3392 | break; | ||
3393 | |||
3394 | case FIT_MTD_ARM_QUEUE: | ||
3395 | skdev->last_mtd = 0; | ||
3396 | /* | ||
3397 | * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. | ||
3398 | */ | ||
3399 | break; | ||
3400 | |||
3401 | default: | ||
3402 | break; | ||
3403 | } | ||
3404 | } | ||
3405 | |||
3406 | static void skd_disable_interrupts(struct skd_device *skdev) | ||
3407 | { | ||
3408 | u32 sense; | ||
3409 | |||
3410 | sense = SKD_READL(skdev, FIT_CONTROL); | ||
3411 | sense &= ~FIT_CR_ENABLE_INTERRUPTS; | ||
3412 | SKD_WRITEL(skdev, sense, FIT_CONTROL); | ||
3413 | pr_debug("%s:%s:%d sense 0x%x\n", | ||
3414 | skdev->name, __func__, __LINE__, sense); | ||
3415 | |||
3416 | /* Note that all 1s are written. A 1-bit means | ||
3417 | * disable, a 0 means enable. | ||
3418 | */ | ||
3419 | SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); | ||
3420 | } | ||
3421 | |||
3422 | static void skd_enable_interrupts(struct skd_device *skdev) | ||
3423 | { | ||
3424 | u32 val; | ||
3425 | |||
3426 | /* unmask interrupts first */ | ||
3427 | val = FIT_ISH_FW_STATE_CHANGE + | ||
3428 | FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; | ||
3429 | |||
3430 | /* Note that the complement of the mask is written. A 1-bit means | ||
3431 | * disable, a 0 means enable. */ | ||
3432 | SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); | ||
3433 | pr_debug("%s:%s:%d interrupt mask=0x%x\n", | ||
3434 | skdev->name, __func__, __LINE__, ~val); | ||
3435 | |||
3436 | val = SKD_READL(skdev, FIT_CONTROL); | ||
3437 | val |= FIT_CR_ENABLE_INTERRUPTS; | ||
3438 | pr_debug("%s:%s:%d control=0x%x\n", | ||
3439 | skdev->name, __func__, __LINE__, val); | ||
3440 | SKD_WRITEL(skdev, val, FIT_CONTROL); | ||
3441 | } | ||
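
/*
 * Both routines above rely on FIT_INT_MASK_HOST holding an inverted mask
 * (a 1-bit disables a source, a 0-bit enables it).  A minimal sketch of a
 * helper built on that convention -- skd_mask_one_irq() is hypothetical
 * and not part of this driver:
 *
 *	static void skd_mask_one_irq(struct skd_device *skdev, u32 bit,
 *				     int disable)
 *	{
 *		u32 mask = SKD_READL(skdev, FIT_INT_MASK_HOST);
 *
 *		if (disable)
 *			mask |= bit;	// 1 = source masked off
 *		else
 *			mask &= ~bit;	// 0 = source delivered
 *		SKD_WRITEL(skdev, mask, FIT_INT_MASK_HOST);
 *	}
 */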
3442 | |||
3443 | /* | ||
3444 | ***************************************************************************** | ||
3445 | * START, STOP, RESTART, QUIESCE, UNQUIESCE | ||
3446 | ***************************************************************************** | ||
3447 | */ | ||
3448 | |||
3449 | static void skd_soft_reset(struct skd_device *skdev) | ||
3450 | { | ||
3451 | u32 val; | ||
3452 | |||
3453 | val = SKD_READL(skdev, FIT_CONTROL); | ||
3454 | val |= (FIT_CR_SOFT_RESET); | ||
3455 | pr_debug("%s:%s:%d control=0x%x\n", | ||
3456 | skdev->name, __func__, __LINE__, val); | ||
3457 | SKD_WRITEL(skdev, val, FIT_CONTROL); | ||
3458 | } | ||
3459 | |||
3460 | static void skd_start_device(struct skd_device *skdev) | ||
3461 | { | ||
3462 | unsigned long flags; | ||
3463 | u32 sense; | ||
3464 | u32 state; | ||
3465 | |||
3466 | spin_lock_irqsave(&skdev->lock, flags); | ||
3467 | |||
3468 | /* ack all ghost interrupts */ | ||
3469 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | ||
3470 | |||
3471 | sense = SKD_READL(skdev, FIT_STATUS); | ||
3472 | |||
3473 | pr_debug("%s:%s:%d initial status=0x%x\n", | ||
3474 | skdev->name, __func__, __LINE__, sense); | ||
3475 | |||
3476 | state = sense & FIT_SR_DRIVE_STATE_MASK; | ||
3477 | skdev->drive_state = state; | ||
3478 | skdev->last_mtd = 0; | ||
3479 | |||
3480 | skdev->state = SKD_DRVR_STATE_STARTING; | ||
3481 | skdev->timer_countdown = SKD_STARTING_TIMO; | ||
3482 | |||
3483 | skd_enable_interrupts(skdev); | ||
3484 | |||
3485 | switch (skdev->drive_state) { | ||
3486 | case FIT_SR_DRIVE_OFFLINE: | ||
3487 | pr_err("(%s): Drive offline...\n", skd_name(skdev)); | ||
3488 | break; | ||
3489 | |||
3490 | case FIT_SR_DRIVE_FW_BOOTING: | ||
3491 | pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n", | ||
3492 | skdev->name, __func__, __LINE__, skdev->name); | ||
3493 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; | ||
3494 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | ||
3495 | break; | ||
3496 | |||
3497 | case FIT_SR_DRIVE_BUSY_SANITIZE: | ||
3498 | pr_info("(%s): Start: BUSY_SANITIZE\n", | ||
3499 | skd_name(skdev)); | ||
3500 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | ||
3501 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | ||
3502 | break; | ||
3503 | |||
3504 | case FIT_SR_DRIVE_BUSY_ERASE: | ||
3505 | pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); | ||
3506 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | ||
3507 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | ||
3508 | break; | ||
3509 | |||
3510 | case FIT_SR_DRIVE_INIT: | ||
3511 | case FIT_SR_DRIVE_ONLINE: | ||
3512 | skd_soft_reset(skdev); | ||
3513 | break; | ||
3514 | |||
3515 | case FIT_SR_DRIVE_BUSY: | ||
3516 | pr_err("(%s): Drive Busy...\n", skd_name(skdev)); | ||
3517 | skdev->state = SKD_DRVR_STATE_BUSY; | ||
3518 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | ||
3519 | break; | ||
3520 | |||
3521 | case FIT_SR_DRIVE_SOFT_RESET: | ||
3522 | pr_err("(%s) drive soft reset in prog\n", | ||
3523 | skd_name(skdev)); | ||
3524 | break; | ||
3525 | |||
3526 | case FIT_SR_DRIVE_FAULT: | ||
3527 | /* Fault state is bad...soft reset won't do it... | ||
3528 | * Hard reset, maybe, but does it work on device? | ||
3529 | * For now, just fault so the system doesn't hang. | ||
3530 | */ | ||
3531 | skd_drive_fault(skdev); | ||
3532 | /* start the queue so we can respond with error to requests */ | ||
3533 | pr_debug("%s:%s:%d starting %s queue\n", | ||
3534 | skdev->name, __func__, __LINE__, skdev->name); | ||
3535 | blk_start_queue(skdev->queue); | ||
3536 | skdev->gendisk_on = -1; | ||
3537 | wake_up_interruptible(&skdev->waitq); | ||
3538 | break; | ||
3539 | |||
3540 | case 0xFF: | ||
3541 | /* Most likely the device isn't there or isn't responding | ||
3542 | * to the BAR1 addresses. */ | ||
3543 | skd_drive_disappeared(skdev); | ||
3544 | /* start the queue so we can respond with error to requests */ | ||
3545 | pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", | ||
3546 | skdev->name, __func__, __LINE__, skdev->name); | ||
3547 | blk_start_queue(skdev->queue); | ||
3548 | skdev->gendisk_on = -1; | ||
3549 | wake_up_interruptible(&skdev->waitq); | ||
3550 | break; | ||
3551 | |||
3552 | default: | ||
3553 | pr_err("(%s) Start: unknown state %x\n", | ||
3554 | skd_name(skdev), skdev->drive_state); | ||
3555 | break; | ||
3556 | } | ||
3557 | |||
3558 | state = SKD_READL(skdev, FIT_CONTROL); | ||
3559 | pr_debug("%s:%s:%d FIT Control Status=0x%x\n", | ||
3560 | skdev->name, __func__, __LINE__, state); | ||
3561 | |||
3562 | state = SKD_READL(skdev, FIT_INT_STATUS_HOST); | ||
3563 | pr_debug("%s:%s:%d Intr Status=0x%x\n", | ||
3564 | skdev->name, __func__, __LINE__, state); | ||
3565 | |||
3566 | state = SKD_READL(skdev, FIT_INT_MASK_HOST); | ||
3567 | pr_debug("%s:%s:%d Intr Mask=0x%x\n", | ||
3568 | skdev->name, __func__, __LINE__, state); | ||
3569 | |||
3570 | state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | ||
3571 | pr_debug("%s:%s:%d Msg from Dev=0x%x\n", | ||
3572 | skdev->name, __func__, __LINE__, state); | ||
3573 | |||
3574 | state = SKD_READL(skdev, FIT_HW_VERSION); | ||
3575 | pr_debug("%s:%s:%d HW version=0x%x\n", | ||
3576 | skdev->name, __func__, __LINE__, state); | ||
3577 | |||
3578 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3579 | } | ||
3580 | |||
3581 | static void skd_stop_device(struct skd_device *skdev) | ||
3582 | { | ||
3583 | unsigned long flags; | ||
3584 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | ||
3585 | u32 dev_state; | ||
3586 | int i; | ||
3587 | |||
3588 | spin_lock_irqsave(&skdev->lock, flags); | ||
3589 | |||
3590 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | ||
3591 | pr_err("(%s): skd_stop_device not online no sync\n", | ||
3592 | skd_name(skdev)); | ||
3593 | goto stop_out; | ||
3594 | } | ||
3595 | |||
3596 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) { | ||
3597 | pr_err("(%s): skd_stop_device no special\n", | ||
3598 | skd_name(skdev)); | ||
3599 | goto stop_out; | ||
3600 | } | ||
3601 | |||
3602 | skdev->state = SKD_DRVR_STATE_SYNCING; | ||
3603 | skdev->sync_done = 0; | ||
3604 | |||
3605 | skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); | ||
3606 | |||
3607 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3608 | |||
3609 | wait_event_interruptible_timeout(skdev->waitq, | ||
3610 | (skdev->sync_done), (10 * HZ)); | ||
3611 | |||
3612 | spin_lock_irqsave(&skdev->lock, flags); | ||
3613 | |||
3614 | switch (skdev->sync_done) { | ||
3615 | case 0: | ||
3616 | pr_err("(%s): skd_stop_device no sync\n", | ||
3617 | skd_name(skdev)); | ||
3618 | break; | ||
3619 | case 1: | ||
3620 | pr_err("(%s): skd_stop_device sync done\n", | ||
3621 | skd_name(skdev)); | ||
3622 | break; | ||
3623 | default: | ||
3624 | pr_err("(%s): skd_stop_device sync error\n", | ||
3625 | skd_name(skdev)); | ||
3626 | } | ||
3627 | |||
3628 | stop_out: | ||
3629 | skdev->state = SKD_DRVR_STATE_STOPPING; | ||
3630 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3631 | |||
3632 | skd_kill_timer(skdev); | ||
3633 | |||
3634 | spin_lock_irqsave(&skdev->lock, flags); | ||
3635 | skd_disable_interrupts(skdev); | ||
3636 | |||
3637 | /* ensure all ints on device are cleared */ | ||
3638 | /* soft reset the device to unload with a clean slate */ | ||
3639 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | ||
3640 | SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); | ||
3641 | |||
3642 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3643 | |||
3644 | /* poll every 100ms, 1 second timeout */ | ||
3645 | for (i = 0; i < 10; i++) { | ||
3646 | dev_state = | ||
3647 | SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; | ||
3648 | if (dev_state == FIT_SR_DRIVE_INIT) | ||
3649 | break; | ||
3650 | set_current_state(TASK_INTERRUPTIBLE); | ||
3651 | schedule_timeout(msecs_to_jiffies(100)); | ||
3652 | } | ||
3653 | |||
3654 | if (dev_state != FIT_SR_DRIVE_INIT) | ||
3655 | pr_err("(%s): skd_stop_device state error 0x%02x\n", | ||
3656 | skd_name(skdev), dev_state); | ||
3657 | } | ||
3658 | |||
3659 | /* assume spinlock is held */ | ||
3660 | static void skd_restart_device(struct skd_device *skdev) | ||
3661 | { | ||
3662 | u32 state; | ||
3663 | |||
3664 | /* ack all ghost interrupts */ | ||
3665 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | ||
3666 | |||
3667 | state = SKD_READL(skdev, FIT_STATUS); | ||
3668 | |||
3669 | pr_debug("%s:%s:%d drive status=0x%x\n", | ||
3670 | skdev->name, __func__, __LINE__, state); | ||
3671 | |||
3672 | state &= FIT_SR_DRIVE_STATE_MASK; | ||
3673 | skdev->drive_state = state; | ||
3674 | skdev->last_mtd = 0; | ||
3675 | |||
3676 | skdev->state = SKD_DRVR_STATE_RESTARTING; | ||
3677 | skdev->timer_countdown = SKD_RESTARTING_TIMO; | ||
3678 | |||
3679 | skd_soft_reset(skdev); | ||
3680 | } | ||
3681 | |||
3682 | /* assume spinlock is held */ | ||
3683 | static int skd_quiesce_dev(struct skd_device *skdev) | ||
3684 | { | ||
3685 | int rc = 0; | ||
3686 | |||
3687 | switch (skdev->state) { | ||
3688 | case SKD_DRVR_STATE_BUSY: | ||
3689 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
3690 | pr_debug("%s:%s:%d stopping %s queue\n", | ||
3691 | skdev->name, __func__, __LINE__, skdev->name); | ||
3692 | blk_stop_queue(skdev->queue); | ||
3693 | break; | ||
3694 | case SKD_DRVR_STATE_ONLINE: | ||
3695 | case SKD_DRVR_STATE_STOPPING: | ||
3696 | case SKD_DRVR_STATE_SYNCING: | ||
3697 | case SKD_DRVR_STATE_PAUSING: | ||
3698 | case SKD_DRVR_STATE_PAUSED: | ||
3699 | case SKD_DRVR_STATE_STARTING: | ||
3700 | case SKD_DRVR_STATE_RESTARTING: | ||
3701 | case SKD_DRVR_STATE_RESUMING: | ||
3702 | default: | ||
3703 | rc = -EINVAL; | ||
3704 | pr_debug("%s:%s:%d state [%d] not implemented\n", | ||
3705 | skdev->name, __func__, __LINE__, skdev->state); | ||
3706 | } | ||
3707 | return rc; | ||
3708 | } | ||
3709 | |||
3710 | /* assume spinlock is held */ | ||
3711 | static int skd_unquiesce_dev(struct skd_device *skdev) | ||
3712 | { | ||
3713 | int prev_driver_state = skdev->state; | ||
3714 | |||
3715 | skd_log_skdev(skdev, "unquiesce"); | ||
3716 | if (skdev->state == SKD_DRVR_STATE_ONLINE) { | ||
3717 | pr_debug("%s:%s:%d **** device already ONLINE\n", | ||
3718 | skdev->name, __func__, __LINE__); | ||
3719 | return 0; | ||
3720 | } | ||
3721 | if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { | ||
3722 | /* | ||
3723 | * If there has been a state change to other than | ||
3724 | * ONLINE, we will rely on controller state change | ||
3725 | * to come back online and restart the queue. | ||
3726 | * The BUSY state means that driver is ready to | ||
3727 | * continue normal processing but waiting for controller | ||
3728 | * to become available. | ||
3729 | */ | ||
3730 | skdev->state = SKD_DRVR_STATE_BUSY; | ||
3731 | pr_debug("%s:%s:%d drive BUSY state\n", | ||
3732 | skdev->name, __func__, __LINE__); | ||
3733 | return 0; | ||
3734 | } | ||
3735 | |||
3736 | /* | ||
3737 | * Drive has just come online; the driver is either in startup, | ||
3738 | * paused performing a task, or busy waiting for hardware. | ||
3739 | */ | ||
3740 | switch (skdev->state) { | ||
3741 | case SKD_DRVR_STATE_PAUSED: | ||
3742 | case SKD_DRVR_STATE_BUSY: | ||
3743 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
3744 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
3745 | case SKD_DRVR_STATE_STARTING: | ||
3746 | case SKD_DRVR_STATE_RESTARTING: | ||
3747 | case SKD_DRVR_STATE_FAULT: | ||
3748 | case SKD_DRVR_STATE_IDLE: | ||
3749 | case SKD_DRVR_STATE_LOAD: | ||
3750 | skdev->state = SKD_DRVR_STATE_ONLINE; | ||
3751 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | ||
3752 | skd_name(skdev), | ||
3753 | skd_skdev_state_to_str(prev_driver_state), | ||
3754 | prev_driver_state, skd_skdev_state_to_str(skdev->state), | ||
3755 | skdev->state); | ||
3756 | pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n", | ||
3757 | skdev->name, __func__, __LINE__); | ||
3758 | pr_debug("%s:%s:%d starting %s queue\n", | ||
3759 | skdev->name, __func__, __LINE__, skdev->name); | ||
3760 | pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); | ||
3761 | blk_start_queue(skdev->queue); | ||
3762 | skdev->gendisk_on = 1; | ||
3763 | wake_up_interruptible(&skdev->waitq); | ||
3764 | break; | ||
3765 | |||
3766 | case SKD_DRVR_STATE_DISAPPEARED: | ||
3767 | default: | ||
3768 | pr_debug("%s:%s:%d **** driver state %d, not implemented\n", | ||
3769 | skdev->name, __func__, __LINE__, | ||
3770 | skdev->state); | ||
3771 | return -EBUSY; | ||
3772 | } | ||
3773 | return 0; | ||
3774 | } | ||
3775 | |||
3776 | /* | ||
3777 | ***************************************************************************** | ||
3778 | * PCIe MSI/MSI-X INTERRUPT HANDLERS | ||
3779 | ***************************************************************************** | ||
3780 | */ | ||
3781 | |||
3782 | static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) | ||
3783 | { | ||
3784 | struct skd_device *skdev = skd_host_data; | ||
3785 | unsigned long flags; | ||
3786 | |||
3787 | spin_lock_irqsave(&skdev->lock, flags); | ||
3788 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3789 | skdev->name, __func__, __LINE__, | ||
3790 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3791 | pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), | ||
3792 | irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3793 | SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); | ||
3794 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3795 | return IRQ_HANDLED; | ||
3796 | } | ||
3797 | |||
3798 | static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) | ||
3799 | { | ||
3800 | struct skd_device *skdev = skd_host_data; | ||
3801 | unsigned long flags; | ||
3802 | |||
3803 | spin_lock_irqsave(&skdev->lock, flags); | ||
3804 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3805 | skdev->name, __func__, __LINE__, | ||
3806 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3807 | SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); | ||
3808 | skd_isr_fwstate(skdev); | ||
3809 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3810 | return IRQ_HANDLED; | ||
3811 | } | ||
3812 | |||
3813 | static irqreturn_t skd_comp_q(int irq, void *skd_host_data) | ||
3814 | { | ||
3815 | struct skd_device *skdev = skd_host_data; | ||
3816 | unsigned long flags; | ||
3817 | int flush_enqueued = 0; | ||
3818 | int deferred; | ||
3819 | |||
3820 | spin_lock_irqsave(&skdev->lock, flags); | ||
3821 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3822 | skdev->name, __func__, __LINE__, | ||
3823 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3824 | SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); | ||
3825 | deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, | ||
3826 | &flush_enqueued); | ||
3827 | if (flush_enqueued) | ||
3828 | skd_request_fn(skdev->queue); | ||
3829 | |||
3830 | if (deferred) | ||
3831 | schedule_work(&skdev->completion_worker); | ||
3832 | else if (!flush_enqueued) | ||
3833 | skd_request_fn(skdev->queue); | ||
3834 | |||
3835 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3836 | |||
3837 | return IRQ_HANDLED; | ||
3838 | } | ||
3839 | |||
3840 | static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) | ||
3841 | { | ||
3842 | struct skd_device *skdev = skd_host_data; | ||
3843 | unsigned long flags; | ||
3844 | |||
3845 | spin_lock_irqsave(&skdev->lock, flags); | ||
3846 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3847 | skdev->name, __func__, __LINE__, | ||
3848 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3849 | SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); | ||
3850 | skd_isr_msg_from_dev(skdev); | ||
3851 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3852 | return IRQ_HANDLED; | ||
3853 | } | ||
3854 | |||
3855 | static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) | ||
3856 | { | ||
3857 | struct skd_device *skdev = skd_host_data; | ||
3858 | unsigned long flags; | ||
3859 | |||
3860 | spin_lock_irqsave(&skdev->lock, flags); | ||
3861 | pr_debug("%s:%s:%d MSIX = 0x%x\n", | ||
3862 | skdev->name, __func__, __LINE__, | ||
3863 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | ||
3864 | SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); | ||
3865 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
3866 | return IRQ_HANDLED; | ||
3867 | } | ||
3868 | |||
3869 | /* | ||
3870 | ***************************************************************************** | ||
3871 | * PCIe MSI/MSI-X SETUP | ||
3872 | ***************************************************************************** | ||
3873 | */ | ||
3874 | |||
3875 | struct skd_msix_entry { | ||
3876 | int have_irq; | ||
3877 | u32 vector; | ||
3878 | u32 entry; | ||
3879 | struct skd_device *rsp; | ||
3880 | char isr_name[30]; | ||
3881 | }; | ||
3882 | |||
3883 | struct skd_init_msix_entry { | ||
3884 | const char *name; | ||
3885 | irq_handler_t handler; | ||
3886 | }; | ||
3887 | |||
3888 | #define SKD_MAX_MSIX_COUNT 13 | ||
3889 | #define SKD_MIN_MSIX_COUNT 7 | ||
3890 | #define SKD_BASE_MSIX_IRQ 4 | ||
3891 | |||
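/*
 * MSI-X vector i is wired to entry i of this table; the name below is
 * appended to the per-device isr_name that shows up in /proc/interrupts.
 */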
3892 | static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { | ||
3893 | { "(DMA 0)", skd_reserved_isr }, | ||
3894 | { "(DMA 1)", skd_reserved_isr }, | ||
3895 | { "(DMA 2)", skd_reserved_isr }, | ||
3896 | { "(DMA 3)", skd_reserved_isr }, | ||
3897 | { "(State Change)", skd_statec_isr }, | ||
3898 | { "(COMPL_Q)", skd_comp_q }, | ||
3899 | { "(MSG)", skd_msg_isr }, | ||
3900 | { "(Reserved)", skd_reserved_isr }, | ||
3901 | { "(Reserved)", skd_reserved_isr }, | ||
3902 | { "(Queue Full 0)", skd_qfull_isr }, | ||
3903 | { "(Queue Full 1)", skd_qfull_isr }, | ||
3904 | { "(Queue Full 2)", skd_qfull_isr }, | ||
3905 | { "(Queue Full 3)", skd_qfull_isr }, | ||
3906 | }; | ||
3907 | |||
3908 | static void skd_release_msix(struct skd_device *skdev) | ||
3909 | { | ||
3910 | struct skd_msix_entry *qentry; | ||
3911 | int i; | ||
3912 | |||
3913 | if (skdev->msix_entries == NULL) | ||
3914 | return; | ||
3915 | for (i = 0; i < skdev->msix_count; i++) { | ||
3916 | qentry = &skdev->msix_entries[i]; | ||
3917 | skdev = qentry->rsp; | ||
3918 | |||
3919 | if (qentry->have_irq) | ||
3920 | devm_free_irq(&skdev->pdev->dev, | ||
3921 | qentry->vector, qentry->rsp); | ||
3922 | } | ||
3923 | pci_disable_msix(skdev->pdev); | ||
3924 | kfree(skdev->msix_entries); | ||
3925 | skdev->msix_count = 0; | ||
3926 | skdev->msix_entries = NULL; | ||
3927 | } | ||
3928 | |||
3929 | static int skd_acquire_msix(struct skd_device *skdev) | ||
3930 | { | ||
3931 | int i, rc; | ||
3932 | struct pci_dev *pdev; | ||
3933 | struct msix_entry *entries = NULL; | ||
3934 | struct skd_msix_entry *qentry; | ||
3935 | |||
3936 | pdev = skdev->pdev; | ||
3937 | skdev->msix_count = SKD_MAX_MSIX_COUNT; | ||
3938 | entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT, | ||
3939 | GFP_KERNEL); | ||
3940 | if (!entries) | ||
3941 | return -ENOMEM; | ||
3942 | |||
3943 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) | ||
3944 | entries[i].entry = i; | ||
3945 | |||
3946 | rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT); | ||
3947 | if (rc < 0) | ||
3948 | goto msix_out; | ||
3949 | if (rc) { | ||
3950 | if (rc < SKD_MIN_MSIX_COUNT) { | ||
3951 | pr_err("(%s): failed to enable MSI-X %d\n", | ||
3952 | skd_name(skdev), rc); | ||
3953 | goto msix_out; | ||
3954 | } | ||
3955 | pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n", | ||
3956 | skdev->name, __func__, __LINE__, | ||
3957 | pci_name(pdev), skdev->name, rc); | ||
3958 | |||
3959 | skdev->msix_count = rc; | ||
3960 | rc = pci_enable_msix(pdev, entries, skdev->msix_count); | ||
3961 | if (rc) { | ||
3962 | pr_err("(%s): failed to enable MSI-X " | ||
3963 | "support (%d) %d\n", | ||
3964 | skd_name(skdev), skdev->msix_count, rc); | ||
3965 | goto msix_out; | ||
3966 | } | ||
3967 | } | ||
3968 | skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) * | ||
3969 | skdev->msix_count, GFP_KERNEL); | ||
3970 | if (!skdev->msix_entries) { | ||
3971 | rc = -ENOMEM; | ||
3972 | skdev->msix_count = 0; | ||
3973 | pr_err("(%s): msix table allocation error\n", | ||
3974 | skd_name(skdev)); | ||
3975 | goto msix_out; | ||
3976 | } | ||
3977 | |||
3978 | qentry = skdev->msix_entries; | ||
3979 | for (i = 0; i < skdev->msix_count; i++) { | ||
3980 | qentry->vector = entries[i].vector; | ||
3981 | qentry->entry = entries[i].entry; | ||
3982 | qentry->rsp = NULL; | ||
3983 | qentry->have_irq = 0; | ||
3984 | pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n", | ||
3985 | skdev->name, __func__, __LINE__, | ||
3986 | pci_name(pdev), skdev->name, | ||
3987 | i, qentry->vector, qentry->entry); | ||
3988 | qentry++; | ||
3989 | } | ||
3990 | |||
3991 | /* Enable MSI-X vectors for the base queue */ | ||
3992 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { | ||
3993 | qentry = &skdev->msix_entries[i]; | ||
3994 | snprintf(qentry->isr_name, sizeof(qentry->isr_name), | ||
3995 | "%s%d-msix %s", DRV_NAME, skdev->devno, | ||
3996 | msix_entries[i].name); | ||
3997 | rc = devm_request_irq(&skdev->pdev->dev, qentry->vector, | ||
3998 | msix_entries[i].handler, 0, | ||
3999 | qentry->isr_name, skdev); | ||
4000 | if (rc) { | ||
4001 | pr_err("(%s): Unable to register(%d) MSI-X " | ||
4002 | "handler %d: %s\n", | ||
4003 | skd_name(skdev), rc, i, qentry->isr_name); | ||
4004 | goto msix_out; | ||
4005 | } else { | ||
4006 | qentry->have_irq = 1; | ||
4007 | qentry->rsp = skdev; | ||
4008 | } | ||
4009 | } | ||
4010 | pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n", | ||
4011 | skdev->name, __func__, __LINE__, | ||
4012 | pci_name(pdev), skdev->name, skdev->msix_count); | ||
4013 | return 0; | ||
4014 | |||
4015 | msix_out: | ||
4016 | if (entries) | ||
4017 | kfree(entries); | ||
4018 | skd_release_msix(skdev); | ||
4019 | return rc; | ||
4020 | } | ||
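
/*
 * The try-then-retry dance above (ask for SKD_MAX_MSIX_COUNT, retry with
 * whatever pci_enable_msix() says is available, give up below
 * SKD_MIN_MSIX_COUNT) is what later kernels fold into a single call.  A
 * hedged sketch, assuming pci_enable_msix_range() is available in the
 * target kernel:
 *
 *	rc = pci_enable_msix_range(pdev, entries,
 *				   SKD_MIN_MSIX_COUNT, SKD_MAX_MSIX_COUNT);
 *	if (rc < 0)
 *		goto msix_out;
 *	skdev->msix_count = rc;		// vectors actually granted
 */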
4021 | |||
4022 | static int skd_acquire_irq(struct skd_device *skdev) | ||
4023 | { | ||
4024 | int rc; | ||
4025 | struct pci_dev *pdev; | ||
4026 | |||
4027 | pdev = skdev->pdev; | ||
4028 | skdev->msix_count = 0; | ||
4029 | |||
4030 | RETRY_IRQ_TYPE: | ||
4031 | switch (skdev->irq_type) { | ||
4032 | case SKD_IRQ_MSIX: | ||
4033 | rc = skd_acquire_msix(skdev); | ||
4034 | if (!rc) | ||
4035 | pr_info("(%s): MSI-X %d irqs enabled\n", | ||
4036 | skd_name(skdev), skdev->msix_count); | ||
4037 | else { | ||
4038 | pr_err( | ||
4039 | "(%s): failed to enable MSI-X, re-trying with MSI %d\n", | ||
4040 | skd_name(skdev), rc); | ||
4041 | skdev->irq_type = SKD_IRQ_MSI; | ||
4042 | goto RETRY_IRQ_TYPE; | ||
4043 | } | ||
4044 | break; | ||
4045 | case SKD_IRQ_MSI: | ||
4046 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi", | ||
4047 | DRV_NAME, skdev->devno); | ||
4048 | rc = pci_enable_msi(pdev); | ||
4049 | if (!rc) { | ||
4050 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0, | ||
4051 | skdev->isr_name, skdev); | ||
4052 | if (rc) { | ||
4053 | pci_disable_msi(pdev); | ||
4054 | pr_err( | ||
4055 | "(%s): failed to allocate the MSI interrupt %d\n", | ||
4056 | skd_name(skdev), rc); | ||
4057 | goto RETRY_IRQ_LEGACY; | ||
4058 | } | ||
4059 | pr_info("(%s): MSI irq %d enabled\n", | ||
4060 | skd_name(skdev), pdev->irq); | ||
4061 | } else { | ||
4062 | RETRY_IRQ_LEGACY: | ||
4063 | pr_err( | ||
4064 | "(%s): failed to enable MSI, re-trying with LEGACY %d\n", | ||
4065 | skd_name(skdev), rc); | ||
4066 | skdev->irq_type = SKD_IRQ_LEGACY; | ||
4067 | goto RETRY_IRQ_TYPE; | ||
4068 | } | ||
4069 | break; | ||
4070 | case SKD_IRQ_LEGACY: | ||
4071 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), | ||
4072 | "%s%d-legacy", DRV_NAME, skdev->devno); | ||
4073 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, | ||
4074 | IRQF_SHARED, skdev->isr_name, skdev); | ||
4075 | if (!rc) | ||
4076 | pr_info("(%s): LEGACY irq %d enabled\n", | ||
4077 | skd_name(skdev), pdev->irq); | ||
4078 | else | ||
4079 | pr_err("(%s): request LEGACY irq error %d\n", | ||
4080 | skd_name(skdev), rc); | ||
4081 | break; | ||
4082 | default: | ||
4083 | pr_info("(%s): irq_type %d invalid, re-set to %d\n", | ||
4084 | skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT); | ||
4085 | skdev->irq_type = SKD_IRQ_LEGACY; | ||
4086 | goto RETRY_IRQ_TYPE; | ||
4087 | } | ||
4088 | return rc; | ||
4089 | } | ||
4090 | |||
4091 | static void skd_release_irq(struct skd_device *skdev) | ||
4092 | { | ||
4093 | switch (skdev->irq_type) { | ||
4094 | case SKD_IRQ_MSIX: | ||
4095 | skd_release_msix(skdev); | ||
4096 | break; | ||
4097 | case SKD_IRQ_MSI: | ||
4098 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | ||
4099 | pci_disable_msi(skdev->pdev); | ||
4100 | break; | ||
4101 | case SKD_IRQ_LEGACY: | ||
4102 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | ||
4103 | break; | ||
4104 | default: | ||
4105 | pr_err("(%s): wrong irq type %d!", | ||
4106 | skd_name(skdev), skdev->irq_type); | ||
4107 | break; | ||
4108 | } | ||
4109 | } | ||
4110 | |||
4111 | /* | ||
4112 | ***************************************************************************** | ||
4113 | * CONSTRUCT | ||
4114 | ***************************************************************************** | ||
4115 | */ | ||
4116 | |||
4117 | static int skd_cons_skcomp(struct skd_device *skdev) | ||
4118 | { | ||
4119 | int rc = 0; | ||
4120 | struct fit_completion_entry_v1 *skcomp; | ||
4121 | u32 nbytes; | ||
4122 | |||
4123 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | ||
4124 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | ||
4125 | |||
4126 | pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n", | ||
4127 | skdev->name, __func__, __LINE__, | ||
4128 | nbytes, SKD_N_COMPLETION_ENTRY); | ||
4129 | |||
4130 | skcomp = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4131 | &skdev->cq_dma_address); | ||
4132 | |||
4133 | if (skcomp == NULL) { | ||
4134 | rc = -ENOMEM; | ||
4135 | goto err_out; | ||
4136 | } | ||
4137 | |||
4138 | memset(skcomp, 0, nbytes); | ||
4139 | |||
4140 | skdev->skcomp_table = skcomp; | ||
4141 | skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + | ||
4142 | sizeof(*skcomp) * | ||
4143 | SKD_N_COMPLETION_ENTRY); | ||
4144 | |||
4145 | err_out: | ||
4146 | return rc; | ||
4147 | } | ||
4148 | |||
4149 | static int skd_cons_skmsg(struct skd_device *skdev) | ||
4150 | { | ||
4151 | int rc = 0; | ||
4152 | u32 i; | ||
4153 | |||
4154 | pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n", | ||
4155 | skdev->name, __func__, __LINE__, | ||
4156 | sizeof(struct skd_fitmsg_context), | ||
4157 | skdev->num_fitmsg_context, | ||
4158 | sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); | ||
4159 | |||
4160 | skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) | ||
4161 | *skdev->num_fitmsg_context, GFP_KERNEL); | ||
4162 | if (skdev->skmsg_table == NULL) { | ||
4163 | rc = -ENOMEM; | ||
4164 | goto err_out; | ||
4165 | } | ||
4166 | |||
4167 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | ||
4168 | struct skd_fitmsg_context *skmsg; | ||
4169 | |||
4170 | skmsg = &skdev->skmsg_table[i]; | ||
4171 | |||
4172 | skmsg->id = i + SKD_ID_FIT_MSG; | ||
4173 | |||
4174 | skmsg->state = SKD_MSG_STATE_IDLE; | ||
4175 | skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, | ||
4176 | SKD_N_FITMSG_BYTES + 64, | ||
4177 | &skmsg->mb_dma_address); | ||
4178 | |||
4179 | if (skmsg->msg_buf == NULL) { | ||
4180 | rc = -ENOMEM; | ||
4181 | goto err_out; | ||
4182 | } | ||
4183 | |||
4184 | skmsg->offset = (u32)((u64)skmsg->msg_buf & | ||
4185 | (~FIT_QCMD_BASE_ADDRESS_MASK)); | ||
4186 | skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; | ||
4187 | skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & | ||
4188 | FIT_QCMD_BASE_ADDRESS_MASK); | ||
4189 | skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK; | ||
4190 | skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; | ||
4191 | memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); | ||
4192 | |||
4193 | skmsg->next = &skmsg[1]; | ||
4194 | } | ||
4195 | |||
4196 | /* Free list is in order starting with the 0th entry. */ | ||
4197 | skdev->skmsg_table[i - 1].next = NULL; | ||
4198 | skdev->skmsg_free_list = skdev->skmsg_table; | ||
4199 | |||
4200 | err_out: | ||
4201 | return rc; | ||
4202 | } | ||
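
/*
 * A worked example of the alignment arithmetic above, assuming
 * FIT_QCMD_BASE_ADDRESS_MASK clears the low six bits (64-byte FIT queue
 * alignment); the concrete values are illustrative only:
 *
 *	msg_buf = 0x...1010	->  offset = 0x1010 & 0x3f = 0x10
 *	msg_buf + 0x3f		->  0x...104f
 *	& FIT_QCMD_BASE_ADDRESS_MASK -> 0x...1040, the next 64-byte boundary
 *
 * The extra 64 bytes in each allocation absorb this round-up.
 */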
4203 | |||
4204 | static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, | ||
4205 | u32 n_sg, | ||
4206 | dma_addr_t *ret_dma_addr) | ||
4207 | { | ||
4208 | struct fit_sg_descriptor *sg_list; | ||
4209 | u32 nbytes; | ||
4210 | |||
4211 | nbytes = sizeof(*sg_list) * n_sg; | ||
4212 | |||
4213 | sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); | ||
4214 | |||
4215 | if (sg_list != NULL) { | ||
4216 | uint64_t dma_address = *ret_dma_addr; | ||
4217 | u32 i; | ||
4218 | |||
4219 | memset(sg_list, 0, nbytes); | ||
4220 | |||
4221 | for (i = 0; i < n_sg - 1; i++) { | ||
4222 | uint64_t ndp_off; | ||
4223 | ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); | ||
4224 | |||
4225 | sg_list[i].next_desc_ptr = dma_address + ndp_off; | ||
4226 | } | ||
4227 | sg_list[i].next_desc_ptr = 0LL; | ||
4228 | } | ||
4229 | |||
4230 | return sg_list; | ||
4231 | } | ||
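
/*
 * The descriptors come back pre-chained: each next_desc_ptr already holds
 * the bus address of the following descriptor and the final entry is
 * terminated with 0.  Per-I/O setup then only fills in the per-transfer
 * fields of the entries it actually uses; the chain itself never needs to
 * be rebuilt.
 */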
4232 | |||
4233 | static int skd_cons_skreq(struct skd_device *skdev) | ||
4234 | { | ||
4235 | int rc = 0; | ||
4236 | u32 i; | ||
4237 | |||
4238 | pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n", | ||
4239 | skdev->name, __func__, __LINE__, | ||
4240 | sizeof(struct skd_request_context), | ||
4241 | skdev->num_req_context, | ||
4242 | sizeof(struct skd_request_context) * skdev->num_req_context); | ||
4243 | |||
4244 | skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) | ||
4245 | * skdev->num_req_context, GFP_KERNEL); | ||
4246 | if (skdev->skreq_table == NULL) { | ||
4247 | rc = -ENOMEM; | ||
4248 | goto err_out; | ||
4249 | } | ||
4250 | |||
4251 | pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n", | ||
4252 | skdev->name, __func__, __LINE__, | ||
4253 | skdev->sgs_per_request, sizeof(struct scatterlist), | ||
4254 | skdev->sgs_per_request * sizeof(struct scatterlist)); | ||
4255 | |||
4256 | for (i = 0; i < skdev->num_req_context; i++) { | ||
4257 | struct skd_request_context *skreq; | ||
4258 | |||
4259 | skreq = &skdev->skreq_table[i]; | ||
4260 | |||
4261 | skreq->id = i + SKD_ID_RW_REQUEST; | ||
4262 | skreq->state = SKD_REQ_STATE_IDLE; | ||
4263 | |||
4264 | skreq->sg = kzalloc(sizeof(struct scatterlist) * | ||
4265 | skdev->sgs_per_request, GFP_KERNEL); | ||
4266 | if (skreq->sg == NULL) { | ||
4267 | rc = -ENOMEM; | ||
4268 | goto err_out; | ||
4269 | } | ||
4270 | sg_init_table(skreq->sg, skdev->sgs_per_request); | ||
4271 | |||
4272 | skreq->sksg_list = skd_cons_sg_list(skdev, | ||
4273 | skdev->sgs_per_request, | ||
4274 | &skreq->sksg_dma_address); | ||
4275 | |||
4276 | if (skreq->sksg_list == NULL) { | ||
4277 | rc = -ENOMEM; | ||
4278 | goto err_out; | ||
4279 | } | ||
4280 | |||
4281 | skreq->next = &skreq[1]; | ||
4282 | } | ||
4283 | |||
4284 | /* Free list is in order starting with the 0th entry. */ | ||
4285 | skdev->skreq_table[i - 1].next = NULL; | ||
4286 | skdev->skreq_free_list = skdev->skreq_table; | ||
4287 | |||
4288 | err_out: | ||
4289 | return rc; | ||
4290 | } | ||
4291 | |||
4292 | static int skd_cons_skspcl(struct skd_device *skdev) | ||
4293 | { | ||
4294 | int rc = 0; | ||
4295 | u32 i, nbytes; | ||
4296 | |||
4297 | pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n", | ||
4298 | skdev->name, __func__, __LINE__, | ||
4299 | sizeof(struct skd_special_context), | ||
4300 | skdev->n_special, | ||
4301 | sizeof(struct skd_special_context) * skdev->n_special); | ||
4302 | |||
4303 | skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) | ||
4304 | * skdev->n_special, GFP_KERNEL); | ||
4305 | if (skdev->skspcl_table == NULL) { | ||
4306 | rc = -ENOMEM; | ||
4307 | goto err_out; | ||
4308 | } | ||
4309 | |||
4310 | for (i = 0; i < skdev->n_special; i++) { | ||
4311 | struct skd_special_context *skspcl; | ||
4312 | |||
4313 | skspcl = &skdev->skspcl_table[i]; | ||
4314 | |||
4315 | skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; | ||
4316 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
4317 | |||
4318 | skspcl->req.next = &skspcl[1].req; | ||
4319 | |||
4320 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4321 | |||
4322 | skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4323 | &skspcl->mb_dma_address); | ||
4324 | if (skspcl->msg_buf == NULL) { | ||
4325 | rc = -ENOMEM; | ||
4326 | goto err_out; | ||
4327 | } | ||
4328 | |||
4329 | memset(skspcl->msg_buf, 0, nbytes); | ||
4330 | |||
4331 | skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * | ||
4332 | SKD_N_SG_PER_SPECIAL, GFP_KERNEL); | ||
4333 | if (skspcl->req.sg == NULL) { | ||
4334 | rc = -ENOMEM; | ||
4335 | goto err_out; | ||
4336 | } | ||
4337 | |||
4338 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, | ||
4339 | SKD_N_SG_PER_SPECIAL, | ||
4340 | &skspcl->req. | ||
4341 | sksg_dma_address); | ||
4342 | if (skspcl->req.sksg_list == NULL) { | ||
4343 | rc = -ENOMEM; | ||
4344 | goto err_out; | ||
4345 | } | ||
4346 | } | ||
4347 | |||
4348 | /* Free list is in order starting with the 0th entry. */ | ||
4349 | skdev->skspcl_table[i - 1].req.next = NULL; | ||
4350 | skdev->skspcl_free_list = skdev->skspcl_table; | ||
4351 | |||
4352 | return rc; | ||
4353 | |||
4354 | err_out: | ||
4355 | return rc; | ||
4356 | } | ||
4357 | |||
4358 | static int skd_cons_sksb(struct skd_device *skdev) | ||
4359 | { | ||
4360 | int rc = 0; | ||
4361 | struct skd_special_context *skspcl; | ||
4362 | u32 nbytes; | ||
4363 | |||
4364 | skspcl = &skdev->internal_skspcl; | ||
4365 | |||
4366 | skspcl->req.id = 0 + SKD_ID_INTERNAL; | ||
4367 | skspcl->req.state = SKD_REQ_STATE_IDLE; | ||
4368 | |||
4369 | nbytes = SKD_N_INTERNAL_BYTES; | ||
4370 | |||
4371 | skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4372 | &skspcl->db_dma_address); | ||
4373 | if (skspcl->data_buf == NULL) { | ||
4374 | rc = -ENOMEM; | ||
4375 | goto err_out; | ||
4376 | } | ||
4377 | |||
4378 | memset(skspcl->data_buf, 0, nbytes); | ||
4379 | |||
4380 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4381 | skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, | ||
4382 | &skspcl->mb_dma_address); | ||
4383 | if (skspcl->msg_buf == NULL) { | ||
4384 | rc = -ENOMEM; | ||
4385 | goto err_out; | ||
4386 | } | ||
4387 | |||
4388 | memset(skspcl->msg_buf, 0, nbytes); | ||
4389 | |||
4390 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, | ||
4391 | &skspcl->req.sksg_dma_address); | ||
4392 | if (skspcl->req.sksg_list == NULL) { | ||
4393 | rc = -ENOMEM; | ||
4394 | goto err_out; | ||
4395 | } | ||
4396 | |||
4397 | if (!skd_format_internal_skspcl(skdev)) { | ||
4398 | rc = -EINVAL; | ||
4399 | goto err_out; | ||
4400 | } | ||
4401 | |||
4402 | err_out: | ||
4403 | return rc; | ||
4404 | } | ||
4405 | |||
4406 | static int skd_cons_disk(struct skd_device *skdev) | ||
4407 | { | ||
4408 | int rc = 0; | ||
4409 | struct gendisk *disk; | ||
4410 | struct request_queue *q; | ||
4411 | unsigned long flags; | ||
4412 | |||
4413 | disk = alloc_disk(SKD_MINORS_PER_DEVICE); | ||
4414 | if (!disk) { | ||
4415 | rc = -ENOMEM; | ||
4416 | goto err_out; | ||
4417 | } | ||
4418 | |||
4419 | skdev->disk = disk; | ||
4420 | sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); | ||
4421 | |||
4422 | disk->major = skdev->major; | ||
4423 | disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; | ||
4424 | disk->fops = &skd_blockdev_ops; | ||
4425 | disk->private_data = skdev; | ||
4426 | |||
4427 | q = blk_init_queue(skd_request_fn, &skdev->lock); | ||
4428 | if (!q) { | ||
4429 | rc = -ENOMEM; | ||
4430 | goto err_out; | ||
4431 | } | ||
4432 | |||
4433 | skdev->queue = q; | ||
4434 | disk->queue = q; | ||
4435 | q->queuedata = skdev; | ||
4436 | |||
4437 | blk_queue_flush(q, REQ_FLUSH | REQ_FUA); | ||
4438 | blk_queue_max_segments(q, skdev->sgs_per_request); | ||
4439 | blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); | ||
4440 | |||
4441 | /* set sysfs optimal_io_size to 8K */ | ||
4442 | blk_queue_io_opt(q, 8192); | ||
4443 | |||
4444 | /* DISCARD Flag initialization. */ | ||
4445 | q->limits.discard_granularity = 8192; | ||
4446 | q->limits.discard_alignment = 0; | ||
4447 | q->limits.max_discard_sectors = UINT_MAX >> 9; | ||
4448 | q->limits.discard_zeroes_data = 1; | ||
4449 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | ||
4450 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); | ||
4451 | |||
4452 | spin_lock_irqsave(&skdev->lock, flags); | ||
4453 | pr_debug("%s:%s:%d stopping %s queue\n", | ||
4454 | skdev->name, __func__, __LINE__, skdev->name); | ||
4455 | blk_stop_queue(skdev->queue); | ||
4456 | spin_unlock_irqrestore(&skdev->lock, flags); | ||
4457 | |||
4458 | err_out: | ||
4459 | return rc; | ||
4460 | } | ||
4461 | |||
4462 | #define SKD_N_DEV_TABLE 16u | ||
4463 | static u32 skd_next_devno; | ||
4464 | |||
4465 | static struct skd_device *skd_construct(struct pci_dev *pdev) | ||
4466 | { | ||
4467 | struct skd_device *skdev; | ||
4468 | int blk_major = skd_major; | ||
4469 | int rc; | ||
4470 | |||
4471 | skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); | ||
4472 | |||
4473 | if (!skdev) { | ||
4474 | pr_err(PFX "(%s): memory alloc failure\n", | ||
4475 | pci_name(pdev)); | ||
4476 | return NULL; | ||
4477 | } | ||
4478 | |||
4479 | skdev->state = SKD_DRVR_STATE_LOAD; | ||
4480 | skdev->pdev = pdev; | ||
4481 | skdev->devno = skd_next_devno++; | ||
4482 | skdev->major = blk_major; | ||
4483 | skdev->irq_type = skd_isr_type; | ||
4484 | sprintf(skdev->name, DRV_NAME "%d", skdev->devno); | ||
4485 | skdev->dev_max_queue_depth = 0; | ||
4486 | |||
4487 | skdev->num_req_context = skd_max_queue_depth; | ||
4488 | skdev->num_fitmsg_context = skd_max_queue_depth; | ||
4489 | skdev->n_special = skd_max_pass_thru; | ||
4490 | skdev->cur_max_queue_depth = 1; | ||
4491 | skdev->queue_low_water_mark = 1; | ||
4492 | skdev->proto_ver = 99; | ||
4493 | skdev->sgs_per_request = skd_sgs_per_request; | ||
4494 | skdev->dbg_level = skd_dbg_level; | ||
4495 | |||
4496 | atomic_set(&skdev->device_count, 0); | ||
4497 | |||
4498 | spin_lock_init(&skdev->lock); | ||
4499 | |||
4500 | INIT_WORK(&skdev->completion_worker, skd_completion_worker); | ||
4501 | |||
4502 | pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); | ||
4503 | rc = skd_cons_skcomp(skdev); | ||
4504 | if (rc < 0) | ||
4505 | goto err_out; | ||
4506 | |||
4507 | pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); | ||
4508 | rc = skd_cons_skmsg(skdev); | ||
4509 | if (rc < 0) | ||
4510 | goto err_out; | ||
4511 | |||
4512 | pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); | ||
4513 | rc = skd_cons_skreq(skdev); | ||
4514 | if (rc < 0) | ||
4515 | goto err_out; | ||
4516 | |||
4517 | pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); | ||
4518 | rc = skd_cons_skspcl(skdev); | ||
4519 | if (rc < 0) | ||
4520 | goto err_out; | ||
4521 | |||
4522 | pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); | ||
4523 | rc = skd_cons_sksb(skdev); | ||
4524 | if (rc < 0) | ||
4525 | goto err_out; | ||
4526 | |||
4527 | pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); | ||
4528 | rc = skd_cons_disk(skdev); | ||
4529 | if (rc < 0) | ||
4530 | goto err_out; | ||
4531 | |||
4532 | pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__); | ||
4533 | return skdev; | ||
4534 | |||
4535 | err_out: | ||
4536 | pr_debug("%s:%s:%d construct failed\n", | ||
4537 | skdev->name, __func__, __LINE__); | ||
4538 | skd_destruct(skdev); | ||
4539 | return NULL; | ||
4540 | } | ||
4541 | |||
4542 | /* | ||
4543 | ***************************************************************************** | ||
4544 | * DESTRUCT (FREE) | ||
4545 | ***************************************************************************** | ||
4546 | */ | ||
4547 | |||
4548 | static void skd_free_skcomp(struct skd_device *skdev) | ||
4549 | { | ||
4550 | if (skdev->skcomp_table != NULL) { | ||
4551 | u32 nbytes; | ||
4552 | |||
4553 | nbytes = sizeof(skdev->skcomp_table[0]) * | ||
4554 | SKD_N_COMPLETION_ENTRY; | ||
4555 | pci_free_consistent(skdev->pdev, nbytes, | ||
4556 | skdev->skcomp_table, skdev->cq_dma_address); | ||
4557 | } | ||
4558 | |||
4559 | skdev->skcomp_table = NULL; | ||
4560 | skdev->cq_dma_address = 0; | ||
4561 | } | ||
4562 | |||
4563 | static void skd_free_skmsg(struct skd_device *skdev) | ||
4564 | { | ||
4565 | u32 i; | ||
4566 | |||
4567 | if (skdev->skmsg_table == NULL) | ||
4568 | return; | ||
4569 | |||
4570 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | ||
4571 | struct skd_fitmsg_context *skmsg; | ||
4572 | |||
4573 | skmsg = &skdev->skmsg_table[i]; | ||
4574 | |||
4575 | if (skmsg->msg_buf != NULL) { | ||
4576 | skmsg->msg_buf += skmsg->offset; | ||
4577 | skmsg->mb_dma_address += skmsg->offset; | ||
4578 | pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, | ||
4579 | skmsg->msg_buf, | ||
4580 | skmsg->mb_dma_address); | ||
4581 | } | ||
4582 | skmsg->msg_buf = NULL; | ||
4583 | skmsg->mb_dma_address = 0; | ||
4584 | } | ||
4585 | |||
4586 | kfree(skdev->skmsg_table); | ||
4587 | skdev->skmsg_table = NULL; | ||
4588 | } | ||
4589 | |||
4590 | static void skd_free_sg_list(struct skd_device *skdev, | ||
4591 | struct fit_sg_descriptor *sg_list, | ||
4592 | u32 n_sg, dma_addr_t dma_addr) | ||
4593 | { | ||
4594 | if (sg_list != NULL) { | ||
4595 | u32 nbytes; | ||
4596 | |||
4597 | nbytes = sizeof(*sg_list) * n_sg; | ||
4598 | |||
4599 | pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); | ||
4600 | } | ||
4601 | } | ||
4602 | |||
4603 | static void skd_free_skreq(struct skd_device *skdev) | ||
4604 | { | ||
4605 | u32 i; | ||
4606 | |||
4607 | if (skdev->skreq_table == NULL) | ||
4608 | return; | ||
4609 | |||
4610 | for (i = 0; i < skdev->num_req_context; i++) { | ||
4611 | struct skd_request_context *skreq; | ||
4612 | |||
4613 | skreq = &skdev->skreq_table[i]; | ||
4614 | |||
4615 | skd_free_sg_list(skdev, skreq->sksg_list, | ||
4616 | skdev->sgs_per_request, | ||
4617 | skreq->sksg_dma_address); | ||
4618 | |||
4619 | skreq->sksg_list = NULL; | ||
4620 | skreq->sksg_dma_address = 0; | ||
4621 | |||
4622 | kfree(skreq->sg); | ||
4623 | } | ||
4624 | |||
4625 | kfree(skdev->skreq_table); | ||
4626 | skdev->skreq_table = NULL; | ||
4627 | } | ||
4628 | |||
4629 | static void skd_free_skspcl(struct skd_device *skdev) | ||
4630 | { | ||
4631 | u32 i; | ||
4632 | u32 nbytes; | ||
4633 | |||
4634 | if (skdev->skspcl_table == NULL) | ||
4635 | return; | ||
4636 | |||
4637 | for (i = 0; i < skdev->n_special; i++) { | ||
4638 | struct skd_special_context *skspcl; | ||
4639 | |||
4640 | skspcl = &skdev->skspcl_table[i]; | ||
4641 | |||
4642 | if (skspcl->msg_buf != NULL) { | ||
4643 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4644 | pci_free_consistent(skdev->pdev, nbytes, | ||
4645 | skspcl->msg_buf, | ||
4646 | skspcl->mb_dma_address); | ||
4647 | } | ||
4648 | |||
4649 | skspcl->msg_buf = NULL; | ||
4650 | skspcl->mb_dma_address = 0; | ||
4651 | |||
4652 | skd_free_sg_list(skdev, skspcl->req.sksg_list, | ||
4653 | SKD_N_SG_PER_SPECIAL, | ||
4654 | skspcl->req.sksg_dma_address); | ||
4655 | |||
4656 | skspcl->req.sksg_list = NULL; | ||
4657 | skspcl->req.sksg_dma_address = 0; | ||
4658 | |||
4659 | kfree(skspcl->req.sg); | ||
4660 | } | ||
4661 | |||
4662 | kfree(skdev->skspcl_table); | ||
4663 | skdev->skspcl_table = NULL; | ||
4664 | } | ||
4665 | |||
4666 | static void skd_free_sksb(struct skd_device *skdev) | ||
4667 | { | ||
4668 | struct skd_special_context *skspcl; | ||
4669 | u32 nbytes; | ||
4670 | |||
4671 | skspcl = &skdev->internal_skspcl; | ||
4672 | |||
4673 | if (skspcl->data_buf != NULL) { | ||
4674 | nbytes = SKD_N_INTERNAL_BYTES; | ||
4675 | |||
4676 | pci_free_consistent(skdev->pdev, nbytes, | ||
4677 | skspcl->data_buf, skspcl->db_dma_address); | ||
4678 | } | ||
4679 | |||
4680 | skspcl->data_buf = NULL; | ||
4681 | skspcl->db_dma_address = 0; | ||
4682 | |||
4683 | if (skspcl->msg_buf != NULL) { | ||
4684 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | ||
4685 | pci_free_consistent(skdev->pdev, nbytes, | ||
4686 | skspcl->msg_buf, skspcl->mb_dma_address); | ||
4687 | } | ||
4688 | |||
4689 | skspcl->msg_buf = NULL; | ||
4690 | skspcl->mb_dma_address = 0; | ||
4691 | |||
4692 | skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, | ||
4693 | skspcl->req.sksg_dma_address); | ||
4694 | |||
4695 | skspcl->req.sksg_list = NULL; | ||
4696 | skspcl->req.sksg_dma_address = 0; | ||
4697 | } | ||
4698 | |||
4699 | static void skd_free_disk(struct skd_device *skdev) | ||
4700 | { | ||
4701 | struct gendisk *disk = skdev->disk; | ||
4702 | |||
4703 | if (disk != NULL) { | ||
4704 | struct request_queue *q = disk->queue; | ||
4705 | |||
4706 | if (disk->flags & GENHD_FL_UP) | ||
4707 | del_gendisk(disk); | ||
4708 | if (q) | ||
4709 | blk_cleanup_queue(q); | ||
4710 | put_disk(disk); | ||
4711 | } | ||
4712 | skdev->disk = NULL; | ||
4713 | } | ||
4714 | |||
4715 | static void skd_destruct(struct skd_device *skdev) | ||
4716 | { | ||
4717 | if (skdev == NULL) | ||
4718 | return; | ||
4719 | |||
4720 | |||
4721 | pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); | ||
4722 | skd_free_disk(skdev); | ||
4723 | |||
4724 | pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); | ||
4725 | skd_free_sksb(skdev); | ||
4726 | |||
4727 | pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); | ||
4728 | skd_free_skspcl(skdev); | ||
4729 | |||
4730 | pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); | ||
4731 | skd_free_skreq(skdev); | ||
4732 | |||
4733 | pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); | ||
4734 | skd_free_skmsg(skdev); | ||
4735 | |||
4736 | pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); | ||
4737 | skd_free_skcomp(skdev); | ||
4738 | |||
4739 | pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__); | ||
4740 | kfree(skdev); | ||
4741 | } | ||
4742 | |||
4743 | /* | ||
4744 | ***************************************************************************** | ||
4745 | * BLOCK DEVICE (BDEV) GLUE | ||
4746 | ***************************************************************************** | ||
4747 | */ | ||
4748 | |||
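/*
 * Report a synthetic CHS geometry (64 heads, 255 sectors per track)
 * derived from the capacity, for legacy HDIO_GETGEO callers.
 */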
4749 | static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) | ||
4750 | { | ||
4751 | struct skd_device *skdev; | ||
4752 | u64 capacity; | ||
4753 | |||
4754 | skdev = bdev->bd_disk->private_data; | ||
4755 | |||
4756 | pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n", | ||
4757 | skdev->name, __func__, __LINE__, | ||
4758 | bdev->bd_disk->disk_name, current->comm); | ||
4759 | |||
4760 | if (skdev->read_cap_is_valid) { | ||
4761 | capacity = get_capacity(skdev->disk); | ||
4762 | geo->heads = 64; | ||
4763 | geo->sectors = 255; | ||
4764 | geo->cylinders = (capacity) / (255 * 64); | ||
4765 | |||
4766 | return 0; | ||
4767 | } | ||
4768 | return -EIO; | ||
4769 | } | ||
4770 | |||
4771 | static int skd_bdev_attach(struct skd_device *skdev) | ||
4772 | { | ||
4773 | pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); | ||
4774 | add_disk(skdev->disk); | ||
4775 | return 0; | ||
4776 | } | ||
4777 | |||
4778 | static const struct block_device_operations skd_blockdev_ops = { | ||
4779 | .owner = THIS_MODULE, | ||
4780 | .ioctl = skd_bdev_ioctl, | ||
4781 | .getgeo = skd_bdev_getgeo, | ||
4782 | }; | ||
4783 | |||
4784 | |||
4785 | /* | ||
4786 | ***************************************************************************** | ||
4787 | * PCIe DRIVER GLUE | ||
4788 | ***************************************************************************** | ||
4789 | */ | ||
4790 | |||
4791 | static const struct pci_device_id skd_pci_tbl[] = { | ||
4792 | { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, | ||
4793 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | ||
4794 | { 0 } /* terminate list */ | ||
4795 | }; | ||
4796 | |||
4797 | MODULE_DEVICE_TABLE(pci, skd_pci_tbl); | ||
4798 | |||
4799 | static char *skd_pci_info(struct skd_device *skdev, char *str) | ||
4800 | { | ||
4801 | int pcie_reg; | ||
4802 | |||
4803 | strcpy(str, "PCIe ("); | ||
4804 | pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); | ||
4805 | |||
4806 | if (pcie_reg) { | ||
4807 | |||
4808 | char lwstr[6]; | ||
4809 | uint16_t pcie_lstat, lspeed, lwidth; | ||
4810 | |||
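/*
 * The Link Status register lives at offset 0x12 (PCI_EXP_LNKSTA) in the
 * PCIe capability: bits 3:0 are the link speed, bits 9:4 the link width.
 */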
4811 | pcie_reg += 0x12; | ||
4812 | pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); | ||
4813 | lspeed = pcie_lstat & (0xF); | ||
4814 | lwidth = (pcie_lstat & 0x3F0) >> 4; | ||
4815 | |||
4816 | if (lspeed == 1) | ||
4817 | strcat(str, "2.5GT/s "); | ||
4818 | else if (lspeed == 2) | ||
4819 | strcat(str, "5.0GT/s "); | ||
4820 | else | ||
4821 | strcat(str, "<unknown> "); | ||
4822 | snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); | ||
4823 | strcat(str, lwstr); | ||
4824 | } | ||
4825 | return str; | ||
4826 | } | ||
4827 | |||
4828 | static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
4829 | { | ||
4830 | int i; | ||
4831 | int rc = 0; | ||
4832 | char pci_str[32]; | ||
4833 | struct skd_device *skdev; | ||
4834 | |||
4835 | pr_info("STEC s1120 Driver(%s) version %s-b%s\n", | ||
4836 | DRV_NAME, DRV_VERSION, DRV_BUILD_ID); | ||
4837 | pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n", | ||
4838 | pci_name(pdev), pdev->vendor, pdev->device); | ||
4839 | |||
4840 | rc = pci_enable_device(pdev); | ||
4841 | if (rc) | ||
4842 | return rc; | ||
4843 | rc = pci_request_regions(pdev, DRV_NAME); | ||
4844 | if (rc) | ||
4845 | goto err_out; | ||
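/* Prefer full 64-bit DMA; fall back to a 32-bit mask if that fails. */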
4846 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
4847 | if (!rc) { | ||
4848 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
4850 | pr_err("(%s): consistent DMA mask error\n", | ||
4851 | pci_name(pdev)); | ||
4852 | } | ||
4853 | } else { | ||
4854 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
4855 | if (rc) { | ||
4856 | |||
4857 | pr_err("(%s): DMA mask error %d\n", | ||
4858 | pci_name(pdev), rc); | ||
4859 | goto err_out_regions; | ||
4860 | } | ||
4861 | } | ||
4862 | |||
4863 | if (!skd_major) { | ||
4864 | rc = register_blkdev(0, DRV_NAME); | ||
4865 | if (rc < 0) | ||
4866 | goto err_out_regions; | ||
4867 | BUG_ON(!rc); | ||
4868 | skd_major = rc; | ||
4869 | } | ||
4870 | |||
4871 | skdev = skd_construct(pdev); | ||
4872 | if (skdev == NULL) { | ||
4873 | rc = -ENOMEM; | ||
4874 | goto err_out_regions; | ||
4875 | } | ||
4876 | |||
4877 | skd_pci_info(skdev, pci_str); | ||
4878 | pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str); | ||
4879 | |||
4880 | pci_set_master(pdev); | ||
4881 | rc = pci_enable_pcie_error_reporting(pdev); | ||
4882 | if (rc) { | ||
4883 | pr_err( | ||
4884 | "(%s): bad enable of PCIe error reporting rc=%d\n", | ||
4885 | skd_name(skdev), rc); | ||
4886 | skdev->pcie_error_reporting_is_enabled = 0; | ||
4887 | } else | ||
4888 | skdev->pcie_error_reporting_is_enabled = 1; | ||
4889 | |||
4890 | |||
4891 | pci_set_drvdata(pdev, skdev); | ||
4892 | |||
4893 | skdev->disk->driverfs_dev = &pdev->dev; | ||
4894 | |||
4895 | for (i = 0; i < SKD_MAX_BARS; i++) { | ||
4896 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | ||
4897 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | ||
4898 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | ||
4899 | skdev->mem_size[i]); | ||
4900 | if (!skdev->mem_map[i]) { | ||
4901 | pr_err("(%s): Unable to map adapter memory!\n", | ||
4902 | skd_name(skdev)); | ||
4903 | rc = -ENODEV; | ||
4904 | goto err_out_iounmap; | ||
4905 | } | ||
4906 | pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", | ||
4907 | skdev->name, __func__, __LINE__, | ||
4908 | skdev->mem_map[i], | ||
4909 | (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); | ||
4910 | } | ||
4911 | |||
4912 | rc = skd_acquire_irq(skdev); | ||
4913 | if (rc) { | ||
4914 | pr_err("(%s): interrupt resource error %d\n", | ||
4915 | skd_name(skdev), rc); | ||
4916 | goto err_out_iounmap; | ||
4917 | } | ||
4918 | |||
4919 | rc = skd_start_timer(skdev); | ||
4920 | if (rc) | ||
4921 | goto err_out_timer; | ||
4922 | |||
4923 | init_waitqueue_head(&skdev->waitq); | ||
4924 | |||
4925 | skd_start_device(skdev); | ||
4926 | |||
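/*
 * Give the firmware up to SKD_START_WAIT_SECONDS to bring the drive
 * online before the gendisk is added.
 */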
4927 | rc = wait_event_interruptible_timeout(skdev->waitq, | ||
4928 | (skdev->gendisk_on), | ||
4929 | (SKD_START_WAIT_SECONDS * HZ)); | ||
4930 | if (skdev->gendisk_on > 0) { | ||
4931 | /* device came on-line after reset */ | ||
4932 | skd_bdev_attach(skdev); | ||
4933 | rc = 0; | ||
4934 | } else { | ||
4935 | /* we timed out; something is wrong with the device, | ||
4936 | * so don't add the disk structure */ | ||
4937 | pr_err( | ||
4938 | "(%s): error: waiting for s1120 timed out %d!\n", | ||
4939 | skd_name(skdev), rc); | ||
4940 | /* the wait timed out without an error code; report -ENXIO */ | ||
4941 | if (!rc) | ||
4942 | rc = -ENXIO; | ||
4943 | goto err_out_timer; | ||
4944 | } | ||
4945 | |||
4946 | |||
4947 | #ifdef SKD_VMK_POLL_HANDLER | ||
4948 | if (skdev->irq_type == SKD_IRQ_MSIX) { | ||
4949 | /* MSIX completion handler is being used for coredump */ | ||
4950 | vmklnx_scsi_register_poll_handler(skdev->scsi_host, | ||
4951 | skdev->msix_entries[5].vector, | ||
4952 | skd_comp_q, skdev); | ||
4953 | } else { | ||
4954 | vmklnx_scsi_register_poll_handler(skdev->scsi_host, | ||
4955 | skdev->pdev->irq, skd_isr, | ||
4956 | skdev); | ||
4957 | } | ||
4958 | #endif /* SKD_VMK_POLL_HANDLER */ | ||
4959 | |||
4960 | return rc; | ||
4961 | |||
4962 | err_out_timer: | ||
4963 | skd_stop_device(skdev); | ||
4964 | skd_release_irq(skdev); | ||
4965 | |||
4966 | err_out_iounmap: | ||
4967 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
4968 | if (skdev->mem_map[i]) | ||
4969 | iounmap(skdev->mem_map[i]); | ||
4970 | |||
4971 | if (skdev->pcie_error_reporting_is_enabled) | ||
4972 | pci_disable_pcie_error_reporting(pdev); | ||
4973 | |||
4974 | skd_destruct(skdev); | ||
4975 | |||
4976 | err_out_regions: | ||
4977 | pci_release_regions(pdev); | ||
4978 | |||
4979 | err_out: | ||
4980 | pci_disable_device(pdev); | ||
4981 | pci_set_drvdata(pdev, NULL); | ||
4982 | return rc; | ||
4983 | } | ||
4984 | |||
4985 | static void skd_pci_remove(struct pci_dev *pdev) | ||
4986 | { | ||
4987 | int i; | ||
4988 | struct skd_device *skdev; | ||
4989 | |||
4990 | skdev = pci_get_drvdata(pdev); | ||
4991 | if (!skdev) { | ||
4992 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
4993 | return; | ||
4994 | } | ||
4995 | skd_stop_device(skdev); | ||
4996 | skd_release_irq(skdev); | ||
4997 | |||
4998 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
4999 | if (skdev->mem_map[i]) | ||
5000 | iounmap(skdev->mem_map[i]); | ||
5001 | |||
5002 | if (skdev->pcie_error_reporting_is_enabled) | ||
5003 | pci_disable_pcie_error_reporting(pdev); | ||
5004 | |||
5005 | skd_destruct(skdev); | ||
5006 | |||
5007 | pci_release_regions(pdev); | ||
5008 | pci_disable_device(pdev); | ||
5009 | pci_set_drvdata(pdev, NULL); | ||
5012 | } | ||
5013 | |||
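/*
 * Suspend stops the device and gives back its IRQs, BAR mappings and
 * regions; resume below re-acquires them much as probe does.
 */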
5014 | static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
5015 | { | ||
5016 | int i; | ||
5017 | struct skd_device *skdev; | ||
5018 | |||
5019 | skdev = pci_get_drvdata(pdev); | ||
5020 | if (!skdev) { | ||
5021 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
5022 | return -EIO; | ||
5023 | } | ||
5024 | |||
5025 | skd_stop_device(skdev); | ||
5026 | |||
5027 | skd_release_irq(skdev); | ||
5028 | |||
5029 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
5030 | if (skdev->mem_map[i]) | ||
5031 | iounmap(skdev->mem_map[i]); | ||
5032 | |||
5033 | if (skdev->pcie_error_reporting_is_enabled) | ||
5034 | pci_disable_pcie_error_reporting(pdev); | ||
5035 | |||
5036 | pci_release_regions(pdev); | ||
5037 | pci_save_state(pdev); | ||
5038 | pci_disable_device(pdev); | ||
5039 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
5040 | return 0; | ||
5041 | } | ||
5042 | |||
5043 | static int skd_pci_resume(struct pci_dev *pdev) | ||
5044 | { | ||
5045 | int i; | ||
5046 | int rc = 0; | ||
5047 | struct skd_device *skdev; | ||
5048 | |||
5049 | skdev = pci_get_drvdata(pdev); | ||
5050 | if (!skdev) { | ||
5051 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
5052 | return -EIO; | ||
5053 | } | ||
5054 | |||
5055 | pci_set_power_state(pdev, PCI_D0); | ||
5056 | pci_enable_wake(pdev, PCI_D0, 0); | ||
5057 | pci_restore_state(pdev); | ||
5058 | |||
5059 | rc = pci_enable_device(pdev); | ||
5060 | if (rc) | ||
5061 | return rc; | ||
5062 | rc = pci_request_regions(pdev, DRV_NAME); | ||
5063 | if (rc) | ||
5064 | goto err_out; | ||
5065 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
5066 | if (!rc) { | ||
5067 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
5069 | pr_err("(%s): consistent DMA mask error\n", | ||
5070 | pci_name(pdev)); | ||
5071 | } | ||
5072 | } else { | ||
5073 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
5074 | if (rc) { | ||
5075 | |||
5076 | pr_err("(%s): DMA mask error %d\n", | ||
5077 | pci_name(pdev), rc); | ||
5078 | goto err_out_regions; | ||
5079 | } | ||
5080 | } | ||
5081 | |||
5082 | pci_set_master(pdev); | ||
5083 | rc = pci_enable_pcie_error_reporting(pdev); | ||
5084 | if (rc) { | ||
5085 | pr_err("(%s): bad enable of PCIe error reporting rc=%d\n", | ||
5086 | skdev->name, rc); | ||
5087 | skdev->pcie_error_reporting_is_enabled = 0; | ||
5088 | } else | ||
5089 | skdev->pcie_error_reporting_is_enabled = 1; | ||
5090 | |||
5091 | for (i = 0; i < SKD_MAX_BARS; i++) { | ||
5092 | |||
5093 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | ||
5094 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | ||
5095 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | ||
5096 | skdev->mem_size[i]); | ||
5097 | if (!skdev->mem_map[i]) { | ||
5098 | pr_err("(%s): Unable to map adapter memory!\n", | ||
5099 | skd_name(skdev)); | ||
5100 | rc = -ENODEV; | ||
5101 | goto err_out_iounmap; | ||
5102 | } | ||
5103 | pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", | ||
5104 | skdev->name, __func__, __LINE__, | ||
5105 | skdev->mem_map[i], | ||
5106 | (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); | ||
5107 | } | ||
5108 | rc = skd_acquire_irq(skdev); | ||
5109 | if (rc) { | ||
5110 | |||
5111 | pr_err("(%s): interrupt resource error %d\n", | ||
5112 | pci_name(pdev), rc); | ||
5113 | goto err_out_iounmap; | ||
5114 | } | ||
5115 | |||
5116 | rc = skd_start_timer(skdev); | ||
5117 | if (rc) | ||
5118 | goto err_out_timer; | ||
5119 | |||
5120 | init_waitqueue_head(&skdev->waitq); | ||
5121 | |||
5122 | skd_start_device(skdev); | ||
5123 | |||
5124 | return rc; | ||
5125 | |||
5126 | err_out_timer: | ||
5127 | skd_stop_device(skdev); | ||
5128 | skd_release_irq(skdev); | ||
5129 | |||
5130 | err_out_iounmap: | ||
5131 | for (i = 0; i < SKD_MAX_BARS; i++) | ||
5132 | if (skdev->mem_map[i]) | ||
5133 | iounmap(skdev->mem_map[i]); | ||
5134 | |||
5135 | if (skdev->pcie_error_reporting_is_enabled) | ||
5136 | pci_disable_pcie_error_reporting(pdev); | ||
5137 | |||
5138 | err_out_regions: | ||
5139 | pci_release_regions(pdev); | ||
5140 | |||
5141 | err_out: | ||
5142 | pci_disable_device(pdev); | ||
5143 | return rc; | ||
5144 | } | ||
5145 | |||
5146 | static void skd_pci_shutdown(struct pci_dev *pdev) | ||
5147 | { | ||
5148 | struct skd_device *skdev; | ||
5149 | |||
5150 | pr_err("skd_pci_shutdown called\n"); | ||
5151 | |||
5152 | skdev = pci_get_drvdata(pdev); | ||
5153 | if (!skdev) { | ||
5154 | pr_err("%s: no device data for PCI\n", pci_name(pdev)); | ||
5155 | return; | ||
5156 | } | ||
5157 | |||
5158 | pr_err("%s: calling stop\n", skd_name(skdev)); | ||
5159 | skd_stop_device(skdev); | ||
5160 | } | ||
5161 | |||
5162 | static struct pci_driver skd_driver = { | ||
5163 | .name = DRV_NAME, | ||
5164 | .id_table = skd_pci_tbl, | ||
5165 | .probe = skd_pci_probe, | ||
5166 | .remove = skd_pci_remove, | ||
5167 | .suspend = skd_pci_suspend, | ||
5168 | .resume = skd_pci_resume, | ||
5169 | .shutdown = skd_pci_shutdown, | ||
5170 | }; | ||
5171 | |||
5172 | /* | ||
5173 | ***************************************************************************** | ||
5174 | * LOGGING SUPPORT | ||
5175 | ***************************************************************************** | ||
5176 | */ | ||
5177 | |||
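/* Build the "<name>:<serial>:[<pci addr>]" id string used in log messages. */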
5178 | static const char *skd_name(struct skd_device *skdev) | ||
5179 | { | ||
5180 | memset(skdev->id_str, 0, sizeof(skdev->id_str)); | ||
5181 | |||
5182 | if (skdev->inquiry_is_valid) | ||
5183 | snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]", | ||
5184 | skdev->name, skdev->inq_serial_num, | ||
5185 | pci_name(skdev->pdev)); | ||
5186 | else | ||
5187 | snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]", | ||
5188 | skdev->name, pci_name(skdev->pdev)); | ||
5189 | |||
5190 | return skdev->id_str; | ||
5191 | } | ||
5192 | |||
5193 | const char *skd_drive_state_to_str(int state) | ||
5194 | { | ||
5195 | switch (state) { | ||
5196 | case FIT_SR_DRIVE_OFFLINE: | ||
5197 | return "OFFLINE"; | ||
5198 | case FIT_SR_DRIVE_INIT: | ||
5199 | return "INIT"; | ||
5200 | case FIT_SR_DRIVE_ONLINE: | ||
5201 | return "ONLINE"; | ||
5202 | case FIT_SR_DRIVE_BUSY: | ||
5203 | return "BUSY"; | ||
5204 | case FIT_SR_DRIVE_FAULT: | ||
5205 | return "FAULT"; | ||
5206 | case FIT_SR_DRIVE_DEGRADED: | ||
5207 | return "DEGRADED"; | ||
5208 | case FIT_SR_PCIE_LINK_DOWN: | ||
5209 | return "INK_DOWN"; | ||
5210 | case FIT_SR_DRIVE_SOFT_RESET: | ||
5211 | return "SOFT_RESET"; | ||
5212 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | ||
5213 | return "NEED_FW"; | ||
5214 | case FIT_SR_DRIVE_INIT_FAULT: | ||
5215 | return "INIT_FAULT"; | ||
5216 | case FIT_SR_DRIVE_BUSY_SANITIZE: | ||
5217 | return "BUSY_SANITIZE"; | ||
5218 | case FIT_SR_DRIVE_BUSY_ERASE: | ||
5219 | return "BUSY_ERASE"; | ||
5220 | case FIT_SR_DRIVE_FW_BOOTING: | ||
5221 | return "FW_BOOTING"; | ||
5222 | default: | ||
5223 | return "???"; | ||
5224 | } | ||
5225 | } | ||
5226 | |||
5227 | const char *skd_skdev_state_to_str(enum skd_drvr_state state) | ||
5228 | { | ||
5229 | switch (state) { | ||
5230 | case SKD_DRVR_STATE_LOAD: | ||
5231 | return "LOAD"; | ||
5232 | case SKD_DRVR_STATE_IDLE: | ||
5233 | return "IDLE"; | ||
5234 | case SKD_DRVR_STATE_BUSY: | ||
5235 | return "BUSY"; | ||
5236 | case SKD_DRVR_STATE_STARTING: | ||
5237 | return "STARTING"; | ||
5238 | case SKD_DRVR_STATE_ONLINE: | ||
5239 | return "ONLINE"; | ||
5240 | case SKD_DRVR_STATE_PAUSING: | ||
5241 | return "PAUSING"; | ||
5242 | case SKD_DRVR_STATE_PAUSED: | ||
5243 | return "PAUSED"; | ||
5244 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | ||
5245 | return "DRAINING_TIMEOUT"; | ||
5246 | case SKD_DRVR_STATE_RESTARTING: | ||
5247 | return "RESTARTING"; | ||
5248 | case SKD_DRVR_STATE_RESUMING: | ||
5249 | return "RESUMING"; | ||
5250 | case SKD_DRVR_STATE_STOPPING: | ||
5251 | return "STOPPING"; | ||
5252 | case SKD_DRVR_STATE_SYNCING: | ||
5253 | return "SYNCING"; | ||
5254 | case SKD_DRVR_STATE_FAULT: | ||
5255 | return "FAULT"; | ||
5256 | case SKD_DRVR_STATE_DISAPPEARED: | ||
5257 | return "DISAPPEARED"; | ||
5258 | case SKD_DRVR_STATE_BUSY_ERASE: | ||
5259 | return "BUSY_ERASE"; | ||
5260 | case SKD_DRVR_STATE_BUSY_SANITIZE: | ||
5261 | return "BUSY_SANITIZE"; | ||
5262 | case SKD_DRVR_STATE_BUSY_IMMINENT: | ||
5263 | return "BUSY_IMMINENT"; | ||
5264 | case SKD_DRVR_STATE_WAIT_BOOT: | ||
5265 | return "WAIT_BOOT"; | ||
5266 | |||
5267 | default: | ||
5268 | return "???"; | ||
5269 | } | ||
5270 | } | ||
5271 | |||
5272 | const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) | ||
5273 | { | ||
5274 | switch (state) { | ||
5275 | case SKD_MSG_STATE_IDLE: | ||
5276 | return "IDLE"; | ||
5277 | case SKD_MSG_STATE_BUSY: | ||
5278 | return "BUSY"; | ||
5279 | default: | ||
5280 | return "???"; | ||
5281 | } | ||
5282 | } | ||
5283 | |||
5284 | const char *skd_skreq_state_to_str(enum skd_req_state state) | ||
5285 | { | ||
5286 | switch (state) { | ||
5287 | case SKD_REQ_STATE_IDLE: | ||
5288 | return "IDLE"; | ||
5289 | case SKD_REQ_STATE_SETUP: | ||
5290 | return "SETUP"; | ||
5291 | case SKD_REQ_STATE_BUSY: | ||
5292 | return "BUSY"; | ||
5293 | case SKD_REQ_STATE_COMPLETED: | ||
5294 | return "COMPLETED"; | ||
5295 | case SKD_REQ_STATE_TIMEOUT: | ||
5296 | return "TIMEOUT"; | ||
5297 | case SKD_REQ_STATE_ABORTED: | ||
5298 | return "ABORTED"; | ||
5299 | default: | ||
5300 | return "???"; | ||
5301 | } | ||
5302 | } | ||
5303 | |||
5304 | static void skd_log_skdev(struct skd_device *skdev, const char *event) | ||
5305 | { | ||
5306 | pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n", | ||
5307 | skdev->name, __func__, __LINE__, skdev->name, skdev, event); | ||
5308 | pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n", | ||
5309 | skdev->name, __func__, __LINE__, | ||
5310 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | ||
5311 | skd_skdev_state_to_str(skdev->state), skdev->state); | ||
5312 | pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n", | ||
5313 | skdev->name, __func__, __LINE__, | ||
5314 | skdev->in_flight, skdev->cur_max_queue_depth, | ||
5315 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); | ||
5316 | pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n", | ||
5317 | skdev->name, __func__, __LINE__, | ||
5318 | skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); | ||
5319 | } | ||
5320 | |||
5321 | static void skd_log_skmsg(struct skd_device *skdev, | ||
5322 | struct skd_fitmsg_context *skmsg, const char *event) | ||
5323 | { | ||
5324 | pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n", | ||
5325 | skdev->name, __func__, __LINE__, skdev->name, skmsg, event); | ||
5326 | pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n", | ||
5327 | skdev->name, __func__, __LINE__, | ||
5328 | skd_skmsg_state_to_str(skmsg->state), skmsg->state, | ||
5329 | skmsg->id, skmsg->length); | ||
5330 | } | ||
5331 | |||
5332 | static void skd_log_skreq(struct skd_device *skdev, | ||
5333 | struct skd_request_context *skreq, const char *event) | ||
5334 | { | ||
5335 | pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n", | ||
5336 | skdev->name, __func__, __LINE__, skdev->name, skreq, event); | ||
5337 | pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n", | ||
5338 | skdev->name, __func__, __LINE__, | ||
5339 | skd_skreq_state_to_str(skreq->state), skreq->state, | ||
5340 | skreq->id, skreq->fitmsg_id); | ||
5341 | pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n", | ||
5342 | skdev->name, __func__, __LINE__, | ||
5343 | skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); | ||
5344 | |||
5345 | if (skreq->req != NULL) { | ||
5346 | struct request *req = skreq->req; | ||
5347 | u32 lba = (u32)blk_rq_pos(req); | ||
5348 | u32 count = blk_rq_sectors(req); | ||
5349 | |||
5350 | pr_debug("%s:%s:%d " | ||
5351 | "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", | ||
5352 | skdev->name, __func__, __LINE__, | ||
5353 | req, lba, lba, count, count, | ||
5354 | (int)rq_data_dir(req)); | ||
5355 | } else | ||
5356 | pr_debug("%s:%s:%d req=NULL\n", | ||
5357 | skdev->name, __func__, __LINE__); | ||
5358 | } | ||
5359 | |||
5360 | /* | ||
5361 | ***************************************************************************** | ||
5362 | * MODULE GLUE | ||
5363 | ***************************************************************************** | ||
5364 | */ | ||
5365 | |||
5366 | static int __init skd_init(void) | ||
5367 | { | ||
5368 | pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID); | ||
5369 | |||
5370 | switch (skd_isr_type) { | ||
5371 | case SKD_IRQ_LEGACY: | ||
5372 | case SKD_IRQ_MSI: | ||
5373 | case SKD_IRQ_MSIX: | ||
5374 | break; | ||
5375 | default: | ||
5376 | pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", | ||
5377 | skd_isr_type, SKD_IRQ_DEFAULT); | ||
5378 | skd_isr_type = SKD_IRQ_DEFAULT; | ||
5379 | } | ||
5380 | |||
5381 | if (skd_max_queue_depth < 1 || | ||
5382 | skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { | ||
5383 | pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", | ||
5384 | skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); | ||
5385 | skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; | ||
5386 | } | ||
5387 | |||
5388 | if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { | ||
5389 | pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", | ||
5390 | skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); | ||
5391 | skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; | ||
5392 | } | ||
5393 | |||
5394 | if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { | ||
5395 | pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n", | ||
5396 | skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); | ||
5397 | skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; | ||
5398 | } | ||
5399 | |||
5400 | if (skd_dbg_level < 0 || skd_dbg_level > 2) { | ||
5401 | pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", | ||
5402 | skd_dbg_level, 0); | ||
5403 | skd_dbg_level = 0; | ||
5404 | } | ||
5405 | |||
5406 | if (skd_isr_comp_limit < 0) { | ||
5407 | pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", | ||
5408 | skd_isr_comp_limit, 0); | ||
5409 | skd_isr_comp_limit = 0; | ||
5410 | } | ||
5411 | |||
5412 | if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) { | ||
5413 | pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n", | ||
5414 | skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT); | ||
5415 | skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; | ||
5416 | } | ||
5417 | |||
5418 | return pci_register_driver(&skd_driver); | ||
5419 | } | ||
5420 | |||
5421 | static void __exit skd_exit(void) | ||
5422 | { | ||
5423 | pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); | ||
5424 | |||
5425 | pci_unregister_driver(&skd_driver); | ||
5426 | |||
5427 | if (skd_major) | ||
5428 | unregister_blkdev(skd_major, DRV_NAME); | ||
5429 | } | ||
5430 | |||
5431 | module_init(skd_init); | ||
5432 | module_exit(skd_exit); | ||