Diffstat (limited to 'drivers/scsi/csiostor/csio_init.c')
-rw-r--r--	drivers/scsi/csiostor/csio_init.c	| 1274
1 file changed, 1274 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
new file mode 100644
index 000000000000..fdd408ff80ad
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -0,0 +1,1274 @@
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>

#include "csio_init.h"
#include "csio_defs.h"

#define CSIO_MIN_MEMPOOL_SZ	64

static struct dentry *csio_debugfs_root;

static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;

/*
 * debugfs support
 */
static int
csio_mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
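	/*
	 * The file's private_data is the csio_hw pointer with the memory
	 * index (MEM_EDC0/MEM_EDC1/MEM_MC) packed into its low two bits
	 * by csio_add_debugfs_mem(); mask the tag off to recover both.
	 */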
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct csio_hw *hw = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = csio_hw_mc_read(hw, pos, data, NULL);
		else
			ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
		if (ret)
			return ret;

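		/*
		 * Each controller read fills a 64-byte (16 x __be32)
		 * window; copy out only the slice of it that overlaps
		 * the requested range.
		 */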
		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
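	/* Return the number of bytes copied and advance the file offset. */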
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations csio_mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = csio_mem_open,
	.read    = csio_mem_read,
	.llseek  = default_llseek,
};

static void __devinit
csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
		     unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

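	/*
	 * Tag the hw pointer with the memory index in its low two bits;
	 * the structure is more than 4-byte aligned, so the tag survives
	 * and csio_mem_read() can decode it with "& 3".
	 */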
	de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
				 (void *)hw + idx, &csio_mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}

static int __devinit
csio_setup_debugfs(struct csio_hw *hw)
{
	int i;

	if (IS_ERR_OR_NULL(hw->debugfs_root))
		return -1;

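	/*
	 * Expose one file per populated adapter memory: a 5 MB window for
	 * each enabled EDC, and the external MC sized from its BAR register.
	 */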
	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		csio_add_debugfs_mem(hw, "mc", MEM_MC,
			EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
	return 0;
}

/*
 * csio_dfs_create - Creates and sets up per-hw debugfs.
 *
 */
static int
csio_dfs_create(struct csio_hw *hw)
{
	if (csio_debugfs_root) {
		hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
						      csio_debugfs_root);
		csio_setup_debugfs(hw);
	}

	return 0;
}

/*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */
static int
csio_dfs_destroy(struct csio_hw *hw)
{
	if (hw->debugfs_root)
		debugfs_remove_recursive(hw->debugfs_root);

	return 0;
}

/*
 * csio_dfs_init - Debug filesystem initialization for the module.
 *
 */
static int
csio_dfs_init(void)
{
	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!csio_debugfs_root)
		pr_warn("Could not create debugfs entry, continuing\n");

	return 0;
}

/*
 * csio_dfs_exit - debugfs cleanup for the module.
 */
static void
csio_dfs_exit(void)
{
	debugfs_remove(csio_debugfs_root);
}

/*
 * csio_pci_init - PCI initialization.
 * @pdev: PCI device.
 * @bars: Bitmask of bars to be requested.
 *
 * Initializes the PCI function by enabling MMIO, setting bus
 * mastership and setting DMA mask.
 */
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
	int rv = -ENODEV;

	*bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_enable_device_mem(pdev))
		goto err;

	if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
		goto err_disable_device;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

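	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask. */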
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else {
		dev_err(&pdev->dev, "No suitable DMA available.\n");
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_selected_regions(pdev, *bars);
err_disable_device:
	pci_disable_device(pdev);
err:
	return rv;
}

/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 *
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
	pci_release_selected_regions(pdev, *bars);
	pci_disable_device(pdev);
}

/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 *
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}

static void
csio_hw_exit_workers(struct csio_hw *hw)
{
	cancel_work_sync(&hw->evtq_work);
	flush_scheduled_work();
}

static int
csio_create_queues(struct csio_hw *hw)
{
	int i, j;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
		return 0;

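	/*
	 * In INTx/MSI mode a single forward-interrupt IQ is created first;
	 * the remaining queues deliver their interrupts through it.
	 */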
	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
				       0, hw->pport[0].portid, false, NULL);
		if (rv != 0) {
			csio_err(hw, "Forward interrupt IQ create failed: %d\n",
				 rv);
			return rv;
		}
	}

	/* FW event queue */
	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
			       csio_get_fwevt_intr_idx(hw),
			       hw->pport[0].portid, true, NULL);
	if (rv != 0) {
		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
		return rv;
	}

	/* Create mgmt queue */
	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
			       mgmtm->iq_idx, hw->pport[0].portid, NULL);

	if (rv != 0) {
		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
		goto err;
	}

	/* Create SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < info->max_cpus; j++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];

			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
					       sqset->intr_idx, i, false, NULL);
			if (rv != 0) {
				csio_err(hw,
					 "SCSI module IQ config failed [%d][%d]:%d\n",
					 i, j, rv);
				goto err;
			}
			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
					       sqset->iq_idx, i, NULL);
			if (rv != 0) {
				csio_err(hw,
					 "SCSI module EQ config failed [%d][%d]:%d\n",
					 i, j, rv);
				goto err;
			}
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
	return 0;
err:
	csio_wr_destroy_queues(hw, true);
	return -EINVAL;
}

/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates memory for the queues and registers them with FW.
 */
int
csio_config_queues(struct csio_hw *hw)
{
	int i, j, idx, k = 0;
	int rv;
	struct csio_scsi_qset *sqset;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_scsi_qset *orig;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
		return csio_create_queues(hw);

	/* Calculate number of SCSI queues for MSIX we would like */
	hw->num_scsi_msix_cpus = num_online_cpus();
	hw->num_sqsets = num_online_cpus() * hw->num_pports;

	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
	}

	/* Initialize max_cpus, may get reduced during msix allocations */
	for (i = 0; i < hw->num_pports; i++)
		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
		 hw->num_sqsets, hw->num_scsi_msix_cpus);

	csio_intr_enable(hw);

	if (hw->intr_mode != CSIO_IM_MSIX) {
		/* Allocate Forward interrupt iq. */
		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
						  CSIO_INTR_WRSIZE, CSIO_INGRESS,
						  (void *)hw, 0, 0, NULL);
		if (hw->intr_iq_idx == -1) {
			csio_err(hw,
				 "Forward interrupt queue creation failed\n");
			goto intr_disable;
		}
	}

	/* Allocate the FW evt queue */
	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
					   CSIO_FWEVT_WRSIZE,
					   CSIO_INGRESS, (void *)hw,
					   CSIO_FWEVT_FLBUFS, 0,
					   csio_fwevt_intx_handler);
	if (hw->fwevt_iq_idx == -1) {
		csio_err(hw, "FW evt queue creation failed\n");
		goto intr_disable;
	}

	/* Allocate the mgmt queue */
	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
					CSIO_MGMT_EQ_WRSIZE,
					CSIO_EGRESS, (void *)hw, 0, 0, NULL);
	if (mgmtm->eq_idx == -1) {
		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
		goto intr_disable;
	}

	/* Use FW IQ for MGMT req completion */
	mgmtm->iq_idx = hw->fwevt_iq_idx;

	/* Allocate SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			sqset = &hw->sqset[i][j];

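			/*
			 * More online CPUs than qsets available for this
			 * port: wrap the extra CPUs back onto existing
			 * qsets (j mod max_cpus) instead of allocating
			 * new queues for them.
			 */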
			if (j >= info->max_cpus) {
				k = j % info->max_cpus;
				orig = &hw->sqset[i][k];
				sqset->eq_idx = orig->eq_idx;
				sqset->iq_idx = orig->iq_idx;
				continue;
			}

			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
					      CSIO_EGRESS, (void *)hw, 0, 0,
					      NULL);
			if (idx == -1) {
				csio_err(hw, "EQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}

			sqset->eq_idx = idx;

			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
					      CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
					      (void *)hw, 0, 0,
					      csio_scsi_intx_handler);
			if (idx == -1) {
				csio_err(hw, "IQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}
			sqset->iq_idx = idx;
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

	rv = csio_create_queues(hw);
	if (rv != 0)
		goto intr_disable;

	/*
	 * Now request IRQs for the vectors. In the event of a failure,
	 * cleanup is handled internally by this function.
	 */
	rv = csio_request_irqs(hw);
	if (rv != 0)
		return -EINVAL;

	return 0;

intr_disable:
	csio_intr_disable(hw, false);

	return -EINVAL;
}

static int
csio_resource_alloc(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = -ENOMEM;

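	/*
	 * Total number of queues the WR module tracks: an IQ and an EQ
	 * per SCSI qset, plus the fixed HW ingress/egress/FL/INTx queues.
	 */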
	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
		       CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						     sizeof(struct csio_mb));
	if (!hw->mb_mempool)
		goto err;

	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						sizeof(struct csio_rnode));
	if (!hw->rnode_mempool)
		goto err_free_mb_mempool;

	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
					    CSIO_SCSI_RSP_LEN, 8, 0);
	if (!hw->scsi_pci_pool)
		goto err_free_rn_pool;

	return 0;

err_free_rn_pool:
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
err_free_mb_mempool:
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
err:
	return rv;
}

static void
csio_resource_free(struct csio_hw *hw)
{
	pci_pool_destroy(hw->scsi_pci_pool);
	hw->scsi_pci_pool = NULL;
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
}

/*
 * csio_hw_alloc - Allocate and initialize the HW module.
 * @pdev: PCI device.
 *
 * Allocates HW structure, DMA, memory resources, maps BARS to
 * host memory and initializes HW module.
 */
static struct csio_hw * __devinit
csio_hw_alloc(struct pci_dev *pdev)
{
	struct csio_hw *hw;

	hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
	if (!hw)
		goto err;

	hw->pdev = pdev;
	strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);

	/* memory pool/DMA pool allocation */
	if (csio_resource_alloc(hw))
		goto err_free_hw;

	/* Get the start address of registers from BAR 0 */
	hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (!hw->regstart) {
		csio_err(hw, "Could not map BAR 0, regstart = %p\n",
			 hw->regstart);
		goto err_resource_free;
	}

	csio_hw_init_workers(hw);

	if (csio_hw_init(hw))
		goto err_unmap_bar;

	csio_dfs_create(hw);

	csio_dbg(hw, "hw:%p\n", hw);

	return hw;

err_unmap_bar:
	csio_hw_exit_workers(hw);
	iounmap(hw->regstart);
err_resource_free:
	csio_resource_free(hw);
err_free_hw:
	kfree(hw);
err:
	return NULL;
}

/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module
 *
 * Disable interrupts, uninit the HW module, free resources, free hw.
 */
static void
csio_hw_free(struct csio_hw *hw)
{
	csio_intr_disable(hw, true);
	csio_hw_exit_workers(hw);
	csio_hw_exit(hw);
	iounmap(hw->regstart);
	csio_dfs_destroy(hw);
	csio_resource_free(hw);
	kfree(hw);
}

/**
 * csio_shost_init - Create and initialize the lnode module.
 * @hw: The HW module.
 * @dev: The device associated with this invocation.
 * @probe: Called from probe context or not?
 * @pln: Parent lnode, if any.
 *
 * Allocates the lnode structure via scsi_host_alloc, initializes the
 * shost, initializes the lnode module and registers with the SCSI ML
 * via scsi_host_add. This function is shared between physical and
 * virtual node ports.
 */
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
		bool probe, struct csio_lnode *pln)
{
	struct Scsi_Host *shost = NULL;
	struct csio_lnode *ln;

	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

	/*
	 * hw->pdev is the physical port's PCI dev structure,
	 * which will be different from the NPIV dev structure.
	 */
	if (dev == &hw->pdev->dev)
		shost = scsi_host_alloc(
				&csio_fcoe_shost_template,
				sizeof(struct csio_lnode));
	else
		shost = scsi_host_alloc(
				&csio_fcoe_shost_vport_template,
				sizeof(struct csio_lnode));

	if (!shost)
		goto err;

	ln = shost_priv(shost);
	memset(ln, 0, sizeof(struct csio_lnode));

	/* Link common lnode to this lnode */
	ln->dev_num = (shost->host_no << 16);

	shost->can_queue = CSIO_MAX_QUEUE;
	shost->this_id = -1;
	shost->unique_id = shost->host_no;
	shost->max_cmd_len = 16; /* Max CDB length supported */
	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
			      hw->fres_info.max_ssns);
	shost->max_lun = CSIO_MAX_LUN;
	if (dev == &hw->pdev->dev)
		shost->transportt = csio_fcoe_transport;
	else
		shost->transportt = csio_fcoe_transport_vport;

	/* root lnode */
	if (!hw->rln)
		hw->rln = ln;

	/* Other initialization here: Common, Transport specific */
	if (csio_lnode_init(ln, hw, pln))
		goto err_shost_put;

	if (scsi_add_host(shost, dev))
		goto err_lnode_exit;

	return ln;

err_lnode_exit:
	csio_lnode_exit(ln);
err_shost_put:
	scsi_host_put(shost);
err:
	return NULL;
}

/**
 * csio_shost_exit - De-instantiate the shost.
 * @ln: The lnode module corresponding to the shost.
 *
 */
void
csio_shost_exit(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* Inform transport */
	fc_remove_host(shost);

	/* Inform SCSI ML */
	scsi_remove_host(shost);

	/* Flush all the events, so that any rnode removal events
	 * already queued are all handled, before we remove the lnode.
	 */
	spin_lock_irq(&hw->lock);
	csio_evtq_flush(hw);
	spin_unlock_irq(&hw->lock);

	csio_lnode_exit(ln);
	scsi_host_put(shost);
}

struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
	return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}

void
csio_lnodes_block_request(struct csio_hw *hw)
{
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

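	/*
	 * Snapshot all lnode pointers under hw->lock, then block requests
	 * on each shost after dropping the lock, so the SCSI midlayer is
	 * never entered with the HW spinlock held.
	 */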
	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;

		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
		return;
	}

	/* Get all child lnodes (NPIV ports) */
	spin_lock_irq(&hw->lock);
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete NPIV lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		fc_vport_terminate(ln->fc_vport);
	}

	/* Delete only npiv lnodes */
	if (npiv)
		goto free_lnodes;

	cur_cnt = 0;
	/* Get all physical lnodes */
	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete physical lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
		csio_shost_exit(lnode_list[ii]);
	}

free_lnodes:
	kfree(lnode_list);
}

/*
 * csio_lnode_init_post: Set lnode attributes after starting HW.
 * @ln: lnode.
 *
 */
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);

	csio_fchost_attr_init(ln);

	scsi_scan_host(shost);
}

/*
 * csio_probe_one - Instantiate the driver for this PCI function.
 * @pdev: PCI device
 * @id: Device ID
 *
 * This is the .probe() callback of the driver. This function:
 * - Initializes the PCI function by enabling MMIO, setting bus
 *   mastership and setting DMA mask.
 * - Allocates HW structure, DMA, memory resources, maps BARS to
 *   host memory and initializes HW module.
 * - Allocates lnode structure via scsi_host_alloc, initializes
 *   shost, initializes lnode module and registers with SCSI ML
 *   via scsi_host_add.
 * - Enables interrupts, and starts the chip by kicking off the
 *   HW state machine.
 * - Once hardware is ready, initiates a scan of the host via
 *   scsi_scan_host.
 */
static int __devinit
csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rv;
	int bars;
	int i;
	struct csio_hw *hw;
	struct csio_lnode *ln;

	rv = csio_pci_init(pdev, &bars);
	if (rv)
		goto err;

	hw = csio_hw_alloc(pdev);
	if (!hw) {
		rv = -ENODEV;
		goto err_pci_exit;
	}

	pci_set_drvdata(pdev, hw);

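	/*
	 * If firmware fails to start, leave the driver loaded anyway: the
	 * debugfs memory-dump files created above remain available for
	 * post-mortem diagnostics ("debug mode").
	 */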
	if (csio_hw_start(hw) != 0) {
		dev_err(&pdev->dev,
			"Failed to start FW, continuing in debug mode.\n");
		return 0;
	}

	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
		FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		FW_HDR_FW_VER_BUILD_GET(hw->fwrev));

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_lnode_exit;

	return 0;

err_lnode_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	pci_set_drvdata(hw->pdev, NULL);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
err_pci_exit:
	csio_pci_exit(pdev, &bars);
err:
	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
	return rv;
}

/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device
 *
 * Used during hotplug operation.
 */
static void __devexit
csio_remove_one(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Stops lnode, Rnode s/m
	 * Quiesce IOs.
	 * All sessions with remote ports are unregistered.
	 */
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);

	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	pci_set_drvdata(pdev, NULL);
	csio_pci_exit(pdev, &bars);
}

/*
 * csio_pci_error_detected - PCI error was detected
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Post PCI error detected evt to HW s/m
	 * HW s/m handles this evt by quiescing IOs, unregisters rports
	 * and finally takes the device to offline.
	 */
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_intr_disable(hw, true);
	pci_disable_device(pdev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/*
 * csio_pci_slot_reset - PCI slot has been reset.
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int ready;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

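	/*
	 * Restore the saved config space and immediately re-save it,
	 * keeping a valid saved copy for any subsequent reset; then clear
	 * pending AER uncorrectable-error status.
	 */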
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	/* Bring HW s/m to ready state,
	 * but don't resume IOs.
	 */
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
	ready = csio_is_hw_ready(hw);
	spin_unlock_irq(&hw->lock);

	if (ready) {
		return PCI_ERS_RESULT_RECOVERED;
	} else {
		dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

/*
 * csio_pci_resume - Resume normal operations
 * @pdev: PCI device
 *
 */
static void
csio_pci_resume(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	struct csio_lnode *ln;
	int rv = 0;
	int i;

	/* Bring the LINK UP and Resume IO */

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_resume_exit;

	return;

err_resume_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}

static struct pci_error_handlers csio_err_handler = {
	.error_detected	= csio_pci_error_detected,
	.slot_reset	= csio_pci_slot_reset,
	.resume		= csio_pci_resume,
};
static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
	CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),	/* T440DBG FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),		/* T420CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),		/* T422CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),		/* T440CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),	/* T420BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),	/* T440BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),		/* T440CH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),		/* T420SO FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),		/* T420CX FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),		/* T420BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),		/* T404BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),		/* B420 FCOE */
	CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),		/* B404 FCOE */
	CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),		/* T480 CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),	/* T440 LP-CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K, 0),		/* PE10K FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0),		/* PE10K FCOE on PF1 */
	{ 0, 0, 0, 0, 0, 0, 0 }
};

static struct pci_driver csio_pci_driver = {
	.name		= KBUILD_MODNAME,
	.driver		= {
		.owner	= THIS_MODULE,
	},
	.id_table	= csio_pci_tbl,
	.probe		= csio_probe_one,
	.remove		= csio_remove_one,
	.err_handler	= &csio_err_handler,
};

/*
 * csio_init - Chelsio storage driver initialization function.
 *
 */
static int __init
csio_init(void)
{
	int rv = -ENOMEM;

	pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);

	csio_dfs_init();

	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
	if (!csio_fcoe_transport)
		goto err;

	csio_fcoe_transport_vport =
		fc_attach_transport(&csio_fc_transport_vport_funcs);
	if (!csio_fcoe_transport_vport)
		goto err_vport;

	rv = pci_register_driver(&csio_pci_driver);
	if (rv)
		goto err_pci;

	return 0;

err_pci:
	fc_release_transport(csio_fcoe_transport_vport);
err_vport:
	fc_release_transport(csio_fcoe_transport);
err:
	csio_dfs_exit();
	return rv;
}

/*
 * csio_exit - Chelsio storage driver uninitialization.
 *
 * Function that gets called in the unload path.
 */
static void __exit
csio_exit(void)
{
	pci_unregister_driver(&csio_pci_driver);
	csio_dfs_exit();
	fc_release_transport(csio_fcoe_transport_vport);
	fc_release_transport(csio_fcoe_transport);
}

module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(CSIO_FW_FNAME);