Diffstat (limited to 'drivers/infiniband/hw/qib/qib_init.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 42 ++++++++++++------------------------------
1 file changed, 12 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f1d16d3a01f6..a01f3fce8eb3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -92,9 +91,11 @@ unsigned long *qib_cpulist;
 /* set number of contexts we'll actually use */
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
-	if (!qib_cfgctxts)
+	if (!qib_cfgctxts) {
 		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
-	else if (qib_cfgctxts < dd->num_pports)
+		if (dd->cfgctxts > dd->ctxtcnt)
+			dd->cfgctxts = dd->ctxtcnt;
+	} else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
 		dd->cfgctxts = qib_cfgctxts;
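The behavioural change in this hunk: when qib_cfgctxts is 0 (auto-size), dd->first_user_ctxt + num_online_cpus() can exceed the number of contexts the chip actually supports, so the new code clamps the result to dd->ctxtcnt. A minimal user-space sketch of just that branch, with plain parameters standing in for the qib_devdata fields (the names here are illustrative, not from the driver):

#include <stdio.h>

/* stand-ins for dd->first_user_ctxt, num_online_cpus(), dd->ctxtcnt */
static unsigned auto_size_ctxts(unsigned first_user_ctxt,
                                unsigned online_cpus,
                                unsigned hw_ctxtcnt)
{
	unsigned cfgctxts = first_user_ctxt + online_cpus;

	/* new in this patch: never configure more contexts than exist */
	if (cfgctxts > hw_ctxtcnt)
		cfgctxts = hw_ctxtcnt;
	return cfgctxts;
}

int main(void)
{
	/* e.g. 2 kernel contexts + 64 CPUs, but only 18 contexts on chip */
	printf("%u\n", auto_size_ctxts(2, 64, 18));	/* prints 18 */
	return 0;
}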
@@ -268,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd)
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
 		qib_dev_err(dd, "failed to allocate shadow page * "
 			    "array, no expected sends!\n");
 		goto bail;
 	}
 
-	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
 		qib_dev_err(dd, "failed to allocate shadow dma handle "
 			    "array, no expected sends!\n");
 		goto bail_free;
 	}
 
-	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
 	return;
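vzalloc() is vmalloc() followed by zero-filling, so the two memset() calls dropped at the end of this hunk were redundant once the allocations switched over. A small sketch of the equivalence (kernel code, assuming <linux/vmalloc.h>):

#include <linux/vmalloc.h>
#include <linux/string.h>

/* before: allocate, then zero by hand */
static void *alloc_zeroed_old(size_t size)
{
	void *buf = vmalloc(size);

	if (buf)
		memset(buf, 0, size);
	return buf;
}

/* after: one call allocates and zeroes */
static void *alloc_zeroed_new(size_t size)
{
	return vzalloc(size);
}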
@@ -348,7 +346,7 @@ done:
  * @dd: the qlogic_ib device
  *
  * sanity check at least some of the values after reset, and
- * ensure no receive or transmit (explictly, in case reset
+ * ensure no receive or transmit (explicitly, in case reset
  * failed
  */
 static int init_after_reset(struct qib_devdata *dd)
@@ -1045,24 +1043,10 @@ static int __init qlogic_ib_init(void)
 	if (ret)
 		goto bail;
 
-	/*
-	 * We create our own workqueue mainly because we want to be
-	 * able to flush it when devices are being removed. We can't
-	 * use schedule_work()/flush_scheduled_work() because both
-	 * unregister_netdev() and linkwatch_event take the rtnl lock,
-	 * so flush_scheduled_work() can deadlock during device
-	 * removal.
-	 */
-	qib_wq = create_workqueue("qib");
-	if (!qib_wq) {
-		ret = -ENOMEM;
-		goto bail_dev;
-	}
-
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_wq;
+		goto bail_dev;
 	}
 
 	/*
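The deleted comment explained why the driver once kept a private workqueue: flush_scheduled_work() flushes the system workqueue, and work items there (unregister_netdev(), linkwatch_event) take the rtnl lock, so flushing during device removal could deadlock. The patch drops qib_wq and relies on ib_wq, the global workqueue the IB core exports in <rdma/ib_verbs.h>, which the driver can flush safely on its own. A sketch of the queue/flush pattern; my_handler and my_work are hypothetical stand-ins for the driver's deferred work items:

#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>	/* extern struct workqueue_struct *ib_wq; */

static void my_handler(struct work_struct *work)
{
	/* deferred work, e.g. QSFP cable event handling */
}

static DECLARE_WORK(my_work, my_handler);

static void submit_and_drain(void)
{
	queue_work(ib_wq, &my_work);	/* was: schedule_work(&my_work) */
	flush_workqueue(ib_wq);		/* was: flush_scheduled_work() */
}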
@@ -1092,8 +1076,6 @@ bail_unit:
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
-bail_wq:
-	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
@@ -1117,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
 	pci_unregister_driver(&qib_driver);
 
-	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);
 
 	qib_cpulist_count = 0;
@@ -1243,6 +1224,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 		qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
 			      "work if CONFIG_PCI_MSI is not enabled\n",
 			      ent->device);
+		dd = ERR_PTR(-ENODEV);
 #endif
 		break;
 
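With CONFIG_PCI_MSI disabled, this switch arm used to fall through without marking dd as failed; assigning ERR_PTR(-ENODEV) encodes the errno in the pointer itself, so error handling later in qib_init_one() (presumably an IS_ERR() check, outside this hunk) can bail out. A generic sketch of the <linux/err.h> convention; struct foo and both helpers are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

static struct foo *foo_create(bool supported)
{
	if (!supported)
		return ERR_PTR(-ENODEV);	/* errno hidden in the pointer */
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

static int foo_probe(bool supported)
{
	struct foo *f = foo_create(supported);

	if (IS_ERR(f))
		return PTR_ERR(f);	/* recovers -ENODEV */
	if (!f)
		return -ENOMEM;
	kfree(f);
	return 0;
}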
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
 	qib_stop_timers(dd);
 
-	/* wait until all of our (qsfp) schedule_work() calls complete */
-	flush_scheduled_work();
+	/* wait until all of our (qsfp) queue_work() calls complete */
+	flush_workqueue(ib_wq);
 
 	ret = qibfs_remove(dd);
 	if (ret)