Diffstat (limited to 'drivers/infiniband/hw/qib/qib_init.c')
-rw-r--r--	drivers/infiniband/hw/qib/qib_init.c | 33 ++++++---------------------------
1 file changed, 6 insertions(+), 27 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 7896afbb9ce..ffefb78b894 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -270,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd)
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
 		qib_dev_err(dd, "failed to allocate shadow page * "
 			"array, no expected sends!\n");
 		goto bail;
 	}
 
-	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
 		qib_dev_err(dd, "failed to allocate shadow dma handle "
 			"array, no expected sends!\n");
 		goto bail_free;
 	}
 
-	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
 	return;
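
The hunk above folds the open-coded vmalloc()-then-memset() pair into vzalloc(), which returns virtually contiguous memory that is already zero-filled, so each array is zeroed the moment it exists. A minimal sketch of the equivalence (the shadow_bytes parameter and function names are illustrative, not from the patch):

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Before: allocate, then zero the whole region by hand. */
static struct page **alloc_shadow_old(size_t shadow_bytes)
{
	struct page **pages = vmalloc(shadow_bytes);

	if (pages)
		memset(pages, 0, shadow_bytes);
	return pages;
}

/* After: vzalloc() hands back already-zeroed memory in one call. */
static struct page **alloc_shadow_new(size_t shadow_bytes)
{
	return vzalloc(shadow_bytes);
}
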
@@ -1047,24 +1043,10 @@ static int __init qlogic_ib_init(void)
 	if (ret)
 		goto bail;
 
-	/*
-	 * We create our own workqueue mainly because we want to be
-	 * able to flush it when devices are being removed. We can't
-	 * use schedule_work()/flush_scheduled_work() because both
-	 * unregister_netdev() and linkwatch_event take the rtnl lock,
-	 * so flush_scheduled_work() can deadlock during device
-	 * removal.
-	 */
-	qib_wq = create_workqueue("qib");
-	if (!qib_wq) {
-		ret = -ENOMEM;
-		goto bail_dev;
-	}
-
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_wq;
+		goto bail_dev;
 	}
 
 	/*
@@ -1094,8 +1076,6 @@ bail_unit:
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
-bail_wq:
-	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
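
Dropping qib_wq also shortens the error-unwind ladder in qlogic_ib_init(): with one fewer resource to release, the qib_cq_wq failure path now jumps straight to bail_dev and the bail_wq label disappears, as the two hunks above show. A minimal sketch of the idiom, assuming illustrative stub names rather than the driver's real functions:

/* Stubs standing in for qib_dev_init() and workqueue creation. */
static int acquire_a(void) { return 0; }
static void release_a(void) { }
static int acquire_b(void) { return 0; }

static int example_init(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto bail;

	ret = acquire_b();
	if (ret)
		goto bail_a;	/* unwind only what already succeeded */

	return 0;

bail_a:
	release_a();
bail:
	return ret;
}
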
@@ -1119,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
 	pci_unregister_driver(&qib_driver);
 
-	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);
 
 	qib_cpulist_count = 0;
@@ -1292,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
@@ -1341,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
 	qib_stop_timers(dd);
 
-	/* wait until all of our (qsfp) schedule_work() calls complete */
-	flush_scheduled_work();
+	/* wait until all of our (qsfp) queue_work() calls complete */
+	flush_workqueue(ib_wq);
 
 	ret = qibfs_remove(dd);
 	if (ret)
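
With the driver-private qib_wq gone, the driver's deferred work is expected to run on the shared InfiniBand workqueue ib_wq that the IB core exports via <rdma/ib_verbs.h>, and device teardown flushes only that queue instead of calling flush_scheduled_work(). That sidesteps the rtnl-lock deadlock described in the deleted comment without each driver maintaining its own workqueue. A hedged sketch of the queue/flush pairing, assuming a hypothetical QSFP work item (qsfp_work and qsfp_handler are illustrative names, not from this file):

#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>	/* declares the shared IB core workqueue, ib_wq */

static void qsfp_handler(struct work_struct *work)
{
	/* service the QSFP event here */
}

static DECLARE_WORK(qsfp_work, qsfp_handler);

static void example_queue(void)
{
	/* Queue onto ib_wq rather than the system workqueue. */
	queue_work(ib_wq, &qsfp_work);
}

static void example_teardown(void)
{
	/*
	 * Flushing ib_wq waits only for IB-related work items, so it
	 * cannot deadlock on the rtnl lock the way a blanket
	 * flush_scheduled_work() could during device removal.
	 */
	flush_workqueue(ib_wq);
}
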