author     Linus Torvalds <torvalds@linux-foundation.org>   2011-01-17 17:45:48 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-01-17 17:45:48 -0500
commit     6845a44a314c0c626549de373131bf108f9cc1f1 (patch)
tree       9152a5c557434515dd7306d288e8f55ecc64652e /drivers/infiniband/hw/qib
parent     eee2a817df7c5a6e569f353f8be78cc1b3604bb6 (diff)
parent     4790f4dc5f4326dab5d81ed8fb8c9473e620bdbb (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA: Update workqueue usage
RDMA/nes: Fix incorrect SFP+ link status detection on driver init
RDMA/nes: Fix SFP+ link down detection issue with switch port disable
RDMA/nes: Generate IB_EVENT_PORT_ERR/PORT_ACTIVE events
RDMA/nes: Fix bonding on iw_nes
IB/srp: Test only once whether iu allocation succeeded
IB/mlx4: Handle protocol field in multicast table
RDMA: Use vzalloc() to replace vmalloc()+memset(0)
mlx4_{core, ib, en}: Fix driver when sizeof (phys_addr_t) > sizeof (long)
IB/mthca: Fix driver when sizeof (phys_addr_t) > sizeof (long)
Diffstat (limited to 'drivers/infiniband/hw/qib')
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c |  7
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 14
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c    | 33
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c    |  9
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h   |  3
5 files changed, 21 insertions, 45 deletions
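
All of the qib hunks below apply one mechanical conversion from the "RDMA: Update workqueue usage" commit in this merge: work that used to ride the system workqueue (or the driver-private qib_wq) moves to the IB core's shared ib_wq, declared in include/rdma/ib_verbs.h. A minimal sketch of the pattern, assuming the core has already allocated the shared queue; my_work and my_dwork are placeholder names, not qib symbols:

#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>      /* declares: extern struct workqueue_struct *ib_wq; */

static struct work_struct my_work;      /* placeholder work item */
static struct delayed_work my_dwork;    /* placeholder delayed work item */

static void enqueue_pattern(void)
{
        /* before: schedule_work(&my_work); */
        queue_work(ib_wq, &my_work);

        /* before: schedule_delayed_work(&my_dwork, msecs_to_jiffies(2)); */
        queue_delayed_work(ib_wq, &my_dwork, msecs_to_jiffies(2));
}

static void drain_pattern(void)
{
        /*
         * before: flush_scheduled_work(), which drains every user of the
         * system workqueue; flushing only ib_wq waits on strictly less.
         */
        flush_workqueue(ib_wq);
}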
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 127a0d5069f0..de799f17cb9e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
         ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         wake_up(&ppd->cpspec->autoneg_wait);
-        cancel_delayed_work(&ppd->cpspec->autoneg_work);
-        flush_scheduled_work();
+        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
         shutdown_7220_relock_poll(ppd->dd);
         val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
         toggle_7220_rclkrls(ppd->dd);
         /* 2 msec is minimum length of a poll cycle */
-        schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                              msecs_to_jiffies(2));
+        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                           msecs_to_jiffies(2));
 }
 
 /*
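
The quiet_serdes hunk above also shows the teardown half of the conversion: cancel_delayed_work() only removes a not-yet-started timer, so the old code needed a global flush_scheduled_work() to wait for a callback already in flight. cancel_delayed_work_sync() cancels and waits in one call. A sketch; autoneg_dwork is a placeholder, not a qib symbol:

static struct delayed_work autoneg_dwork;       /* placeholder */

static void quiet_pattern(void)
{
        /*
         * before:
         *   cancel_delayed_work(&autoneg_dwork);  - drops a pending timer only
         *   flush_scheduled_work();               - waits for a running callback
         * after: one call that does both, without draining unrelated work:
         */
        cancel_delayed_work_sync(&autoneg_dwork);
}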
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index abd409d592ef..50cceb3ab885 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2406,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
         ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         wake_up(&ppd->cpspec->autoneg_wait);
-        cancel_delayed_work(&ppd->cpspec->autoneg_work);
+        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
         if (ppd->dd->cspec->r1)
-                cancel_delayed_work(&ppd->cpspec->ipg_work);
-        flush_scheduled_work();
+                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
         ppd->cpspec->chase_end = 0;
         if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                         if (!(pins & mask)) {
                                 ++handled;
                                 qd->t_insert = get_jiffies_64();
-                                schedule_work(&qd->work);
+                                queue_work(ib_wq, &qd->work);
                         }
                 }
         }
@@ -4990,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
         set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
         qib_7322_mini_pcs_reset(ppd);
         /* 2 msec is minimum length of a poll cycle */
-        schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                              msecs_to_jiffies(2));
+        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                           msecs_to_jiffies(2));
 }
 
 /*
@@ -5121,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
         ib_free_send_mad(send_buf);
 retry:
         delay = 2 << ppd->cpspec->ipg_tries;
-        schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+                           msecs_to_jiffies(delay));
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 7896afbb9ce8..ffefb78b8949 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -270,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd)
         struct page **pages;
         dma_addr_t *addrs;
 
-        pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+        pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
         if (!pages) {
                 qib_dev_err(dd, "failed to allocate shadow page * "
                             "array, no expected sends!\n");
                 goto bail;
         }
 
-        addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+        addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
         if (!addrs) {
                 qib_dev_err(dd, "failed to allocate shadow dma handle "
                             "array, no expected sends!\n");
                 goto bail_free;
         }
 
-        memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-        memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
         dd->pageshadow = pages;
         dd->physshadow = addrs;
         return;
@@ -1047,24 +1043,10 @@ static int __init qlogic_ib_init(void)
         if (ret)
                 goto bail;
 
-        /*
-         * We create our own workqueue mainly because we want to be
-         * able to flush it when devices are being removed.  We can't
-         * use schedule_work()/flush_scheduled_work() because both
-         * unregister_netdev() and linkwatch_event take the rtnl lock,
-         * so flush_scheduled_work() can deadlock during device
-         * removal.
-         */
-        qib_wq = create_workqueue("qib");
-        if (!qib_wq) {
-                ret = -ENOMEM;
-                goto bail_dev;
-        }
-
         qib_cq_wq = create_singlethread_workqueue("qib_cq");
         if (!qib_cq_wq) {
                 ret = -ENOMEM;
-                goto bail_wq;
+                goto bail_dev;
         }
 
         /*
@@ -1094,8 +1076,6 @@ bail_unit:
         idr_destroy(&qib_unit_table);
 bail_cq_wq:
         destroy_workqueue(qib_cq_wq);
-bail_wq:
-        destroy_workqueue(qib_wq);
 bail_dev:
         qib_dev_cleanup();
 bail:
@@ -1119,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
         pci_unregister_driver(&qib_driver);
 
-        destroy_workqueue(qib_wq);
         destroy_workqueue(qib_cq_wq);
 
         qib_cpulist_count = 0;
@@ -1292,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
         if (qib_mini_init || initfail || ret) {
                 qib_stop_timers(dd);
-                flush_scheduled_work();
+                flush_workqueue(ib_wq);
                 for (pidx = 0; pidx < dd->num_pports; ++pidx)
                         dd->f_quiet_serdes(dd->pport + pidx);
                 if (qib_mini_init)
@@ -1341,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
         qib_stop_timers(dd);
 
-        /* wait until all of our (qsfp) schedule_work() calls complete */
-        flush_scheduled_work();
+        /* wait until all of our (qsfp) queue_work() calls complete */
+        flush_workqueue(ib_wq);
 
         ret = qibfs_remove(dd);
         if (ret)
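
The init_shadow_tids() hunk above comes from the merge's "RDMA: Use vzalloc() to replace vmalloc()+memset(0)" commit. A before/after sketch with an illustrative allocator; n stands in for the qib sizing (dd->cfgctxts * dd->rcvtidcnt):

#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static dma_addr_t *shadow_alloc_old(size_t n)
{
        dma_addr_t *addrs = vmalloc(n * sizeof(dma_addr_t));

        if (!addrs)
                return NULL;
        memset(addrs, 0, n * sizeof(dma_addr_t));       /* separate zeroing step */
        return addrs;
}

static dma_addr_t *shadow_alloc_new(size_t n)
{
        return vzalloc(n * sizeof(dma_addr_t));         /* allocated and zeroed in one call */
}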
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 35b3604b691d..3374a52232c1 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
                 goto bail;
         /* We see a module, but it may be unwise to look yet. Just schedule */
         qd->t_insert = get_jiffies_64();
-        schedule_work(&qd->work);
+        queue_work(ib_wq, &qd->work);
 bail:
         return;
 }
@@ -493,10 +493,9 @@ bail:
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
         /*
-         * There is nothing to do here for now.  our
-         * work is scheduled with schedule_work(), and
-         * flush_scheduled_work() from remove_one will
-         * block until all work ssetup with schedule_work()
+         * There is nothing to do here for now.  our work is scheduled
+         * with queue_work(), and flush_workqueue() from remove_one
+         * will block until all work setup with queue_work()
          * completes.
          */
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 63b22a9a7feb..95e5b47223b3 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
                !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
         if (qib_send_ok(qp))
-                queue_work(qib_wq, &qp->s_work);
+                queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
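
Worth noting: qib_cq_wq survives while qib_wq is deleted. qib_schedule_send() work is safe on the shared, multithreaded ib_wq, whereas the CQ queue was created single-threaded, presumably for its ordering guarantee; that rationale is an inference from the code, not something the merge states. The contrast as it stands after this merge:

/* kept in qib_init.c: a private, single-threaded queue for CQ callbacks */
qib_cq_wq = create_singlethread_workqueue("qib_cq");

/* removed: the private general-purpose queue; its users now share ib_wq */
/* qib_wq = create_workqueue("qib"); */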