author	Jan-Bernd Themann <ossthema@de.ibm.com>	2007-10-08 10:01:33 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:55:27 -0400
commit	3bf76b81608479a10077bd6b55972d40db782067 (patch)
tree	f6a4e251c1db5c3723e90c4c6dbff82cd5e356b4 /drivers/net
parent	ddfce6bb43c6bf1c9956e7a65ce1b2e19a156bd2 (diff)
ehea: use kernel event queue
The eHEA recovery and DLPAR functions are only rarely called, so the driver-private eHEA workqueues are replaced by the kernel's shared event queue.

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
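The change is mechanical: instead of each adapter (and the driver as a whole) creating and destroying its own workqueue, the rarely-run work items are handed to the kernel's shared event queue with schedule_work() and drained with flush_scheduled_work(). A minimal sketch of the pattern (not part of this patch; the module, handler, and work-item names are hypothetical, assuming a 2.6.23-era kernel):

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical work handler, standing in for the driver's reset/recovery task. */
static void example_recovery_task(struct work_struct *work)
{
	/* slow-path recovery work runs here, in process context */
}

static DECLARE_WORK(example_work, example_recovery_task);

static int __init example_init(void)
{
	/*
	 * Old pattern (what this patch removes):
	 *   wq = create_workqueue("example_wq");
	 *   queue_work(wq, &example_work);
	 * New pattern: queue onto the shared kernel event queue.
	 */
	schedule_work(&example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/*
	 * Wait for any queued work to finish before the module goes away;
	 * this replaces flush_workqueue()/destroy_workqueue() on a private queue.
	 */
	flush_scheduled_work();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

For work that fires only on rare recovery or DLPAR events, this keeps a single shared worker pool instead of a dedicated kernel thread per adapter.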
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ehea/ehea.h	3
-rw-r--r--	drivers/net/ehea/ehea_main.c	28
-rw-r--r--	drivers/net/ehea/ehea_qmr.c	3
3 files changed, 10 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 30220894b01f..ac21526b6de8 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0077"
+#define DRV_VERSION	"EHEA_0078"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -391,7 +391,6 @@ struct ehea_adapter {
 	struct ibmebus_dev *ebus_dev;
 	struct ehea_port *port[EHEA_MAX_PORTS];
 	struct ehea_eq *neq;       /* notification event queue */
-	struct workqueue_struct *ehea_wq;
 	struct tasklet_struct neq_tasklet;
 	struct ehea_mr mr;
 	u32 pd;                    /* protection domain */
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 5bc0a1530eb7..2ba57e6ace4d 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -94,7 +94,6 @@ MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
 static int port_name_cnt = 0;
 static LIST_HEAD(adapter_list);
 u64 ehea_driver_flags = 0;
-struct workqueue_struct *ehea_driver_wq;
 struct work_struct ehea_rereg_mr_task;
 
 struct semaphore dlpar_mem_lock;
@@ -421,7 +420,7 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 
 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
 		ehea_error("Critical receive error. Resetting port.");
-		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
+		schedule_work(&pr->port->reset_task);
 		return 1;
 	}
 
@@ -596,8 +595,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 			ehea_error("Send Completion Error: Resetting port");
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			queue_work(pr->port->adapter->ehea_wq,
-				   &pr->port->reset_task);
+			schedule_work(&pr->port->reset_task);
 			break;
 		}
 
@@ -716,7 +714,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	queue_work(port->adapter->ehea_wq, &port->reset_task);
+	schedule_work(&port->reset_task);
 
 	return IRQ_HANDLED;
 }
@@ -2395,7 +2393,7 @@ static int ehea_stop(struct net_device *dev)
 	if (netif_msg_ifdown(port))
 		ehea_info("disabling port %s", dev->name);
 
-	flush_workqueue(port->adapter->ehea_wq);
+	flush_scheduled_work();
 	down(&port->port_lock);
 	netif_stop_queue(dev);
 	ret = ehea_down(dev);
@@ -2710,7 +2708,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
 
 	if (netif_carrier_ok(dev) &&
 	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-		queue_work(port->adapter->ehea_wq, &port->reset_task);
+		schedule_work(&port->reset_task);
 }
 
 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
@@ -3243,15 +3241,9 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 		goto out_kill_eq;
 	}
 
-	adapter->ehea_wq = create_workqueue("ehea_wq");
-	if (!adapter->ehea_wq) {
-		ret = -EIO;
-		goto out_free_irq;
-	}
-
 	ret = ehea_create_device_sysfs(dev);
 	if (ret)
-		goto out_kill_wq;
+		goto out_free_irq;
 
 	ret = ehea_setup_ports(adapter);
 	if (ret) {
@@ -3265,9 +3257,6 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 out_rem_dev_sysfs:
 	ehea_remove_device_sysfs(dev);
 
-out_kill_wq:
-	destroy_workqueue(adapter->ehea_wq);
-
 out_free_irq:
 	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
 
@@ -3293,7 +3282,7 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
 
 	ehea_remove_device_sysfs(dev);
 
-	destroy_workqueue(adapter->ehea_wq);
+	flush_scheduled_work();
 
 	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
 	tasklet_kill(&adapter->neq_tasklet);
@@ -3351,7 +3340,6 @@ int __init ehea_module_init(void)
 	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
 	       DRV_VERSION);
 
-	ehea_driver_wq = create_workqueue("ehea_driver_wq");
 
 	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
 	sema_init(&dlpar_mem_lock, 1);
@@ -3385,7 +3373,7 @@ out:
 
 static void __exit ehea_module_exit(void)
 {
-	destroy_workqueue(ehea_driver_wq);
+	flush_scheduled_work();
 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
 	ibmebus_unregister_driver(&ehea_driver);
 	ehea_destroy_busmap();
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 329a25248d75..83b76432b41a 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -34,7 +34,6 @@
 
 struct ehea_busmap ehea_bmap = { 0, 0, NULL };
 extern u64 ehea_driver_flags;
-extern struct workqueue_struct *ehea_driver_wq;
 extern struct work_struct ehea_rereg_mr_task;
 
 
@@ -618,7 +617,7 @@ u64 ehea_map_vaddr(void *caddr)
 
 	if (unlikely(mapped_addr == -1))
 		if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-			queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
+			schedule_work(&ehea_rereg_mr_task);
 
 	return mapped_addr;
 }