Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/fusion/mptfc.c   | 14
-rw-r--r--  drivers/message/fusion/mptlan.c  | 29
-rw-r--r--  drivers/message/fusion/mptsas.c  | 25
-rw-r--r--  drivers/message/fusion/mptspi.c  | 14
-rw-r--r--  drivers/message/i2o/driver.c     |  2
-rw-r--r--  drivers/message/i2o/exec-osm.c   | 13
-rw-r--r--  drivers/message/i2o/i2o_block.c  | 15
-rw-r--r--  drivers/message/i2o/i2o_block.h  |  2
8 files changed, 67 insertions(+), 47 deletions(-)
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 1dd491773150..ef2b55e19910 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1018,9 +1018,10 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
 }
 
 static void
-mptfc_setup_reset(void *arg)
+mptfc_setup_reset(struct work_struct *work)
 {
-        MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+        MPT_ADAPTER *ioc =
+                container_of(work, MPT_ADAPTER, fc_setup_reset_work);
         u64 pn;
         struct mptfc_rport_info *ri;
 
@@ -1043,9 +1044,10 @@ mptfc_setup_reset(void *arg)
 }
 
 static void
-mptfc_rescan_devices(void *arg)
+mptfc_rescan_devices(struct work_struct *work)
 {
-        MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+        MPT_ADAPTER *ioc =
+                container_of(work, MPT_ADAPTER, fc_rescan_work);
         int ii;
         u64 pn;
         struct mptfc_rport_info *ri;
@@ -1154,8 +1156,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         }
 
         spin_lock_init(&ioc->fc_rescan_work_lock);
-        INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
-        INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc);
+        INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+        INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
 
         spin_lock_irqsave(&ioc->FreeQlock, flags);
 
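The mptfc hunks above show the core of the workqueue API conversion: the handler now takes a struct work_struct * instead of a void *, recovers its context with container_of() on the embedded work item, and INIT_WORK() drops its data argument. A minimal sketch of that pattern, using hypothetical names (my_adapter, my_reset_handler) rather than the driver's own types:

/* Sketch only: my_adapter and my_reset_handler are hypothetical names. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_adapter {
        int                     id;
        struct work_struct      reset_work;     /* work item embedded in the object */
};

static void my_reset_handler(struct work_struct *work)
{
        /* Recover the containing object from the work_struct pointer. */
        struct my_adapter *adap =
                container_of(work, struct my_adapter, reset_work);

        printk(KERN_INFO "resetting adapter %d\n", adap->id);
}

static void my_adapter_init(struct my_adapter *adap)
{
        /* No third "data" argument any more; context comes from container_of(). */
        INIT_WORK(&adap->reset_work, my_reset_handler);
}

The same three changes repeat, hunk for hunk, in the other handlers converted below.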
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 314c3a27585d..b7c4407c5e3f 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -111,7 +111,8 @@ struct mpt_lan_priv {
         u32 total_received;
         struct net_device_stats stats; /* Per device statistics */
 
-        struct work_struct post_buckets_task;
+        struct delayed_work post_buckets_task;
+        struct net_device *dev;
         unsigned long post_buckets_active;
 };
 
@@ -132,7 +133,7 @@ static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
 static int mpt_lan_open(struct net_device *dev);
 static int mpt_lan_reset(struct net_device *dev);
 static int mpt_lan_close(struct net_device *dev);
-static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
                                            int priority);
 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
@@ -345,7 +346,7 @@ mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
         } else {
-                mpt_lan_post_receive_buckets(dev);
+                mpt_lan_post_receive_buckets(priv);
                 netif_wake_queue(dev);
         }
 
@@ -441,7 +442,7 @@ mpt_lan_open(struct net_device *dev)
 
         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
 
-        mpt_lan_post_receive_buckets(dev);
+        mpt_lan_post_receive_buckets(priv);
         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
                IOC_AND_NETDEV_NAMES_s_s(dev));
 
@@ -854,7 +855,7 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
 
         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
                 if (priority) {
-                        schedule_work(&priv->post_buckets_task);
+                        schedule_delayed_work(&priv->post_buckets_task, 0);
                 } else {
                         schedule_delayed_work(&priv->post_buckets_task, 1);
                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
@@ -1188,10 +1189,9 @@ mpt_lan_receive_post_reply(struct net_device *dev,
 /* Simple SGE's only at the moment */
 
 static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 {
-        struct net_device *dev = dev_id;
-        struct mpt_lan_priv *priv = dev->priv;
+        struct net_device *dev = priv->dev;
         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
         MPT_FRAME_HDR *mf;
         LANReceivePostRequest_t *pRecvReq;
@@ -1335,6 +1335,13 @@ out:
         clear_bit(0, &priv->post_buckets_active);
 }
 
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+        mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+                                                  post_buckets_task.work));
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static struct net_device *
 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1350,11 +1357,13 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
 
         priv = netdev_priv(dev);
 
+        priv->dev = dev;
         priv->mpt_dev = mpt_dev;
         priv->pnum = pnum;
 
-        memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
-        INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+        memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+        INIT_DELAYED_WORK(&priv->post_buckets_task,
+                          mpt_lan_post_receive_buckets_work);
         priv->post_buckets_active = 0;
 
         dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
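mptlan additionally converts post_buckets_task from a work_struct to a delayed_work, adds a dev back pointer so the worker no longer needs a void * argument, and wraps the real worker so container_of() can go through the .work member. A sketch of that arrangement under the same assumptions (my_lan_priv and the helpers are hypothetical names, not the driver's code):

/* Sketch only: my_lan_priv and the helper names are hypothetical. */
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_lan_priv {
        struct net_device       *dev;                   /* back pointer, replaces the old void *arg */
        struct delayed_work     post_buckets_task;      /* was a plain work_struct */
};

static void my_post_buckets(struct my_lan_priv *priv)
{
        netif_wake_queue(priv->dev);    /* stand-in for the real bucket posting */
}

static void my_post_buckets_work(struct work_struct *work)
{
        /* delayed_work embeds a work_struct, so container_of() goes through .work. */
        my_post_buckets(container_of(work, struct my_lan_priv,
                                     post_buckets_task.work));
}

static void my_lan_init(struct my_lan_priv *priv, struct net_device *dev)
{
        priv->dev = dev;
        INIT_DELAYED_WORK(&priv->post_buckets_task, my_post_buckets_work);

        /* "Run it now" becomes a zero-delay schedule_delayed_work(). */
        schedule_delayed_work(&priv->post_buckets_task, 0);
}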
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b752a479f6db..4f0c530e47b0 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2006,9 +2006,10 @@ __mptsas_discovery_work(MPT_ADAPTER *ioc)
  *(Mutex LOCKED)
  */
 static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
 {
-        struct mptsas_discovery_event *ev = arg;
+        struct mptsas_discovery_event *ev =
+                container_of(work, struct mptsas_discovery_event, work);
         MPT_ADAPTER *ioc = ev->ioc;
 
         mutex_lock(&ioc->sas_discovery_mutex);
@@ -2068,9 +2069,9 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id)
  * Work queue thread to clear the persitency table
  */
 static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
 {
-        MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+        MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
 
         mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
 }
@@ -2093,9 +2094,10 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
 {
-        struct mptsas_hotplug_event *ev = arg;
+        struct mptsas_hotplug_event *ev =
+                container_of(work, struct mptsas_hotplug_event, work);
         MPT_ADAPTER *ioc = ev->ioc;
         struct mptsas_phyinfo *phy_info;
         struct sas_rphy *rphy;
@@ -2341,7 +2343,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
                 break;
         }
 
-        INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+        INIT_WORK(&ev->work, mptsas_hotplug_work);
         ev->ioc = ioc;
         ev->handle = le16_to_cpu(sas_event_data->DevHandle);
         ev->parent_handle =
@@ -2366,7 +2368,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
                  * Persistent table is full.
                  */
                 INIT_WORK(&ioc->sas_persist_task,
-                    mptsas_persist_clear_table, (void *)ioc);
+                    mptsas_persist_clear_table);
                 schedule_work(&ioc->sas_persist_task);
                 break;
         case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
@@ -2395,7 +2397,7 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
                 return;
         }
 
-        INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+        INIT_WORK(&ev->work, mptsas_hotplug_work);
         ev->ioc = ioc;
         ev->id = raid_event_data->VolumeID;
         ev->event_type = MPTSAS_IGNORE_EVENT;
@@ -2474,7 +2476,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
         ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
         if (!ev)
                 return;
-        INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+        INIT_WORK(&ev->work, mptsas_discovery_work);
         ev->ioc = ioc;
         schedule_work(&ev->work);
 };
@@ -2511,8 +2513,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
                 break;
         case MPI_EVENT_PERSISTENT_TABLE_FULL:
                 INIT_WORK(&ioc->sas_persist_task,
-                    mptsas_persist_clear_table,
-                    (void *)ioc);
+                    mptsas_persist_clear_table);
                 schedule_work(&ioc->sas_persist_task);
                 break;
         case MPI_EVENT_SAS_DISCOVERY:
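In mptsas the work items are allocated per event, so the handler recovers the event with container_of() and stays responsible for freeing it once done. A rough sketch of that one-shot pattern, with hypothetical names (my_hp_event, my_hotplug_work):

/* Sketch only: my_hp_event and its helpers are hypothetical names. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_hp_event {
        struct work_struct      work;
        u16                     handle;
};

static void my_hotplug_work(struct work_struct *work)
{
        struct my_hp_event *ev =
                container_of(work, struct my_hp_event, work);

        printk(KERN_INFO "hotplug event, handle 0x%x\n", ev->handle);
        kfree(ev);              /* one-shot item: the handler frees it */
}

static void my_send_hotplug_event(u16 handle)
{
        struct my_hp_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

        if (!ev)
                return;
        /* The event pointer is no longer passed to INIT_WORK()... */
        INIT_WORK(&ev->work, my_hotplug_work);
        ev->handle = handle;
        /* ...the handler gets it back with container_of() instead. */
        schedule_work(&ev->work);
}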
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e4cc3dd5fc9f..f422c0d0621c 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -646,9 +646,10 @@ struct work_queue_wrapper {
         int disk;
 };
 
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
 {
-        struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+        struct work_queue_wrapper *wqw =
+                container_of(work, struct work_queue_wrapper, work);
         struct _MPT_SCSI_HOST *hd = wqw->hd;
         struct Scsi_Host *shost = hd->ioc->sh;
         struct scsi_device *sdev;
@@ -695,7 +696,7 @@ static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk)
                             disk);
                 return;
         }
-        INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+        INIT_WORK(&wqw->work, mpt_work_wrapper);
         wqw->hd = hd;
         wqw->disk = disk;
 
@@ -784,9 +785,10 @@ MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
  * renegotiate for a given target
  */
 static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
 {
-        struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+        struct work_queue_wrapper *wqw =
+                container_of(work, struct work_queue_wrapper, work);
         struct _MPT_SCSI_HOST *hd = wqw->hd;
         struct scsi_device *sdev;
 
@@ -804,7 +806,7 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
         if (!wqw)
                 return;
 
-        INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+        INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
         wqw->hd = hd;
 
         schedule_work(&wqw->work);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 64130227574f..7fc7399bd2ec 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -232,7 +232,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
                         break;
                 }
 
-                INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+                INIT_WORK(&evt->work, drv->event);
                 queue_work(drv->event_queue, &evt->work);
                 return 1;
         }
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index a2350640384b..9e529d8dd5cb 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -371,8 +371,10 @@ static int i2o_exec_remove(struct device *dev)
  * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY
  * again, otherwise send LCT NOTIFY to get informed on next LCT change.
  */
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
 {
+        struct i2o_exec_lct_notify_work *work =
+                container_of(_work, struct i2o_exec_lct_notify_work, work);
         u32 change_ind = 0;
         struct i2o_controller *c = work->c;
 
@@ -439,8 +441,7 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
 
                 work->c = c;
 
-                INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
-                          work);
+                INIT_WORK(&work->work, i2o_exec_lct_modified);
                 queue_work(i2o_exec_driver.event_queue, &work->work);
                 return 1;
         }
@@ -460,13 +461,15 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
 
 /**
  * i2o_exec_event - Event handling function
- * @evt: Event which occurs
+ * @work: Work item in occurring event
  *
  * Handles events send by the Executive device. At the moment does not do
  * anything useful.
  */
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
 {
+        struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
         if (likely(evt->i2o_dev))
                 osm_debug("Event received from device: %d\n",
                           evt->i2o_dev->lct_data.tid);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index eaba81bf2eca..70ae00253321 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -419,16 +419,18 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
 
 /**
  * i2o_block_delayed_request_fn - delayed request queue function
- * delayed_request: the delayed request with the queue to start
+ * @work: the delayed request with the queue to start
  *
  * If the request queue is stopped for a disk, and there is no open
  * request, a new event is created, which calls this function to start
  * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
  * be started again.
  */
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
 {
-        struct i2o_block_delayed_request *dreq = delayed_request;
+        struct i2o_block_delayed_request *dreq =
+                container_of(work, struct i2o_block_delayed_request,
+                             work.work);
         struct request_queue *q = dreq->queue;
         unsigned long flags;
 
@@ -538,8 +540,9 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
         return 1;
 };
 
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
 {
+        struct i2o_event *evt = container_of(work, struct i2o_event, work);
         osm_debug("event received\n");
         kfree(evt);
 };
@@ -938,8 +941,8 @@ static void i2o_block_request_fn(struct request_queue *q)
                         continue;
 
                         dreq->queue = q;
-                        INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
-                                  dreq);
+                        INIT_DELAYED_WORK(&dreq->work,
+                                          i2o_block_delayed_request_fn);
 
                         if (!queue_delayed_work(i2o_block_driver.event_queue,
                                                 &dreq->work,
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 4fdaa5bda412..d9fdc95b440d 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -96,7 +96,7 @@ struct i2o_block_request {
 
 /* I2O Block device delayed request */
 struct i2o_block_delayed_request {
-        struct work_struct work;
+        struct delayed_work work;
         struct request_queue *queue;
 };
 
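i2o_block combines both halves of the conversion: the delayed request embeds a delayed_work (the i2o_block.h hunk above), the handler digs back through work.work, and the item is queued on the driver's own event queue with queue_delayed_work(). A sketch of that arrangement, with hypothetical names and the pre-multiqueue block API used by the surrounding 2006-era code:

/* Sketch only: my_delayed_request and my_defer_request are hypothetical. */
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_delayed_request {
        struct delayed_work     work;
        struct request_queue    *queue;
};

static void my_delayed_request_fn(struct work_struct *work)
{
        /* queue_delayed_work() hands the handler the inner work_struct. */
        struct my_delayed_request *dreq =
                container_of(work, struct my_delayed_request, work.work);
        struct request_queue *q = dreq->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);             /* restart the stopped queue */
        spin_unlock_irqrestore(q->queue_lock, flags);
        kfree(dreq);
}

static int my_defer_request(struct workqueue_struct *wq,
                            struct request_queue *q, unsigned long delay)
{
        struct my_delayed_request *dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);

        if (!dreq)
                return -ENOMEM;
        dreq->queue = q;
        INIT_DELAYED_WORK(&dreq->work, my_delayed_request_fn);
        return queue_delayed_work(wq, &dreq->work, delay) ? 0 : -EBUSY;
}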