aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/cio/device.c
diff options
context:
space:
mode:
authorPeter Oberparleiter <peter.oberparleiter@de.ibm.com>2009-12-07 06:51:19 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2009-12-07 06:51:30 -0500
commit37de53bb52908726c18fc84515792a5b2f454532 (patch)
treec372eb1065e393a5fccb8e3db1609ae2fbe5a098 /drivers/s390/cio/device.c
parent390935acac21f3ea1a130bdca8eb9397cb293643 (diff)
[S390] cio: introduce ccw device todos
Introduce a central mechanism for performing delayed ccw device work to ensure that different types of work do not overwrite each other. Prioritization ensures that the most important work is always performed while less important tasks are either obsoleted or repeated later.

Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/device.c')
-rw-r--r-- drivers/s390/cio/device.c 203
1 file changed, 100 insertions, 103 deletions
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 0dcfc0ee3d81..167446785d19 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -306,47 +306,6 @@ static void ccw_device_unregister(struct ccw_device *cdev)
306 } 306 }
307} 307}
308 308
309static void ccw_device_remove_orphan_cb(struct work_struct *work)
310{
311 struct ccw_device_private *priv;
312 struct ccw_device *cdev;
313
314 priv = container_of(work, struct ccw_device_private, kick_work);
315 cdev = priv->cdev;
316 ccw_device_unregister(cdev);
317 /* Release cdev reference for workqueue processing. */
318 put_device(&cdev->dev);
319}
320
321static void
322ccw_device_remove_disconnected(struct ccw_device *cdev)
323{
324 unsigned long flags;
325
326 /*
327 * Forced offline in disconnected state means
328 * 'throw away device'.
329 */
330 if (ccw_device_is_orphan(cdev)) {
331 /*
332 * Deregister ccw device.
333 * Unfortunately, we cannot do this directly from the
334 * attribute method.
335 */
336 /* Get cdev reference for workqueue processing. */
337 if (!get_device(&cdev->dev))
338 return;
339 spin_lock_irqsave(cdev->ccwlock, flags);
340 cdev->private->state = DEV_STATE_NOT_OPER;
341 spin_unlock_irqrestore(cdev->ccwlock, flags);
342 PREPARE_WORK(&cdev->private->kick_work,
343 ccw_device_remove_orphan_cb);
344 queue_work(slow_path_wq, &cdev->private->kick_work);
345 } else
346 /* Deregister subchannel, which will kill the ccw device. */
347 ccw_device_schedule_sch_unregister(cdev);
348}
349
350/** 309/**
351 * ccw_device_set_offline() - disable a ccw device for I/O 310 * ccw_device_set_offline() - disable a ccw device for I/O
352 * @cdev: target ccw device 311 * @cdev: target ccw device
@@ -494,9 +453,11 @@ error:
494 453
495static int online_store_handle_offline(struct ccw_device *cdev) 454static int online_store_handle_offline(struct ccw_device *cdev)
496{ 455{
497 if (cdev->private->state == DEV_STATE_DISCONNECTED) 456 if (cdev->private->state == DEV_STATE_DISCONNECTED) {
498 ccw_device_remove_disconnected(cdev); 457 spin_lock_irq(cdev->ccwlock);
499 else if (cdev->online && cdev->drv && cdev->drv->set_offline) 458 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
459 spin_unlock_irq(cdev->ccwlock);
460 } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
500 return ccw_device_set_offline(cdev); 461 return ccw_device_set_offline(cdev);
501 return 0; 462 return 0;
502} 463}
@@ -690,17 +651,10 @@ static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
690 return dev ? to_ccwdev(dev) : NULL; 651 return dev ? to_ccwdev(dev) : NULL;
691} 652}
692 653
693void ccw_device_do_unbind_bind(struct work_struct *work) 654static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
694{ 655{
695 struct ccw_device_private *priv;
696 struct ccw_device *cdev;
697 struct subchannel *sch;
698 int ret; 656 int ret;
699 657
700 priv = container_of(work, struct ccw_device_private, kick_work);
701 cdev = priv->cdev;
702 sch = to_subchannel(cdev->dev.parent);
703
704 if (test_bit(1, &cdev->private->registered)) { 658 if (test_bit(1, &cdev->private->registered)) {
705 device_release_driver(&cdev->dev); 659 device_release_driver(&cdev->dev);
706 ret = device_attach(&cdev->dev); 660 ret = device_attach(&cdev->dev);
@@ -735,6 +689,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
735 return ERR_PTR(-ENOMEM); 689 return ERR_PTR(-ENOMEM);
736} 690}
737 691
692static void ccw_device_todo(struct work_struct *work);
693
738static int io_subchannel_initialize_dev(struct subchannel *sch, 694static int io_subchannel_initialize_dev(struct subchannel *sch,
739 struct ccw_device *cdev) 695 struct ccw_device *cdev)
740{ 696{
@@ -742,7 +698,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
742 atomic_set(&cdev->private->onoff, 0); 698 atomic_set(&cdev->private->onoff, 0);
743 cdev->dev.parent = &sch->dev; 699 cdev->dev.parent = &sch->dev;
744 cdev->dev.release = ccw_device_release; 700 cdev->dev.release = ccw_device_release;
745 INIT_WORK(&cdev->private->kick_work, NULL); 701 INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
746 cdev->dev.groups = ccwdev_attr_groups; 702 cdev->dev.groups = ccwdev_attr_groups;
747 /* Do first half of device_register. */ 703 /* Do first half of device_register. */
748 device_initialize(&cdev->dev); 704 device_initialize(&cdev->dev);
@@ -797,17 +753,12 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
797/* 753/*
798 * Register recognized device. 754 * Register recognized device.
799 */ 755 */
800static void 756static void io_subchannel_register(struct ccw_device *cdev)
801io_subchannel_register(struct work_struct *work)
802{ 757{
803 struct ccw_device_private *priv;
804 struct ccw_device *cdev;
805 struct subchannel *sch; 758 struct subchannel *sch;
806 int ret; 759 int ret;
807 unsigned long flags; 760 unsigned long flags;
808 761
809 priv = container_of(work, struct ccw_device_private, kick_work);
810 cdev = priv->cdev;
811 sch = to_subchannel(cdev->dev.parent); 762 sch = to_subchannel(cdev->dev.parent);
812 /* 763 /*
813 * Check if subchannel is still registered. It may have become 764 * Check if subchannel is still registered. It may have become
@@ -859,41 +810,23 @@ out:
859 cdev->private->flags.recog_done = 1; 810 cdev->private->flags.recog_done = 1;
860 wake_up(&cdev->private->wait_q); 811 wake_up(&cdev->private->wait_q);
861out_err: 812out_err:
862 /* Release reference for workqueue processing. */
863 put_device(&cdev->dev);
864 if (atomic_dec_and_test(&ccw_device_init_count)) 813 if (atomic_dec_and_test(&ccw_device_init_count))
865 wake_up(&ccw_device_init_wq); 814 wake_up(&ccw_device_init_wq);
866} 815}
867 816
868static void ccw_device_call_sch_unregister(struct work_struct *work) 817static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
869{ 818{
870 struct ccw_device_private *priv;
871 struct ccw_device *cdev;
872 struct subchannel *sch; 819 struct subchannel *sch;
873 820
874 priv = container_of(work, struct ccw_device_private, kick_work);
875 cdev = priv->cdev;
876 /* Get subchannel reference for local processing. */ 821 /* Get subchannel reference for local processing. */
877 if (!get_device(cdev->dev.parent)) 822 if (!get_device(cdev->dev.parent))
878 return; 823 return;
879 sch = to_subchannel(cdev->dev.parent); 824 sch = to_subchannel(cdev->dev.parent);
880 css_sch_device_unregister(sch); 825 css_sch_device_unregister(sch);
881 /* Release cdev reference for workqueue processing.*/
882 put_device(&cdev->dev);
883 /* Release subchannel reference for local processing. */ 826 /* Release subchannel reference for local processing. */
884 put_device(&sch->dev); 827 put_device(&sch->dev);
885} 828}
886 829
887void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
888{
889 /* Get cdev reference for workqueue processing. */
890 if (!get_device(&cdev->dev))
891 return;
892 PREPARE_WORK(&cdev->private->kick_work,
893 ccw_device_call_sch_unregister);
894 queue_work(slow_path_wq, &cdev->private->kick_work);
895}
896
897/* 830/*
898 * subchannel recognition done. Called from the state machine. 831 * subchannel recognition done. Called from the state machine.
899 */ 832 */
@@ -909,7 +842,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
909 /* Device did not respond in time. */ 842 /* Device did not respond in time. */
910 case DEV_STATE_NOT_OPER: 843 case DEV_STATE_NOT_OPER:
911 cdev->private->flags.recog_done = 1; 844 cdev->private->flags.recog_done = 1;
912 ccw_device_schedule_sch_unregister(cdev); 845 /* Remove device found not operational. */
846 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
913 if (atomic_dec_and_test(&ccw_device_init_count)) 847 if (atomic_dec_and_test(&ccw_device_init_count))
914 wake_up(&ccw_device_init_wq); 848 wake_up(&ccw_device_init_wq);
915 break; 849 break;
@@ -918,11 +852,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
918 * We can't register the device in interrupt context so 852 * We can't register the device in interrupt context so
919 * we schedule a work item. 853 * we schedule a work item.
920 */ 854 */
921 if (!get_device(&cdev->dev)) 855 ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
922 break;
923 PREPARE_WORK(&cdev->private->kick_work,
924 io_subchannel_register);
925 queue_work(slow_path_wq, &cdev->private->kick_work);
926 break; 856 break;
927 } 857 }
928} 858}
@@ -1333,20 +1263,16 @@ static void ccw_device_schedule_recovery(void)
1333static int purge_fn(struct device *dev, void *data) 1263static int purge_fn(struct device *dev, void *data)
1334{ 1264{
1335 struct ccw_device *cdev = to_ccwdev(dev); 1265 struct ccw_device *cdev = to_ccwdev(dev);
1336 struct ccw_device_private *priv = cdev->private; 1266 struct ccw_dev_id *id = &cdev->private->dev_id;
1337 int unreg;
1338 1267
1339 spin_lock_irq(cdev->ccwlock); 1268 spin_lock_irq(cdev->ccwlock);
1340 unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) && 1269 if (is_blacklisted(id->ssid, id->devno) &&
1341 (priv->state == DEV_STATE_OFFLINE); 1270 (cdev->private->state == DEV_STATE_OFFLINE)) {
1271 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1272 id->devno);
1273 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1274 }
1342 spin_unlock_irq(cdev->ccwlock); 1275 spin_unlock_irq(cdev->ccwlock);
1343 if (!unreg)
1344 goto out;
1345 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
1346 priv->dev_id.devno);
1347 ccw_device_schedule_sch_unregister(cdev);
1348
1349out:
1350 /* Abort loop in case of pending signal. */ 1276 /* Abort loop in case of pending signal. */
1351 if (signal_pending(current)) 1277 if (signal_pending(current))
1352 return -EINTR; 1278 return -EINTR;
@@ -1456,12 +1382,14 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1456 goto out_unlock; 1382 goto out_unlock;
1457 if (work_pending(&sch->todo_work)) 1383 if (work_pending(&sch->todo_work))
1458 goto out_unlock; 1384 goto out_unlock;
1385 cdev = sch_get_cdev(sch);
1386 if (cdev && work_pending(&cdev->private->todo_work))
1387 goto out_unlock;
1459 action = sch_get_action(sch); 1388 action = sch_get_action(sch);
1460 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n", 1389 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1461 sch->schid.ssid, sch->schid.sch_no, process, 1390 sch->schid.ssid, sch->schid.sch_no, process,
1462 action); 1391 action);
1463 /* Perform immediate actions while holding the lock. */ 1392 /* Perform immediate actions while holding the lock. */
1464 cdev = sch_get_cdev(sch);
1465 switch (action) { 1393 switch (action) {
1466 case IO_SCH_REPROBE: 1394 case IO_SCH_REPROBE:
1467 /* Trigger device recognition. */ 1395 /* Trigger device recognition. */
@@ -1753,7 +1681,7 @@ static int ccw_device_pm_prepare(struct device *dev)
1753{ 1681{
1754 struct ccw_device *cdev = to_ccwdev(dev); 1682 struct ccw_device *cdev = to_ccwdev(dev);
1755 1683
1756 if (work_pending(&cdev->private->kick_work)) 1684 if (work_pending(&cdev->private->todo_work))
1757 return -EAGAIN; 1685 return -EAGAIN;
1758 /* Fail while device is being set online/offline. */ 1686 /* Fail while device is being set online/offline. */
1759 if (atomic_read(&cdev->private->onoff)) 1687 if (atomic_read(&cdev->private->onoff))
@@ -1874,7 +1802,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
1874 cdev->private->state = DEV_STATE_BOXED; 1802 cdev->private->state = DEV_STATE_BOXED;
1875 if (ccw_device_notify(cdev, CIO_BOXED)) 1803 if (ccw_device_notify(cdev, CIO_BOXED))
1876 return 0; 1804 return 0;
1877 ccw_device_schedule_sch_unregister(cdev); 1805 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1878 return -ENODEV; 1806 return -ENODEV;
1879} 1807}
1880 1808
@@ -1883,7 +1811,7 @@ static int resume_handle_disc(struct ccw_device *cdev)
1883 cdev->private->state = DEV_STATE_DISCONNECTED; 1811 cdev->private->state = DEV_STATE_DISCONNECTED;
1884 if (ccw_device_notify(cdev, CIO_GONE)) 1812 if (ccw_device_notify(cdev, CIO_GONE))
1885 return 0; 1813 return 0;
1886 ccw_device_schedule_sch_unregister(cdev); 1814 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1887 return -ENODEV; 1815 return -ENODEV;
1888} 1816}
1889 1817
@@ -1928,9 +1856,7 @@ static int ccw_device_pm_restore(struct device *dev)
1928 /* check if the device type has changed */ 1856 /* check if the device type has changed */
1929 if (!ccw_device_test_sense_data(cdev)) { 1857 if (!ccw_device_test_sense_data(cdev)) {
1930 ccw_device_update_sense_data(cdev); 1858 ccw_device_update_sense_data(cdev);
1931 PREPARE_WORK(&cdev->private->kick_work, 1859 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
1932 ccw_device_do_unbind_bind);
1933 queue_work(ccw_device_work, &cdev->private->kick_work);
1934 ret = -ENODEV; 1860 ret = -ENODEV;
1935 goto out_unlock; 1861 goto out_unlock;
1936 } 1862 }
@@ -1974,7 +1900,7 @@ out_disc_unlock:
1974 goto out_restore; 1900 goto out_restore;
1975 1901
1976out_unreg_unlock: 1902out_unreg_unlock:
1977 ccw_device_schedule_sch_unregister(cdev); 1903 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
1978 ret = -ENODEV; 1904 ret = -ENODEV;
1979out_unlock: 1905out_unlock:
1980 spin_unlock_irq(sch->lock); 1906 spin_unlock_irq(sch->lock);
@@ -2039,6 +1965,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
2039 return sch->schid; 1965 return sch->schid;
2040} 1966}
2041 1967
1968static void ccw_device_todo(struct work_struct *work)
1969{
1970 struct ccw_device_private *priv;
1971 struct ccw_device *cdev;
1972 struct subchannel *sch;
1973 enum cdev_todo todo;
1974
1975 priv = container_of(work, struct ccw_device_private, todo_work);
1976 cdev = priv->cdev;
1977 sch = to_subchannel(cdev->dev.parent);
1978 /* Find out todo. */
1979 spin_lock_irq(cdev->ccwlock);
1980 todo = priv->todo;
1981 priv->todo = CDEV_TODO_NOTHING;
1982 CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
1983 priv->dev_id.ssid, priv->dev_id.devno, todo);
1984 spin_unlock_irq(cdev->ccwlock);
1985 /* Perform todo. */
1986 switch (todo) {
1987 case CDEV_TODO_ENABLE_CMF:
1988 cmf_reenable(cdev);
1989 break;
1990 case CDEV_TODO_REBIND:
1991 ccw_device_do_unbind_bind(cdev);
1992 break;
1993 case CDEV_TODO_REGISTER:
1994 io_subchannel_register(cdev);
1995 break;
1996 case CDEV_TODO_UNREG_EVAL:
1997 if (!sch_is_pseudo_sch(sch))
1998 css_schedule_eval(sch->schid);
1999 /* fall-through */
2000 case CDEV_TODO_UNREG:
2001 if (sch_is_pseudo_sch(sch))
2002 ccw_device_unregister(cdev);
2003 else
2004 ccw_device_call_sch_unregister(cdev);
2005 break;
2006 default:
2007 break;
2008 }
2009 /* Release workqueue ref. */
2010 put_device(&cdev->dev);
2011}
2012
2013/**
2014 * ccw_device_sched_todo - schedule ccw device operation
2015 * @cdev: ccw device
2016 * @todo: todo
2017 *
2018 * Schedule the operation identified by @todo to be performed on the slow path
2019 * workqueue. Do nothing if another operation with higher priority is already
2020 * scheduled. Needs to be called with ccwdev lock held.
2021 */
2022void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2023{
2024 CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2025 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2026 todo);
2027 if (cdev->private->todo >= todo)
2028 return;
2029 cdev->private->todo = todo;
2030 /* Get workqueue ref. */
2031 if (!get_device(&cdev->dev))
2032 return;
2033 if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
2034 /* Already queued, release workqueue ref. */
2035 put_device(&cdev->dev);
2036 }
2037}
2038
2042MODULE_LICENSE("GPL"); 2039MODULE_LICENSE("GPL");
2043EXPORT_SYMBOL(ccw_device_set_online); 2040EXPORT_SYMBOL(ccw_device_set_online);
2044EXPORT_SYMBOL(ccw_device_set_offline); 2041EXPORT_SYMBOL(ccw_device_set_offline);