author      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>      2009-12-07 06:51:19 -0500
committer   Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>   2009-12-07 06:51:30 -0500
commit      37de53bb52908726c18fc84515792a5b2f454532 (patch)
tree        c372eb1065e393a5fccb8e3db1609ae2fbe5a098 /drivers
parent      390935acac21f3ea1a130bdca8eb9397cb293643 (diff)
[S390] cio: introduce ccw device todos
Introduce a central mechanism for performing delayed ccw device work
to ensure that different types of work do not overwrite each other.
Prioritization ensures that the most important work is always
performed while less important tasks are either obsoleted or repeated
later.
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
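
[Editor's illustration] To make the scheduling rule concrete, here is a minimal user-space sketch; it is not part of the patch, and the names dev_todo, sched_todo() and run_todo() are invented for illustration. The real implementation is ccw_device_sched_todo()/ccw_device_todo() in the diff below, which additionally takes the ccwdev lock, holds a device reference, and runs on the cio slow-path workqueue.

/*
 * Sketch of a single prioritized "todo" slot: a later request may only
 * replace an earlier one if it is more important; otherwise it is dropped.
 */
#include <stdio.h>

/* Ordered by priority: higher value = more important work. */
enum dev_todo {
	TODO_NOTHING,
	TODO_ENABLE_CMF,
	TODO_REBIND,
	TODO_REGISTER,
	TODO_UNREG,
	TODO_UNREG_EVAL,
};

static enum dev_todo pending = TODO_NOTHING;

/* Record a request; keep only the most important pending one. */
static void sched_todo(enum dev_todo todo)
{
	if (pending >= todo)
		return;	/* equal or higher priority work already scheduled */
	pending = todo;
}

/* Worker: fetch and reset the pending todo, then act on it. */
static void run_todo(void)
{
	enum dev_todo todo = pending;

	pending = TODO_NOTHING;
	printf("performing todo %d\n", todo);
}

int main(void)
{
	sched_todo(TODO_REBIND);	/* less important request */
	sched_todo(TODO_UNREG);		/* overrides the rebind */
	sched_todo(TODO_ENABLE_CMF);	/* ignored: lower priority */
	run_todo();			/* performs TODO_UNREG only */
	return 0;
}
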
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/cio/device.c      | 203
-rw-r--r--  drivers/s390/cio/device.h      |   3
-rw-r--r--  drivers/s390/cio/device_fsm.c  |  28
-rw-r--r--  drivers/s390/cio/io_sch.h      |  12
4 files changed, 119 insertions(+), 127 deletions(-)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 0dcfc0ee3d81..167446785d19 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -306,47 +306,6 @@ static void ccw_device_unregister(struct ccw_device *cdev)
 	}
 }
 
-static void ccw_device_remove_orphan_cb(struct work_struct *work)
-{
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	ccw_device_unregister(cdev);
-	/* Release cdev reference for workqueue processing. */
-	put_device(&cdev->dev);
-}
-
-static void
-ccw_device_remove_disconnected(struct ccw_device *cdev)
-{
-	unsigned long flags;
-
-	/*
-	 * Forced offline in disconnected state means
-	 * 'throw away device'.
-	 */
-	if (ccw_device_is_orphan(cdev)) {
-		/*
-		 * Deregister ccw device.
-		 * Unfortunately, we cannot do this directly from the
-		 * attribute method.
-		 */
-		/* Get cdev reference for workqueue processing. */
-		if (!get_device(&cdev->dev))
-			return;
-		spin_lock_irqsave(cdev->ccwlock, flags);
-		cdev->private->state = DEV_STATE_NOT_OPER;
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_remove_orphan_cb);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
-	} else
-		/* Deregister subchannel, which will kill the ccw device. */
-		ccw_device_schedule_sch_unregister(cdev);
-}
-
 /**
  * ccw_device_set_offline() - disable a ccw device for I/O
  * @cdev: target ccw device
@@ -494,9 +453,11 @@ error:
 
 static int online_store_handle_offline(struct ccw_device *cdev)
 {
-	if (cdev->private->state == DEV_STATE_DISCONNECTED)
-		ccw_device_remove_disconnected(cdev);
-	else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+		spin_lock_irq(cdev->ccwlock);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+		spin_unlock_irq(cdev->ccwlock);
+	} else if (cdev->online && cdev->drv && cdev->drv->set_offline)
 		return ccw_device_set_offline(cdev);
 	return 0;
 }
@@ -690,17 +651,10 @@ static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
 	return dev ? to_ccwdev(dev) : NULL;
 }
 
-void ccw_device_do_unbind_bind(struct work_struct *work)
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
 {
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-	struct subchannel *sch;
 	int ret;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	sch = to_subchannel(cdev->dev.parent);
-
 	if (test_bit(1, &cdev->private->registered)) {
 		device_release_driver(&cdev->dev);
 		ret = device_attach(&cdev->dev);
@@ -735,6 +689,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 	return ERR_PTR(-ENOMEM);
 }
 
+static void ccw_device_todo(struct work_struct *work);
+
 static int io_subchannel_initialize_dev(struct subchannel *sch,
 					struct ccw_device *cdev)
 {
@@ -742,7 +698,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
 	atomic_set(&cdev->private->onoff, 0);
 	cdev->dev.parent = &sch->dev;
 	cdev->dev.release = ccw_device_release;
-	INIT_WORK(&cdev->private->kick_work, NULL);
+	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
 	cdev->dev.groups = ccwdev_attr_groups;
 	/* Do first half of device_register. */
 	device_initialize(&cdev->dev);
@@ -797,17 +753,12 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
 /*
  * Register recognized device.
  */
-static void
-io_subchannel_register(struct work_struct *work)
+static void io_subchannel_register(struct ccw_device *cdev)
 {
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 	unsigned long flags;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	/*
 	 * Check if subchannel is still registered. It may have become
@@ -859,41 +810,23 @@ out:
 	cdev->private->flags.recog_done = 1;
 	wake_up(&cdev->private->wait_q);
 out_err:
-	/* Release reference for workqueue processing. */
-	put_device(&cdev->dev);
 	if (atomic_dec_and_test(&ccw_device_init_count))
 		wake_up(&ccw_device_init_wq);
 }
 
-static void ccw_device_call_sch_unregister(struct work_struct *work)
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
 {
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
 	struct subchannel *sch;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
 	/* Get subchannel reference for local processing. */
 	if (!get_device(cdev->dev.parent))
 		return;
 	sch = to_subchannel(cdev->dev.parent);
 	css_sch_device_unregister(sch);
-	/* Release cdev reference for workqueue processing.*/
-	put_device(&cdev->dev);
 	/* Release subchannel reference for local processing. */
 	put_device(&sch->dev);
 }
 
-void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
-{
-	/* Get cdev reference for workqueue processing. */
-	if (!get_device(&cdev->dev))
-		return;
-	PREPARE_WORK(&cdev->private->kick_work,
-		     ccw_device_call_sch_unregister);
-	queue_work(slow_path_wq, &cdev->private->kick_work);
-}
-
 /*
  * subchannel recognition done. Called from the state machine.
  */
@@ -909,7 +842,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 		/* Device did not respond in time. */
 	case DEV_STATE_NOT_OPER:
 		cdev->private->flags.recog_done = 1;
-		ccw_device_schedule_sch_unregister(cdev);
+		/* Remove device found not operational. */
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		if (atomic_dec_and_test(&ccw_device_init_count))
 			wake_up(&ccw_device_init_wq);
 		break;
@@ -918,11 +852,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 		 * We can't register the device in interrupt context so
 		 * we schedule a work item.
 		 */
-		if (!get_device(&cdev->dev))
-			break;
-		PREPARE_WORK(&cdev->private->kick_work,
-			     io_subchannel_register);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
 		break;
 	}
 }
@@ -1333,20 +1263,16 @@ static void ccw_device_schedule_recovery(void)
 static int purge_fn(struct device *dev, void *data)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
-	struct ccw_device_private *priv = cdev->private;
-	int unreg;
+	struct ccw_dev_id *id = &cdev->private->dev_id;
 
 	spin_lock_irq(cdev->ccwlock);
-	unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
-		(priv->state == DEV_STATE_OFFLINE);
+	if (is_blacklisted(id->ssid, id->devno) &&
+	    (cdev->private->state == DEV_STATE_OFFLINE)) {
+		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+			      id->devno);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	}
 	spin_unlock_irq(cdev->ccwlock);
-	if (!unreg)
-		goto out;
-	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
-		      priv->dev_id.devno);
-	ccw_device_schedule_sch_unregister(cdev);
-
-out:
 	/* Abort loop in case of pending signal. */
 	if (signal_pending(current))
 		return -EINTR;
@@ -1456,12 +1382,14 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 		goto out_unlock;
 	if (work_pending(&sch->todo_work))
 		goto out_unlock;
+	cdev = sch_get_cdev(sch);
+	if (cdev && work_pending(&cdev->private->todo_work))
+		goto out_unlock;
 	action = sch_get_action(sch);
 	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
 		      sch->schid.ssid, sch->schid.sch_no, process,
 		      action);
 	/* Perform immediate actions while holding the lock. */
-	cdev = sch_get_cdev(sch);
 	switch (action) {
 	case IO_SCH_REPROBE:
 		/* Trigger device recognition. */
@@ -1753,7 +1681,7 @@ static int ccw_device_pm_prepare(struct device *dev)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 
-	if (work_pending(&cdev->private->kick_work))
+	if (work_pending(&cdev->private->todo_work))
 		return -EAGAIN;
 	/* Fail while device is being set online/offline. */
 	if (atomic_read(&cdev->private->onoff))
@@ -1874,7 +1802,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
 	cdev->private->state = DEV_STATE_BOXED;
 	if (ccw_device_notify(cdev, CIO_BOXED))
 		return 0;
-	ccw_device_schedule_sch_unregister(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	return -ENODEV;
 }
 
@@ -1883,7 +1811,7 @@ static int resume_handle_disc(struct ccw_device *cdev)
 	cdev->private->state = DEV_STATE_DISCONNECTED;
 	if (ccw_device_notify(cdev, CIO_GONE))
 		return 0;
-	ccw_device_schedule_sch_unregister(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	return -ENODEV;
 }
 
@@ -1928,9 +1856,7 @@ static int ccw_device_pm_restore(struct device *dev)
 	/* check if the device type has changed */
 	if (!ccw_device_test_sense_data(cdev)) {
 		ccw_device_update_sense_data(cdev);
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unbind_bind);
-		queue_work(ccw_device_work, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 		ret = -ENODEV;
 		goto out_unlock;
 	}
@@ -1974,7 +1900,7 @@ out_disc_unlock:
 	goto out_restore;
 
 out_unreg_unlock:
-	ccw_device_schedule_sch_unregister(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
 	ret = -ENODEV;
 out_unlock:
 	spin_unlock_irq(sch->lock);
@@ -2039,6 +1965,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
 	return sch->schid;
 }
 
+static void ccw_device_todo(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
+	struct subchannel *sch;
+	enum cdev_todo todo;
+
+	priv = container_of(work, struct ccw_device_private, todo_work);
+	cdev = priv->cdev;
+	sch = to_subchannel(cdev->dev.parent);
+	/* Find out todo. */
+	spin_lock_irq(cdev->ccwlock);
+	todo = priv->todo;
+	priv->todo = CDEV_TODO_NOTHING;
+	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+		      priv->dev_id.ssid, priv->dev_id.devno, todo);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Perform todo. */
+	switch (todo) {
+	case CDEV_TODO_ENABLE_CMF:
+		cmf_reenable(cdev);
+		break;
+	case CDEV_TODO_REBIND:
+		ccw_device_do_unbind_bind(cdev);
+		break;
+	case CDEV_TODO_REGISTER:
+		io_subchannel_register(cdev);
+		break;
+	case CDEV_TODO_UNREG_EVAL:
+		if (!sch_is_pseudo_sch(sch))
+			css_schedule_eval(sch->schid);
+		/* fall-through */
+	case CDEV_TODO_UNREG:
+		if (sch_is_pseudo_sch(sch))
+			ccw_device_unregister(cdev);
+		else
+			ccw_device_call_sch_unregister(cdev);
+		break;
+	default:
+		break;
+	}
+	/* Release workqueue ref. */
+	put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+		      todo);
+	if (cdev->private->todo >= todo)
+		return;
+	cdev->private->todo = todo;
+	/* Get workqueue ref. */
+	if (!get_device(&cdev->dev))
+		return;
+	if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&cdev->dev);
+	}
+}
+
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(ccw_device_set_online);
 EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 246c6482842c..adaa27efc59e 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -81,8 +81,6 @@ void io_subchannel_init_config(struct subchannel *sch);
 
 int ccw_device_cancel_halt_clear(struct ccw_device *);
 
-void ccw_device_do_unbind_bind(struct work_struct *);
-void ccw_device_move_to_orphanage(struct work_struct *);
 int ccw_device_is_orphan(struct ccw_device *);
 
 int ccw_device_recognition(struct ccw_device *);
@@ -92,6 +90,7 @@ void ccw_device_update_sense_data(struct ccw_device *);
 int ccw_device_test_sense_data(struct ccw_device *);
 void ccw_device_schedule_sch_unregister(struct ccw_device *);
 int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
 
 /* Function prototypes for device status and basic sense stuff. */
 void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index d1e05f44fb6f..b163743bf586 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -289,9 +289,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 			wake_up(&cdev->private->wait_q);
 		} else {
 			ccw_device_update_sense_data(cdev);
-			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_do_unbind_bind);
-			queue_work(ccw_device_work, &cdev->private->kick_work);
+			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 		}
 		return;
 	case DEV_STATE_BOXED:
@@ -343,28 +341,16 @@ int ccw_device_notify(struct ccw_device *cdev, int event)
 	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
 }
 
-static void cmf_reenable_delayed(struct work_struct *work)
-{
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	cmf_reenable(cdev);
-}
-
 static void ccw_device_oper_notify(struct ccw_device *cdev)
 {
 	if (ccw_device_notify(cdev, CIO_OPER)) {
 		/* Reenable channel measurements, if needed. */
-		PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
-		queue_work(ccw_device_work, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
 		return;
 	}
 	/* Driver doesn't want device back. */
 	ccw_device_set_notoper(cdev);
-	PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
-	queue_work(ccw_device_work, &cdev->private->kick_work);
+	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 }
 
 /*
@@ -392,14 +378,14 @@ ccw_device_done(struct ccw_device *cdev, int state)
 		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
 		if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
-			ccw_device_schedule_sch_unregister(cdev);
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		cdev->private->flags.donotify = 0;
 		break;
 	case DEV_STATE_NOT_OPER:
 		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
 		if (!ccw_device_notify(cdev, CIO_GONE))
-			ccw_device_schedule_sch_unregister(cdev);
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		else
 			ccw_device_set_disconnected(cdev);
 		cdev->private->flags.donotify = 0;
@@ -409,7 +395,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 			      "%04x\n", cdev->private->dev_id.devno,
 			      sch->schid.sch_no);
 		if (!ccw_device_notify(cdev, CIO_NO_PATH))
-			ccw_device_schedule_sch_unregister(cdev);
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		else
 			ccw_device_set_disconnected(cdev);
 		cdev->private->flags.donotify = 0;
@@ -751,7 +737,7 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
 				       enum dev_event dev_event)
 {
 	if (!ccw_device_notify(cdev, CIO_GONE))
-		ccw_device_schedule_sch_unregister(cdev);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	else
 		ccw_device_set_disconnected(cdev);
 }
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 0b8f381bd20e..b770e4202131 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -82,6 +82,15 @@ struct senseid {
 	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
 }  __attribute__ ((packed, aligned(4)));
 
+enum cdev_todo {
+	CDEV_TODO_NOTHING,
+	CDEV_TODO_ENABLE_CMF,
+	CDEV_TODO_REBIND,
+	CDEV_TODO_REGISTER,
+	CDEV_TODO_UNREG,
+	CDEV_TODO_UNREG_EVAL,
+};
+
 struct ccw_device_private {
 	struct ccw_device *cdev;
 	struct subchannel *sch;
@@ -115,7 +124,8 @@ struct ccw_device_private {
 	struct senseid senseid;	/* SenseID info */
 	struct pgid pgid[8];	/* path group IDs per chpid*/
 	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
-	struct work_struct kick_work;
+	struct work_struct todo_work;
+	enum cdev_todo todo;
 	wait_queue_head_t wait_q;
 	struct timer_list timer;
 	void *cmb;			/* measurement information */