about summary refs log tree commit diff stats
path: root/drivers/watchdog/watchdog_dev.c
diff options
context:
space:
mode:
author: Guenter Roeck <linux@roeck-us.net> 2016-02-28 16:12:16 -0500
committer: Wim Van Sebroeck <wim@iguana.be> 2016-03-16 16:11:15 -0400
commitee142889e32f564f9b5e57b68b06693ec5473074 (patch)
tree69afe4a8324087bbd96636048f9b306d5a84eb28 /drivers/watchdog/watchdog_dev.c
parent664a39236e718f9f03fa73fc01006da9ced04efc (diff)
watchdog: Introduce WDOG_HW_RUNNING flag
The WDOG_HW_RUNNING flag is expected to be set by watchdog drivers if the hardware watchdog is running. If the flag is set, the watchdog subsystem will ping the watchdog even if the watchdog device is closed. The watchdog driver's stop function is now optional and may be omitted if the watchdog cannot be stopped. If stopping the watchdog is not possible but the driver implements a stop function, the driver is responsible for setting the WDOG_HW_RUNNING flag in its stop function. Signed-off-by: Guenter Roeck <linux@roeck-us.net> Signed-off-by: Wim Van Sebroeck <wim@iguana.be>
Diffstat (limited to 'drivers/watchdog/watchdog_dev.c')
-rw-r--r--drivers/watchdog/watchdog_dev.c52
1 file changed, 39 insertions, 13 deletions
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index e668a9e8b648..5d3a9fa4856e 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -92,7 +92,8 @@ static inline bool watchdog_need_worker(struct watchdog_device *wdd)
92 * requests. 92 * requests.
93 * - Userspace requests a longer timeout than the hardware can handle. 93 * - Userspace requests a longer timeout than the hardware can handle.
94 */ 94 */
95 return watchdog_active(wdd) && hm && t > hm; 95 return hm && ((watchdog_active(wdd) && t > hm) ||
96 (t && !watchdog_active(wdd) && watchdog_hw_running(wdd)));
96} 97}
97 98
98static long watchdog_next_keepalive(struct watchdog_device *wdd) 99static long watchdog_next_keepalive(struct watchdog_device *wdd)
@@ -108,6 +109,9 @@ static long watchdog_next_keepalive(struct watchdog_device *wdd)
108 hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms); 109 hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms);
109 keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2); 110 keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);
110 111
112 if (!watchdog_active(wdd))
113 return keepalive_interval;
114
111 /* 115 /*
112 * To ensure that the watchdog times out wdd->timeout seconds 116 * To ensure that the watchdog times out wdd->timeout seconds
113 * after the most recent ping from userspace, the last 117 * after the most recent ping from userspace, the last
@@ -161,7 +165,7 @@ static int watchdog_ping(struct watchdog_device *wdd)
161{ 165{
162 struct watchdog_core_data *wd_data = wdd->wd_data; 166 struct watchdog_core_data *wd_data = wdd->wd_data;
163 167
164 if (!watchdog_active(wdd)) 168 if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
165 return 0; 169 return 0;
166 170
167 wd_data->last_keepalive = jiffies; 171 wd_data->last_keepalive = jiffies;
@@ -178,7 +182,7 @@ static void watchdog_ping_work(struct work_struct *work)
178 182
179 mutex_lock(&wd_data->lock); 183 mutex_lock(&wd_data->lock);
180 wdd = wd_data->wdd; 184 wdd = wd_data->wdd;
181 if (wdd && watchdog_active(wdd)) 185 if (wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd)))
182 __watchdog_ping(wdd); 186 __watchdog_ping(wdd);
183 mutex_unlock(&wd_data->lock); 187 mutex_unlock(&wd_data->lock);
184} 188}
@@ -204,7 +208,10 @@ static int watchdog_start(struct watchdog_device *wdd)
204 return 0; 208 return 0;
205 209
206 started_at = jiffies; 210 started_at = jiffies;
207 err = wdd->ops->start(wdd); 211 if (watchdog_hw_running(wdd) && wdd->ops->ping)
212 err = wdd->ops->ping(wdd);
213 else
214 err = wdd->ops->start(wdd);
208 if (err == 0) { 215 if (err == 0) {
209 set_bit(WDOG_ACTIVE, &wdd->status); 216 set_bit(WDOG_ACTIVE, &wdd->status);
210 wd_data->last_keepalive = started_at; 217 wd_data->last_keepalive = started_at;
@@ -228,8 +235,7 @@ static int watchdog_start(struct watchdog_device *wdd)
228 235
229static int watchdog_stop(struct watchdog_device *wdd) 236static int watchdog_stop(struct watchdog_device *wdd)
230{ 237{
231 struct watchdog_core_data *wd_data = wdd->wd_data; 238 int err = 0;
232 int err;
233 239
234 if (!watchdog_active(wdd)) 240 if (!watchdog_active(wdd))
235 return 0; 241 return 0;
@@ -243,7 +249,7 @@ static int watchdog_stop(struct watchdog_device *wdd)
243 err = wdd->ops->stop(wdd); 249 err = wdd->ops->stop(wdd);
244 if (err == 0) { 250 if (err == 0) {
245 clear_bit(WDOG_ACTIVE, &wdd->status); 251 clear_bit(WDOG_ACTIVE, &wdd->status);
246 cancel_delayed_work(&wd_data->work); 252 watchdog_update_worker(wdd);
247 } 253 }
248 254
249 return err; 255 return err;
@@ -641,7 +647,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
641 * If the /dev/watchdog device is open, we don't want the module 647 * If the /dev/watchdog device is open, we don't want the module
642 * to be unloaded. 648 * to be unloaded.
643 */ 649 */
644 if (!try_module_get(wdd->ops->owner)) { 650 if (!watchdog_hw_running(wdd) && !try_module_get(wdd->ops->owner)) {
645 err = -EBUSY; 651 err = -EBUSY;
646 goto out_clear; 652 goto out_clear;
647 } 653 }
@@ -652,7 +658,8 @@ static int watchdog_open(struct inode *inode, struct file *file)
652 658
653 file->private_data = wd_data; 659 file->private_data = wd_data;
654 660
655 kref_get(&wd_data->kref); 661 if (!watchdog_hw_running(wdd))
662 kref_get(&wd_data->kref);
656 663
657 /* dev/watchdog is a virtual (and thus non-seekable) filesystem */ 664 /* dev/watchdog is a virtual (and thus non-seekable) filesystem */
658 return nonseekable_open(inode, file); 665 return nonseekable_open(inode, file);
@@ -713,15 +720,22 @@ static int watchdog_release(struct inode *inode, struct file *file)
713 } 720 }
714 721
715 cancel_delayed_work_sync(&wd_data->work); 722 cancel_delayed_work_sync(&wd_data->work);
723 watchdog_update_worker(wdd);
716 724
717 /* make sure that /dev/watchdog can be re-opened */ 725 /* make sure that /dev/watchdog can be re-opened */
718 clear_bit(_WDOG_DEV_OPEN, &wd_data->status); 726 clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
719 727
720done: 728done:
721 mutex_unlock(&wd_data->lock); 729 mutex_unlock(&wd_data->lock);
722 /* Allow the owner module to be unloaded again */ 730 /*
723 module_put(wd_data->cdev.owner); 731 * Allow the owner module to be unloaded again unless the watchdog
724 kref_put(&wd_data->kref, watchdog_core_data_release); 732 * is still running. If the watchdog is still running, it can not
733 * be stopped, and its driver must not be unloaded.
734 */
735 if (!watchdog_hw_running(wdd)) {
736 module_put(wdd->ops->owner);
737 kref_put(&wd_data->kref, watchdog_core_data_release);
738 }
725 return 0; 739 return 0;
726} 740}
727 741
@@ -798,8 +812,20 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
798 old_wd_data = NULL; 812 old_wd_data = NULL;
799 kref_put(&wd_data->kref, watchdog_core_data_release); 813 kref_put(&wd_data->kref, watchdog_core_data_release);
800 } 814 }
815 return err;
801 } 816 }
802 return err; 817
818 /*
819 * If the watchdog is running, prevent its driver from being unloaded,
820 * and schedule an immediate ping.
821 */
822 if (watchdog_hw_running(wdd)) {
823 __module_get(wdd->ops->owner);
824 kref_get(&wd_data->kref);
825 queue_delayed_work(watchdog_wq, &wd_data->work, 0);
826 }
827
828 return 0;
803} 829}
804 830
805/* 831/*