author     David S. Miller <davem@davemloft.net>   2011-01-24 16:17:06 -0500
committer  David S. Miller <davem@davemloft.net>   2011-01-24 16:17:06 -0500
commit     e92427b289d252cfbd4cb5282d92f4ce1a5bb1fb (patch)
tree       6d30e5e7b7f8e9aaa51d43b7128ac56860fa03bb /drivers/rtc/interface.c
parent     c506653d35249bb4738bb139c24362e1ae724bc1 (diff)
parent     ec30f343d61391ab23705e50a525da1d55395780 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/rtc/interface.c')
-rw-r--r--   drivers/rtc/interface.c | 567
1 file changed, 352 insertions(+), 215 deletions(-)
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c816238aa9..925006d33109 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,15 +14,14 @@
 #include <linux/rtc.h>
 #include <linux/sched.h>
 #include <linux/log2.h>
+#include <linux/workqueue.h>
 
-int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+
+static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 {
         int err;
-
-        err = mutex_lock_interruptible(&rtc->ops_lock);
-        if (err)
-                return err;
-
         if (!rtc->ops)
                 err = -ENODEV;
         else if (!rtc->ops->read_time)
@@ -31,7 +30,18 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
                 memset(tm, 0, sizeof(struct rtc_time));
                 err = rtc->ops->read_time(rtc->dev.parent, tm);
         }
+        return err;
+}
+
+int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+{
+        int err;
+
+        err = mutex_lock_interruptible(&rtc->ops_lock);
+        if (err)
+                return err;
 
+        err = __rtc_read_time(rtc, tm);
         mutex_unlock(&rtc->ops_lock);
         return err;
 }
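The two hunks above split rtc_read_time() into a lock-free helper, __rtc_read_time(), plus the original locking wrapper, so that code introduced later in this patch (the alarm and timer paths) can re-read the hardware clock while already holding ops_lock. A minimal sketch of the same wrapper-around-unlocked-helper pattern, using hypothetical foo_* names rather than anything from the RTC core:

#include <linux/mutex.h>
#include <linux/errno.h>

struct foo_state { int value; };

struct foo_device {
        struct mutex ops_lock;
        int (*read)(struct foo_device *foo, struct foo_state *st);
};

/* __foo_read: caller must already hold foo->ops_lock */
static int __foo_read(struct foo_device *foo, struct foo_state *st)
{
        if (!foo->read)
                return -ENODEV;
        return foo->read(foo, st);
}

/* foo_read: public entry point; takes the lock, then reuses the helper */
int foo_read(struct foo_device *foo, struct foo_state *st)
{
        int err = mutex_lock_interruptible(&foo->ops_lock);

        if (err)
                return err;
        err = __foo_read(foo, st);
        mutex_unlock(&foo->ops_lock);
        return err;
}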
@@ -106,188 +116,60 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
 }
 EXPORT_SYMBOL_GPL(rtc_set_mmss);
 
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
         int err;
 
         err = mutex_lock_interruptible(&rtc->ops_lock);
         if (err)
                 return err;
-
         if (rtc->ops == NULL)
                 err = -ENODEV;
         else if (!rtc->ops->read_alarm)
                 err = -EINVAL;
         else {
                 memset(alarm, 0, sizeof(struct rtc_wkalrm));
-                err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
+                alarm->enabled = rtc->aie_timer.enabled;
+                alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
         }
-
         mutex_unlock(&rtc->ops_lock);
+
         return err;
 }
+EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
-int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
+        struct rtc_time tm;
+        long now, scheduled;
         int err;
-        struct rtc_time before, now;
-        int first_time = 1;
-        unsigned long t_now, t_alm;
-        enum { none, day, month, year } missing = none;
-        unsigned days;
-
-        /* The lower level RTC driver may return -1 in some fields,
-         * creating invalid alarm->time values, for reasons like:
-         *
-         *   - The hardware may not be capable of filling them in;
-         *     many alarms match only on time-of-day fields, not
-         *     day/month/year calendar data.
-         *
-         *   - Some hardware uses illegal values as "wildcard" match
-         *     values, which non-Linux firmware (like a BIOS) may try
-         *     to set up as e.g. "alarm 15 minutes after each hour".
-         *     Linux uses only oneshot alarms.
-         *
-         * When we see that here, we deal with it by using values from
-         * a current RTC timestamp for any missing (-1) values.  The
-         * RTC driver prevents "periodic alarm" modes.
-         *
-         * But this can be racey, because some fields of the RTC timestamp
-         * may have wrapped in the interval since we read the RTC alarm,
-         * which would lead to us inserting inconsistent values in place
-         * of the -1 fields.
-         *
-         * Reading the alarm and timestamp in the reverse sequence
-         * would have the same race condition, and not solve the issue.
-         *
-         * So, we must first read the RTC timestamp,
-         * then read the RTC alarm value,
-         * and then read a second RTC timestamp.
-         *
-         * If any fields of the second timestamp have changed
-         * when compared with the first timestamp, then we know
-         * our timestamp may be inconsistent with that used by
-         * the low-level rtc_read_alarm_internal() function.
-         *
-         * So, when the two timestamps disagree, we just loop and do
-         * the process again to get a fully consistent set of values.
-         *
-         * This could all instead be done in the lower level driver,
-         * but since more than one lower level RTC implementation needs it,
-         * then it's probably best best to do it here instead of there..
-         */
 
-        /* Get the "before" timestamp */
-        err = rtc_read_time(rtc, &before);
-        if (err < 0)
+        err = rtc_valid_tm(&alarm->time);
+        if (err)
                 return err;
-        do {
-                if (!first_time)
-                        memcpy(&before, &now, sizeof(struct rtc_time));
-                first_time = 0;
-
-                /* get the RTC alarm values, which may be incomplete */
-                err = rtc_read_alarm_internal(rtc, alarm);
-                if (err)
-                        return err;
-                if (!alarm->enabled)
-                        return 0;
-
-                /* full-function RTCs won't have such missing fields */
-                if (rtc_valid_tm(&alarm->time) == 0)
-                        return 0;
-
-                /* get the "after" timestamp, to detect wrapped fields */
-                err = rtc_read_time(rtc, &now);
-                if (err < 0)
-                        return err;
+        rtc_tm_to_time(&alarm->time, &scheduled);
 
-                /* note that tm_sec is a "don't care" value here: */
-        } while (   before.tm_min  != now.tm_min
-                 || before.tm_hour != now.tm_hour
-                 || before.tm_mon  != now.tm_mon
-                 || before.tm_year != now.tm_year);
-
-        /* Fill in the missing alarm fields using the timestamp; we
-         * know there's at least one since alarm->time is invalid.
+        /* Make sure we're not setting alarms in the past */
+        err = __rtc_read_time(rtc, &tm);
+        rtc_tm_to_time(&tm, &now);
+        if (scheduled <= now)
+                return -ETIME;
+        /*
+         * XXX - We just checked to make sure the alarm time is not
+         * in the past, but there is still a race window where if
+         * the is alarm set for the next second and the second ticks
+         * over right here, before we set the alarm.
          */
-        if (alarm->time.tm_sec == -1)
-                alarm->time.tm_sec = now.tm_sec;
-        if (alarm->time.tm_min == -1)
-                alarm->time.tm_min = now.tm_min;
-        if (alarm->time.tm_hour == -1)
-                alarm->time.tm_hour = now.tm_hour;
-
-        /* For simplicity, only support date rollover for now */
-        if (alarm->time.tm_mday == -1) {
-                alarm->time.tm_mday = now.tm_mday;
-                missing = day;
-        }
-        if (alarm->time.tm_mon == -1) {
-                alarm->time.tm_mon = now.tm_mon;
-                if (missing == none)
-                        missing = month;
-        }
-        if (alarm->time.tm_year == -1) {
-                alarm->time.tm_year = now.tm_year;
-                if (missing == none)
-                        missing = year;
-        }
-
-        /* with luck, no rollover is needed */
-        rtc_tm_to_time(&now, &t_now);
-        rtc_tm_to_time(&alarm->time, &t_alm);
-        if (t_now < t_alm)
-                goto done;
 
-        switch (missing) {
-
-        /* 24 hour rollover ... if it's now 10am Monday, an alarm that
-         * that will trigger at 5am will do so at 5am Tuesday, which
-         * could also be in the next month or year.  This is a common
-         * case, especially for PCs.
-         */
-        case day:
-                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
-                t_alm += 24 * 60 * 60;
-                rtc_time_to_tm(t_alm, &alarm->time);
-                break;
-
-        /* Month rollover ... if it's the 31th, an alarm on the 3rd will
-         * be next month.  An alarm matching on the 30th, 29th, or 28th
-         * may end up in the month after that!  Many newer PCs support
-         * this type of alarm.
-         */
-        case month:
-                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
-                do {
-                        if (alarm->time.tm_mon < 11)
-                                alarm->time.tm_mon++;
-                        else {
-                                alarm->time.tm_mon = 0;
-                                alarm->time.tm_year++;
-                        }
-                        days = rtc_month_days(alarm->time.tm_mon,
-                                        alarm->time.tm_year);
-                } while (days < alarm->time.tm_mday);
-                break;
-
-        /* Year rollover ... easy except for leap years! */
-        case year:
-                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
-                do {
-                        alarm->time.tm_year++;
-                } while (rtc_valid_tm(&alarm->time) != 0);
-                break;
-
-        default:
-                dev_warn(&rtc->dev, "alarm rollover not handled\n");
-        }
+        if (!rtc->ops)
+                err = -ENODEV;
+        else if (!rtc->ops->set_alarm)
+                err = -EINVAL;
+        else
+                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
 
-done:
-        return 0;
+        return err;
 }
-EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
@@ -300,14 +182,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         err = mutex_lock_interruptible(&rtc->ops_lock);
         if (err)
                 return err;
-
-        if (!rtc->ops)
-                err = -ENODEV;
-        else if (!rtc->ops->set_alarm)
-                err = -EINVAL;
-        else
-                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
+        if (rtc->aie_timer.enabled) {
+                rtc_timer_remove(rtc, &rtc->aie_timer);
+        }
+        rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+        rtc->aie_timer.period = ktime_set(0, 0);
+        if (alarm->enabled) {
+                err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+        }
         mutex_unlock(&rtc->ops_lock);
         return err;
 }
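With the two hunks above, rtc_set_alarm() no longer calls the driver directly: it loads the alarm into aie_timer and lets rtc_timer_enqueue()/__rtc_set_alarm() program the hardware, and __rtc_set_alarm() rejects an expiry that is already in the past with -ETIME. A hedged sketch of an in-kernel caller of this interface (the example_* name, the "rtc0" device name and the 60-second offset are illustrative, not from the patch):

#include <linux/rtc.h>
#include <linux/errno.h>

/* Sketch only: sets a one-shot alarm one minute from the current RTC time. */
static int example_set_alarm_in_60s(void)
{
        struct rtc_device *rtc;
        struct rtc_wkalrm alrm;
        struct rtc_time tm;
        unsigned long now;
        int err;

        rtc = rtc_class_open("rtc0");
        if (!rtc)
                return -ENODEV;

        err = rtc_read_time(rtc, &tm);
        if (err)
                goto out;

        rtc_tm_to_time(&tm, &now);
        rtc_time_to_tm(now + 60, &alrm.time);   /* one minute from now */
        alrm.enabled = 1;

        /* returns -ETIME if the requested time has already passed */
        err = rtc_set_alarm(rtc, &alrm);
out:
        rtc_class_close(rtc);
        return err;
}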
@@ -319,6 +201,16 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
         if (err)
                 return err;
 
+        if (rtc->aie_timer.enabled != enabled) {
+                if (enabled)
+                        err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+                else
+                        rtc_timer_remove(rtc, &rtc->aie_timer);
+        }
+
+        if (err)
+                return err;
+
         if (!rtc->ops)
                 err = -ENODEV;
         else if (!rtc->ops->alarm_irq_enable)
@@ -337,52 +229,50 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
         if (err)
                 return err;
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-        if (enabled == 0 && rtc->uie_irq_active) {
-                mutex_unlock(&rtc->ops_lock);
-                return rtc_dev_update_irq_enable_emul(rtc, enabled);
-        }
-#endif
+        /* make sure we're changing state */
+        if (rtc->uie_rtctimer.enabled == enabled)
+                goto out;
 
-        if (!rtc->ops)
-                err = -ENODEV;
-        else if (!rtc->ops->update_irq_enable)
-                err = -EINVAL;
-        else
-                err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
+        if (enabled) {
+                struct rtc_time tm;
+                ktime_t now, onesec;
 
-        mutex_unlock(&rtc->ops_lock);
+                __rtc_read_time(rtc, &tm);
+                onesec = ktime_set(1, 0);
+                now = rtc_tm_to_ktime(tm);
+                rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+                rtc->uie_rtctimer.period = ktime_set(1, 0);
+                err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+        } else
+                rtc_timer_remove(rtc, &rtc->uie_rtctimer);
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-        /*
-         * Enable emulation if the driver did not provide
-         * the update_irq_enable function pointer or if returned
-         * -EINVAL to signal that it has been configured without
-         * interrupts or that are not available at the moment.
-         */
-        if (err == -EINVAL)
-                err = rtc_dev_update_irq_enable_emul(rtc, enabled);
-#endif
+out:
+        mutex_unlock(&rtc->ops_lock);
         return err;
+
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
 
+
 /**
- * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
- * @rtc: the rtc device
- * @num: how many irqs are being reported (usually one)
- * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
- * Context: any
+ * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
+ * @rtc: pointer to the rtc device
+ *
+ * This function is called when an AIE, UIE or PIE mode interrupt
+ * has occured (or been emulated).
+ *
+ * Triggers the registered irq_task function callback.
  */
-void rtc_update_irq(struct rtc_device *rtc,
-                unsigned long num, unsigned long events)
+static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
 {
         unsigned long flags;
 
+        /* mark one irq of the appropriate mode */
         spin_lock_irqsave(&rtc->irq_lock, flags);
-        rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
+        rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
         spin_unlock_irqrestore(&rtc->irq_lock, flags);
 
+        /* call the task func */
         spin_lock_irqsave(&rtc->irq_task_lock, flags);
         if (rtc->irq_task)
                 rtc->irq_task->func(rtc->irq_task->private_data);
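The hunk above drops the driver update_irq_enable() hook and its CONFIG_RTC_INTF_DEV_UIE_EMUL fallback: the core now arms uie_rtctimer one second ahead with a one-second period, so update interrupts behave the same way on every RTC. The userspace contract is unchanged; a small consumer of update events through the character device (assuming a /dev/rtc0 node exists) might look like this:

/* Userspace sketch: waits for five RTC update (once-per-second) events. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
        unsigned long data;
        int i, fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/rtc0");
                return 1;
        }
        if (ioctl(fd, RTC_UIE_ON, 0) < 0) {
                perror("RTC_UIE_ON");
                return 1;
        }
        for (i = 0; i < 5; i++) {
                /* blocks until the next update interrupt; the low byte of
                 * the returned word is the event mask, the rest is a count */
                if (read(fd, &data, sizeof(data)) < 0)
                        break;
                printf("update irq %d, events 0x%lx\n", i + 1, data);
        }
        ioctl(fd, RTC_UIE_OFF, 0);
        close(fd);
        return 0;
}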
@@ -391,6 +281,69 @@ void rtc_update_irq(struct rtc_device *rtc,
         wake_up_interruptible(&rtc->irq_queue);
         kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
 }
+
+
+/**
+ * rtc_aie_update_irq - AIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This functions is called when the aie_timer expires.
+ */
+void rtc_aie_update_irq(void *private)
+{
+        struct rtc_device *rtc = (struct rtc_device *)private;
+        rtc_handle_legacy_irq(rtc, 1, RTC_AF);
+}
+
+
+/**
+ * rtc_uie_update_irq - UIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This functions is called when the uie_timer expires.
+ */
+void rtc_uie_update_irq(void *private)
+{
+        struct rtc_device *rtc = (struct rtc_device *)private;
+        rtc_handle_legacy_irq(rtc, 1, RTC_UF);
+}
+
+
+/**
+ * rtc_pie_update_irq - PIE mode hrtimer hook
+ * @timer: pointer to the pie mode hrtimer
+ *
+ * This function is used to emulate PIE mode interrupts
+ * using an hrtimer. This function is called when the periodic
+ * hrtimer expires.
+ */
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
+{
+        struct rtc_device *rtc;
+        ktime_t period;
+        int count;
+        rtc = container_of(timer, struct rtc_device, pie_timer);
+
+        period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+        count = hrtimer_forward_now(timer, period);
+
+        rtc_handle_legacy_irq(rtc, count, RTC_PF);
+
+        return HRTIMER_RESTART;
+}
+
+/**
+ * rtc_update_irq - Triggered when a RTC interrupt occurs.
+ * @rtc: the rtc device
+ * @num: how many irqs are being reported (usually one)
+ * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
+ * Context: any
+ */
+void rtc_update_irq(struct rtc_device *rtc,
+                unsigned long num, unsigned long events)
+{
+        schedule_work(&rtc->irqwork);
+}
 EXPORT_SYMBOL_GPL(rtc_update_irq);
 
 static int __rtc_match(struct device *dev, void *data)
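After the hunk above, rtc_update_irq() only schedules rtc->irqwork; expiring rtc_timers, re-reading the clock and reprogramming the next hardware alarm all happen later in rtc_timer_do_work(). Drivers keep calling it from their interrupt handlers exactly as before. A sketch of such a handler (the foo_rtc_* names are invented; rtc_update_irq() and the RTC_IRQF/RTC_AF flags are the real interface shown above):

#include <linux/interrupt.h>
#include <linux/rtc.h>

/* Sketch of a driver interrupt handler reporting one alarm event. */
static irqreturn_t foo_rtc_interrupt(int irq, void *dev_id)
{
        struct rtc_device *rtc = dev_id;

        /* safe from hard irq context: the core just defers the real work */
        rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
        return IRQ_HANDLED;
}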
@@ -477,18 +430,20 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
         int err = 0;
         unsigned long flags;
 
-        if (rtc->ops->irq_set_state == NULL)
-                return -ENXIO;
-
         spin_lock_irqsave(&rtc->irq_task_lock, flags);
         if (rtc->irq_task != NULL && task == NULL)
                 err = -EBUSY;
         if (rtc->irq_task != task)
                 err = -EACCES;
-        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
-        if (err == 0)
-                err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+        if (enabled) {
+                ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+                hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+        } else {
+                hrtimer_cancel(&rtc->pie_timer);
+        }
+        rtc->pie_enabled = enabled;
+        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
         return err;
 }
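rtc_irq_set_state() above no longer depends on a driver irq_set_state() hook: periodic interrupts are emulated in the core by starting or cancelling the pie_timer hrtimer at rtc->irq_freq. The rtc_task-based kernel interface it serves is unchanged; a hedged sketch of a consumer (hypothetical foo_* names, arbitrary 64 Hz rate):

#include <linux/rtc.h>

/* Sketch: hypothetical consumer of periodic RTC interrupts. */
static void foo_periodic_tick(void *private_data)
{
        /* runs once for every emulated PIE event */
}

static struct rtc_task foo_task = {
        .func           = foo_periodic_tick,
        .private_data   = NULL,
};

static int foo_start_pie(struct rtc_device *rtc)
{
        int err;

        err = rtc_irq_register(rtc, &foo_task);
        if (err)
                return err;
        err = rtc_irq_set_freq(rtc, &foo_task, 64);     /* 64 Hz */
        if (err)
                goto out;
        err = rtc_irq_set_state(rtc, &foo_task, 1);     /* start ticking */
out:
        if (err)
                rtc_irq_unregister(rtc, &foo_task);
        return err;
}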
@@ -509,21 +464,203 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
         int err = 0;
         unsigned long flags;
 
-        if (rtc->ops->irq_set_freq == NULL)
-                return -ENXIO;
-
         spin_lock_irqsave(&rtc->irq_task_lock, flags);
         if (rtc->irq_task != NULL && task == NULL)
                 err = -EBUSY;
         if (rtc->irq_task != task)
                 err = -EACCES;
-        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
         if (err == 0) {
-                err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
-                if (err == 0)
-                        rtc->irq_freq = freq;
+                rtc->irq_freq = freq;
+                if (rtc->pie_enabled) {
+                        ktime_t period;
+                        hrtimer_cancel(&rtc->pie_timer);
+                        period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+                        hrtimer_start(&rtc->pie_timer, period,
+                                        HRTIMER_MODE_REL);
+                }
         }
+        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
         return err;
 }
 EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+
+/**
+ * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
+ * @rtc rtc device
+ * @timer timer being added.
+ *
+ * Enqueues a timer onto the rtc devices timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Sets the enabled bit on the added timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+        timer->enabled = 1;
+        timerqueue_add(&rtc->timerqueue, &timer->node);
+        if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+                struct rtc_wkalrm alarm;
+                int err;
+                alarm.time = rtc_ktime_to_tm(timer->node.expires);
+                alarm.enabled = 1;
+                err = __rtc_set_alarm(rtc, &alarm);
+                if (err == -ETIME)
+                        schedule_work(&rtc->irqwork);
+                else if (err) {
+                        timerqueue_del(&rtc->timerqueue, &timer->node);
+                        timer->enabled = 0;
+                        return err;
+                }
+        }
+        return 0;
+}
+
+/**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc rtc device
+ * @timer timer being removed.
+ *
+ * Removes a timer onto the rtc devices timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Clears the enabled bit on the removed timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+        struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+        timerqueue_del(&rtc->timerqueue, &timer->node);
+        timer->enabled = 0;
+        if (next == &timer->node) {
+                struct rtc_wkalrm alarm;
+                int err;
+                next = timerqueue_getnext(&rtc->timerqueue);
+                if (!next)
+                        return;
+                alarm.time = rtc_ktime_to_tm(next->expires);
+                alarm.enabled = 1;
+                err = __rtc_set_alarm(rtc, &alarm);
+                if (err == -ETIME)
+                        schedule_work(&rtc->irqwork);
+        }
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @rtc rtc device
+ * @timer timer being removed.
+ *
+ * Expires rtc timers. Reprograms next alarm event if needed.
+ * Called via worktask.
+ *
+ * Serializes access to timerqueue via ops_lock mutex
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+        struct rtc_timer *timer;
+        struct timerqueue_node *next;
+        ktime_t now;
+        struct rtc_time tm;
+
+        struct rtc_device *rtc =
+                container_of(work, struct rtc_device, irqwork);
+
+        mutex_lock(&rtc->ops_lock);
+again:
+        __rtc_read_time(rtc, &tm);
+        now = rtc_tm_to_ktime(tm);
+        while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+                if (next->expires.tv64 > now.tv64)
+                        break;
+
+                /* expire timer */
+                timer = container_of(next, struct rtc_timer, node);
+                timerqueue_del(&rtc->timerqueue, &timer->node);
+                timer->enabled = 0;
+                if (timer->task.func)
+                        timer->task.func(timer->task.private_data);
+
+                /* Re-add/fwd periodic timers */
+                if (ktime_to_ns(timer->period)) {
+                        timer->node.expires = ktime_add(timer->node.expires,
+                                                        timer->period);
+                        timer->enabled = 1;
+                        timerqueue_add(&rtc->timerqueue, &timer->node);
+                }
+        }
+
+        /* Set next alarm */
+        if (next) {
+                struct rtc_wkalrm alarm;
+                int err;
+                alarm.time = rtc_ktime_to_tm(next->expires);
+                alarm.enabled = 1;
+                err = __rtc_set_alarm(rtc, &alarm);
+                if (err == -ETIME)
+                        goto again;
+        }
+
+        mutex_unlock(&rtc->ops_lock);
+}
+
+
+/* rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be intiialized
+ * @f: function pointer to be called when timer fires
+ * @data: private data passed to function pointer
+ *
+ * Kernel interface to initializing an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
+{
+        timerqueue_init(&timer->node);
+        timer->enabled = 0;
+        timer->task.func = f;
+        timer->task.private_data = data;
+}
+
+/* rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @ rtc: rtc device to be used
+ * @ timer: timer being set
+ * @ expires: time at which to expire the timer
+ * @ period: period that the timer will recur
+ *
+ * Kernel interface to set an rtc_timer
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+                        ktime_t expires, ktime_t period)
+{
+        int ret = 0;
+        mutex_lock(&rtc->ops_lock);
+        if (timer->enabled)
+                rtc_timer_remove(rtc, timer);
+
+        timer->node.expires = expires;
+        timer->period = period;
+
+        ret = rtc_timer_enqueue(rtc, timer);
+
+        mutex_unlock(&rtc->ops_lock);
+        return ret;
+}
+
+/* rtc_timer_cancel - Stops an rtc_timer
+ * @ rtc: rtc device to be used
+ * @ timer: timer being set
+ *
+ * Kernel interface to cancel an rtc_timer
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
+{
+        int ret = 0;
+        mutex_lock(&rtc->ops_lock);
+        if (timer->enabled)
+                rtc_timer_remove(rtc, timer);
+        mutex_unlock(&rtc->ops_lock);
+        return ret;
+}
+
+
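Taken together, rtc_timer_init(), rtc_timer_start() and rtc_timer_cancel() form the new in-kernel timer interface that the aie/uie/pie plumbing above is built on. They are not exported in this patch, so only built-in users of the RTC core can call them. A hedged sketch of such a user, modelled on how uie_rtctimer is armed in rtc_update_irq_enable() (the demo_* names and the 30-second period are invented):

#include <linux/rtc.h>

static struct rtc_timer demo_timer;

/* callback signature matches what rtc_timer_init() expects */
static void demo_timer_fn(void *private_data)
{
        struct rtc_device *rtc = private_data;

        dev_info(&rtc->dev, "demo rtc_timer fired\n");
}

static int demo_timer_setup(struct rtc_device *rtc)
{
        struct rtc_time tm;
        ktime_t now, period = ktime_set(30, 0);         /* every 30 seconds */
        int err;

        rtc_timer_init(&demo_timer, demo_timer_fn, rtc);

        err = rtc_read_time(rtc, &tm);
        if (err)
                return err;
        now = rtc_tm_to_ktime(tm);

        /* first expiry 30s from now, then recurring every 30s */
        return rtc_timer_start(rtc, &demo_timer, ktime_add(now, period), period);
}

static void demo_timer_teardown(struct rtc_device *rtc)
{
        rtc_timer_cancel(rtc, &demo_timer);
}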