Diffstat (limited to 'drivers/rtc/interface.c')
 -rw-r--r--  drivers/rtc/interface.c | 557
 1 files changed, 356 insertions, 201 deletions
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c816238aa..cb2f0728fd7 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,15 +14,14 @@
 #include <linux/rtc.h>
 #include <linux/sched.h>
 #include <linux/log2.h>
+#include <linux/workqueue.h>
 
-int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+
+static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 {
 	int err;
-
-	err = mutex_lock_interruptible(&rtc->ops_lock);
-	if (err)
-		return err;
-
 	if (!rtc->ops)
 		err = -ENODEV;
 	else if (!rtc->ops->read_time)
@@ -31,7 +30,18 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 		memset(tm, 0, sizeof(struct rtc_time));
 		err = rtc->ops->read_time(rtc->dev.parent, tm);
 	}
+	return err;
+}
 
+int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+{
+	int err;
+
+	err = mutex_lock_interruptible(&rtc->ops_lock);
+	if (err)
+		return err;
+
+	err = __rtc_read_time(rtc, tm);
 	mutex_unlock(&rtc->ops_lock);
 	return err;
 }
@@ -106,188 +116,60 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
 }
 EXPORT_SYMBOL_GPL(rtc_set_mmss);
 
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	int err;
 
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
 	if (rtc->ops == NULL)
 		err = -ENODEV;
 	else if (!rtc->ops->read_alarm)
 		err = -EINVAL;
 	else {
 		memset(alarm, 0, sizeof(struct rtc_wkalrm));
-		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
+		alarm->enabled = rtc->aie_timer.enabled;
+		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
 	}
-
 	mutex_unlock(&rtc->ops_lock);
+
 	return err;
 }
+EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
-int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
+	struct rtc_time tm;
+	long now, scheduled;
 	int err;
-	struct rtc_time before, now;
-	int first_time = 1;
-	unsigned long t_now, t_alm;
-	enum { none, day, month, year } missing = none;
-	unsigned days;
-
-	/* The lower level RTC driver may return -1 in some fields,
-	 * creating invalid alarm->time values, for reasons like:
-	 *
-	 * - The hardware may not be capable of filling them in;
-	 *   many alarms match only on time-of-day fields, not
-	 *   day/month/year calendar data.
-	 *
-	 * - Some hardware uses illegal values as "wildcard" match
-	 *   values, which non-Linux firmware (like a BIOS) may try
-	 *   to set up as e.g. "alarm 15 minutes after each hour".
-	 *   Linux uses only oneshot alarms.
-	 *
-	 * When we see that here, we deal with it by using values from
-	 * a current RTC timestamp for any missing (-1) values. The
-	 * RTC driver prevents "periodic alarm" modes.
-	 *
-	 * But this can be racey, because some fields of the RTC timestamp
-	 * may have wrapped in the interval since we read the RTC alarm,
-	 * which would lead to us inserting inconsistent values in place
-	 * of the -1 fields.
-	 *
-	 * Reading the alarm and timestamp in the reverse sequence
-	 * would have the same race condition, and not solve the issue.
-	 *
-	 * So, we must first read the RTC timestamp,
-	 * then read the RTC alarm value,
-	 * and then read a second RTC timestamp.
-	 *
-	 * If any fields of the second timestamp have changed
-	 * when compared with the first timestamp, then we know
-	 * our timestamp may be inconsistent with that used by
-	 * the low-level rtc_read_alarm_internal() function.
-	 *
-	 * So, when the two timestamps disagree, we just loop and do
-	 * the process again to get a fully consistent set of values.
-	 *
-	 * This could all instead be done in the lower level driver,
-	 * but since more than one lower level RTC implementation needs it,
-	 * then it's probably best best to do it here instead of there..
-	 */
 
-	/* Get the "before" timestamp */
-	err = rtc_read_time(rtc, &before);
-	if (err < 0)
+	err = rtc_valid_tm(&alarm->time);
+	if (err)
 		return err;
-	do {
-		if (!first_time)
-			memcpy(&before, &now, sizeof(struct rtc_time));
-		first_time = 0;
-
-		/* get the RTC alarm values, which may be incomplete */
-		err = rtc_read_alarm_internal(rtc, alarm);
-		if (err)
-			return err;
-		if (!alarm->enabled)
-			return 0;
-
-		/* full-function RTCs won't have such missing fields */
-		if (rtc_valid_tm(&alarm->time) == 0)
-			return 0;
-
-		/* get the "after" timestamp, to detect wrapped fields */
-		err = rtc_read_time(rtc, &now);
-		if (err < 0)
-			return err;
-
-		/* note that tm_sec is a "don't care" value here: */
-	} while (before.tm_min != now.tm_min
-		|| before.tm_hour != now.tm_hour
-		|| before.tm_mon != now.tm_mon
-		|| before.tm_year != now.tm_year);
+	rtc_tm_to_time(&alarm->time, &scheduled);
 
-	/* Fill in the missing alarm fields using the timestamp; we
-	 * know there's at least one since alarm->time is invalid.
+	/* Make sure we're not setting alarms in the past */
+	err = __rtc_read_time(rtc, &tm);
+	rtc_tm_to_time(&tm, &now);
+	if (scheduled <= now)
+		return -ETIME;
+	/*
+	 * XXX - We just checked to make sure the alarm time is not
+	 * in the past, but there is still a race window where if
+	 * the is alarm set for the next second and the second ticks
+	 * over right here, before we set the alarm.
 	 */
-	if (alarm->time.tm_sec == -1)
-		alarm->time.tm_sec = now.tm_sec;
-	if (alarm->time.tm_min == -1)
-		alarm->time.tm_min = now.tm_min;
-	if (alarm->time.tm_hour == -1)
-		alarm->time.tm_hour = now.tm_hour;
-
-	/* For simplicity, only support date rollover for now */
-	if (alarm->time.tm_mday == -1) {
-		alarm->time.tm_mday = now.tm_mday;
-		missing = day;
-	}
-	if (alarm->time.tm_mon == -1) {
-		alarm->time.tm_mon = now.tm_mon;
-		if (missing == none)
-			missing = month;
-	}
-	if (alarm->time.tm_year == -1) {
-		alarm->time.tm_year = now.tm_year;
-		if (missing == none)
-			missing = year;
-	}
 
-	/* with luck, no rollover is needed */
-	rtc_tm_to_time(&now, &t_now);
-	rtc_tm_to_time(&alarm->time, &t_alm);
-	if (t_now < t_alm)
-		goto done;
-
-	switch (missing) {
-
-	/* 24 hour rollover ... if it's now 10am Monday, an alarm that
-	 * that will trigger at 5am will do so at 5am Tuesday, which
-	 * could also be in the next month or year. This is a common
-	 * case, especially for PCs.
-	 */
-	case day:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
-		t_alm += 24 * 60 * 60;
-		rtc_time_to_tm(t_alm, &alarm->time);
-		break;
-
-	/* Month rollover ... if it's the 31th, an alarm on the 3rd will
-	 * be next month. An alarm matching on the 30th, 29th, or 28th
-	 * may end up in the month after that! Many newer PCs support
-	 * this type of alarm.
-	 */
-	case month:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
-		do {
-			if (alarm->time.tm_mon < 11)
-				alarm->time.tm_mon++;
-			else {
-				alarm->time.tm_mon = 0;
-				alarm->time.tm_year++;
-			}
-			days = rtc_month_days(alarm->time.tm_mon,
-					alarm->time.tm_year);
-		} while (days < alarm->time.tm_mday);
-		break;
-
-	/* Year rollover ... easy except for leap years! */
-	case year:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
-		do {
-			alarm->time.tm_year++;
-		} while (rtc_valid_tm(&alarm->time) != 0);
-		break;
-
-	default:
-		dev_warn(&rtc->dev, "alarm rollover not handled\n");
-	}
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->set_alarm)
+		err = -EINVAL;
+	else
+		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
 
-done:
-	return 0;
+	return err;
 }
-EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
@@ -300,14 +182,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->set_alarm)
-		err = -EINVAL;
-	else
-		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
+	if (rtc->aie_timer.enabled) {
+		rtc_timer_remove(rtc, &rtc->aie_timer);
+	}
+	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+	rtc->aie_timer.period = ktime_set(0, 0);
+	if (alarm->enabled) {
+		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+	}
 	mutex_unlock(&rtc->ops_lock);
 	return err;
 }
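With the two hunks above, rtc_set_alarm() no longer programs the alarm hardware directly: the requested alarm becomes rtc->aie_timer, and only whichever rtc_timer sits at the head of the per-device timerqueue reaches ops->set_alarm through __rtc_set_alarm(). A minimal sketch of that reprogramming step, built only from helpers introduced elsewhere in this patch (the wrapper's name is invented for illustration, and ops_lock is assumed to be held by the caller):

static void example_program_next_event(struct rtc_device *rtc)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	struct rtc_wkalrm alarm;

	if (!next)
		return;		/* nothing queued, leave the hardware alone */

	alarm.time = rtc_ktime_to_tm(next->expires);
	alarm.enabled = 1;
	if (__rtc_set_alarm(rtc, &alarm) == -ETIME)
		schedule_work(&rtc->irqwork);	/* head already expired, let the worker run it */
}

This is the same pattern rtc_timer_remove() and rtc_timer_do_work() use later in the patch when the queue head changes.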
@@ -319,7 +201,16 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 	if (err)
 		return err;
 
-	if (!rtc->ops)
+	if (rtc->aie_timer.enabled != enabled) {
+		if (enabled)
+			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+		else
+			rtc_timer_remove(rtc, &rtc->aie_timer);
+	}
+
+	if (err)
+		/* nothing */;
+	else if (!rtc->ops)
 		err = -ENODEV;
 	else if (!rtc->ops->alarm_irq_enable)
 		err = -EINVAL;
@@ -340,19 +231,28 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
 	if (enabled == 0 && rtc->uie_irq_active) {
 		mutex_unlock(&rtc->ops_lock);
-		return rtc_dev_update_irq_enable_emul(rtc, enabled);
+		return rtc_dev_update_irq_enable_emul(rtc, 0);
 	}
 #endif
+	/* make sure we're changing state */
+	if (rtc->uie_rtctimer.enabled == enabled)
+		goto out;
+
+	if (enabled) {
+		struct rtc_time tm;
+		ktime_t now, onesec;
+
+		__rtc_read_time(rtc, &tm);
+		onesec = ktime_set(1, 0);
+		now = rtc_tm_to_ktime(tm);
+		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+		rtc->uie_rtctimer.period = ktime_set(1, 0);
+		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+	} else
+		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
 
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->update_irq_enable)
-		err = -EINVAL;
-	else
-		err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
-
+out:
 	mutex_unlock(&rtc->ops_lock);
-
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
 	/*
 	 * Enable emulation if the driver did not provide
@@ -364,25 +264,30 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
 #endif
 	return err;
+
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
 
+
 /**
- * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
- * @rtc: the rtc device
- * @num: how many irqs are being reported (usually one)
- * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
- * Context: any
+ * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
+ * @rtc: pointer to the rtc device
+ *
+ * This function is called when an AIE, UIE or PIE mode interrupt
+ * has occured (or been emulated).
+ *
+ * Triggers the registered irq_task function callback.
  */
-void rtc_update_irq(struct rtc_device *rtc,
-		unsigned long num, unsigned long events)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
 {
 	unsigned long flags;
 
+	/* mark one irq of the appropriate mode */
 	spin_lock_irqsave(&rtc->irq_lock, flags);
-	rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
+	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
 	spin_unlock_irqrestore(&rtc->irq_lock, flags);
 
+	/* call the task func */
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task)
 		rtc->irq_task->func(rtc->irq_task->private_data);
@@ -391,6 +296,69 @@ void rtc_update_irq(struct rtc_device *rtc,
 	wake_up_interruptible(&rtc->irq_queue);
 	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
 }
+
+
+/**
+ * rtc_aie_update_irq - AIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This functions is called when the aie_timer expires.
+ */
+void rtc_aie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
+}
+
+
+/**
+ * rtc_uie_update_irq - UIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This functions is called when the uie_timer expires.
+ */
+void rtc_uie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1, RTC_UF);
+}
+
+
+/**
+ * rtc_pie_update_irq - PIE mode hrtimer hook
+ * @timer: pointer to the pie mode hrtimer
+ *
+ * This function is used to emulate PIE mode interrupts
+ * using an hrtimer. This function is called when the periodic
+ * hrtimer expires.
+ */
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
+{
+	struct rtc_device *rtc;
+	ktime_t period;
+	int count;
+	rtc = container_of(timer, struct rtc_device, pie_timer);
+
+	period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+	count = hrtimer_forward_now(timer, period);
+
+	rtc_handle_legacy_irq(rtc, count, RTC_PF);
+
+	return HRTIMER_RESTART;
+}
+
+/**
+ * rtc_update_irq - Triggered when a RTC interrupt occurs.
+ * @rtc: the rtc device
+ * @num: how many irqs are being reported (usually one)
+ * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
+ * Context: any
+ */
+void rtc_update_irq(struct rtc_device *rtc,
+		unsigned long num, unsigned long events)
+{
+	schedule_work(&rtc->irqwork);
+}
 EXPORT_SYMBOL_GPL(rtc_update_irq);
 
 static int __rtc_match(struct device *dev, void *data)
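Because rtc_update_irq() now only schedules rtc->irqwork, drivers keep exactly the calling convention they had before, and the call remains safe from hard interrupt context. A hypothetical driver interrupt handler illustrating that (the handler and device names are invented for illustration, not part of this patch):

static irqreturn_t example_rtc_irq_handler(int irq, void *dev_id)
{
	struct rtc_device *rtc = dev_id;

	/* report one alarm interrupt; rtc_timer_do_work() does the rest */
	rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);

	return IRQ_HANDLED;
}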
@@ -477,18 +445,20 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_state == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
-	if (err == 0)
-		err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+	if (enabled) {
+		ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+	} else {
+		hrtimer_cancel(&rtc->pie_timer);
+	}
+	rtc->pie_enabled = enabled;
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
 	return err;
 }
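rtc_irq_set_state() (above) and rtc_irq_set_freq() (below) now emulate periodic PIE interrupts with the pie_timer hrtimer instead of calling into the driver, so the tick period is derived purely from irq_freq. A worked example of that computation, assuming a periodic frequency of 64 Hz (the value is only an illustration):

	/* NSEC_PER_SEC / 64 = 1000000000 / 64 = 15625000 ns, i.e. 15.625 ms */
	ktime_t period = ktime_set(0, NSEC_PER_SEC / 64);

	hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);

hrtimer_forward_now() in rtc_pie_update_irq() then reports how many whole periods elapsed as the "num" argument to rtc_handle_legacy_irq().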
@@ -509,21 +479,206 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_freq == NULL)
-		return -ENXIO;
+	if (freq <= 0)
+		return -EINVAL;
 
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
 	if (err == 0) {
-		err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
-		if (err == 0)
-			rtc->irq_freq = freq;
+		rtc->irq_freq = freq;
+		if (rtc->pie_enabled) {
+			ktime_t period;
+			hrtimer_cancel(&rtc->pie_timer);
+			period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+			hrtimer_start(&rtc->pie_timer, period,
+					HRTIMER_MODE_REL);
+		}
 	}
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 	return err;
 }
 EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+
+/**
+ * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
+ * @rtc rtc device
+ * @timer timer being added.
+ *
+ * Enqueues a timer onto the rtc devices timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Sets the enabled bit on the added timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	timer->enabled = 1;
+	timerqueue_add(&rtc->timerqueue, &timer->node);
+	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(timer->node.expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+		else if (err) {
+			timerqueue_del(&rtc->timerqueue, &timer->node);
+			timer->enabled = 0;
+			return err;
+		}
+	}
+	return 0;
+}
+
+/**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc rtc device
+ * @timer timer being removed.
+ *
+ * Removes a timer onto the rtc devices timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Clears the enabled bit on the removed timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+	timerqueue_del(&rtc->timerqueue, &timer->node);
+	timer->enabled = 0;
+	if (next == &timer->node) {
+		struct rtc_wkalrm alarm;
+		int err;
+		next = timerqueue_getnext(&rtc->timerqueue);
+		if (!next)
+			return;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @rtc rtc device
+ * @timer timer being removed.
+ *
+ * Expires rtc timers. Reprograms next alarm event if needed.
+ * Called via worktask.
+ *
+ * Serializes access to timerqueue via ops_lock mutex
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+	struct rtc_timer *timer;
+	struct timerqueue_node *next;
+	ktime_t now;
+	struct rtc_time tm;
+
+	struct rtc_device *rtc =
+			container_of(work, struct rtc_device, irqwork);
+
+	mutex_lock(&rtc->ops_lock);
+again:
+	__rtc_read_time(rtc, &tm);
+	now = rtc_tm_to_ktime(tm);
+	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+		if (next->expires.tv64 > now.tv64)
+			break;
+
+		/* expire timer */
+		timer = container_of(next, struct rtc_timer, node);
+		timerqueue_del(&rtc->timerqueue, &timer->node);
+		timer->enabled = 0;
+		if (timer->task.func)
+			timer->task.func(timer->task.private_data);
+
+		/* Re-add/fwd periodic timers */
+		if (ktime_to_ns(timer->period)) {
+			timer->node.expires = ktime_add(timer->node.expires,
+							timer->period);
+			timer->enabled = 1;
+			timerqueue_add(&rtc->timerqueue, &timer->node);
+		}
+	}
+
+	/* Set next alarm */
+	if (next) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			goto again;
+	}
+
+	mutex_unlock(&rtc->ops_lock);
+}
+
+
+/* rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be intiialized
+ * @f: function pointer to be called when timer fires
+ * @data: private data passed to function pointer
+ *
+ * Kernel interface to initializing an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
+{
+	timerqueue_init(&timer->node);
+	timer->enabled = 0;
+	timer->task.func = f;
+	timer->task.private_data = data;
+}
+
+/* rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @ rtc: rtc device to be used
+ * @ timer: timer being set
+ * @ expires: time at which to expire the timer
+ * @ period: period that the timer will recur
+ *
+ * Kernel interface to set an rtc_timer
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+			ktime_t expires, ktime_t period)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+
+	timer->node.expires = expires;
+	timer->period = period;
+
+	ret = rtc_timer_enqueue(rtc, timer);
+
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+/* rtc_timer_cancel - Stops an rtc_timer
+ * @ rtc: rtc device to be used
+ * @ timer: timer being set
+ *
+ * Kernel interface to cancel an rtc_timer
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+
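The rtc_timer_init()/rtc_timer_start()/rtc_timer_cancel() functions added at the end of the hunk form the kernel-internal interface to the new timerqueue. A hypothetical user, with the callback name and the 30-second one-shot expiry chosen purely for illustration (error handling trimmed):

static void example_timer_fired(void *private)
{
	struct rtc_device *rtc = private;

	dev_info(&rtc->dev, "example rtc_timer expired\n");
}

static int example_arm_timer(struct rtc_device *rtc)
{
	static struct rtc_timer example_timer;
	struct rtc_time tm;
	ktime_t expires;

	rtc_timer_init(&example_timer, example_timer_fired, rtc);

	/* fire 30 seconds from the current RTC time, one-shot (period = 0) */
	rtc_read_time(rtc, &tm);
	expires = ktime_add(rtc_tm_to_ktime(tm), ktime_set(30, 0));

	return rtc_timer_start(rtc, &example_timer, expires, ktime_set(0, 0));
}

Tearing the timer back down is symmetric: rtc_timer_cancel(rtc, &example_timer) removes it under ops_lock and reprograms the hardware alarm for whatever is queued next.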
