Diffstat (limited to 'kernel')

 kernel/compat.c           |   3
 kernel/irq/manage.c       |  49
 kernel/irq/spurious.c     |   4
 kernel/kexec.c            |   2
 kernel/module.c           | 319
 kernel/posix-cpu-timers.c |  11
 kernel/ptrace.c           |   2
 kernel/sched.c            |   6
 kernel/sched_debug.c      |   4
 kernel/softirq.c          |  20
 kernel/time.c             |  54
 kernel/time/ntp.c         | 398
 kernel/time/timekeeping.c |  17
 kernel/workqueue.c        |   6
 14 files changed, 482 insertions(+), 413 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 4a856a3643bb..32c254a8ab9a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -955,7 +955,8 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
 	    __put_user(txc.jitcnt, &utp->jitcnt) ||
 	    __put_user(txc.calcnt, &utp->calcnt) ||
 	    __put_user(txc.errcnt, &utp->errcnt) ||
-	    __put_user(txc.stbcnt, &utp->stbcnt))
+	    __put_user(txc.stbcnt, &utp->stbcnt) ||
+	    __put_user(txc.tai, &utp->tai))
 		ret = -EFAULT;

 	return ret;
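The copy-out above chains __put_user() calls with ||, so a single branch catches a fault anywhere in the sequence; the fix merely extends the chain with the new tai field. Below is a minimal userspace sketch of the same short-circuiting error-accumulation idiom — put_field() is a hypothetical stand-in for __put_user(), which exists only in kernel context:

	#include <stdio.h>

	/* Stand-in for __put_user(): 0 on success, nonzero on failure. */
	static int put_field(long val, long *dst)
	{
		if (!dst)
			return -1;	/* simulate a faulting user pointer */
		*dst = val;
		return 0;
	}

	struct utimex { long jitcnt, calcnt, errcnt, stbcnt, tai; };

	int main(void)
	{
		struct utimex u;
		int ret = 0;

		/* One branch covers the whole chain; || short-circuits at
		 * the first store that fails. */
		if (put_field(1, &u.jitcnt) ||
		    put_field(2, &u.calcnt) ||
		    put_field(3, &u.errcnt) ||
		    put_field(4, &u.stbcnt) ||
		    put_field(5, &u.tai))
			ret = -14;	/* -EFAULT */

		printf("ret=%d tai=%ld\n", ret, u.tai);
		return 0;
	}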
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46e4ad1723f0..46d6611a33bb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -150,6 +150,26 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);

+static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+{
+	switch (desc->depth) {
+	case 0:
+		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+		WARN_ON(1);
+		break;
+	case 1: {
+		unsigned int status = desc->status & ~IRQ_DISABLED;
+
+		/* Prevent probing on this irq: */
+		desc->status = status | IRQ_NOPROBE;
+		check_irq_resend(desc, irq);
+		/* fall-through */
+	}
+	default:
+		desc->depth--;
+	}
+}
+
 /**
  *	enable_irq - enable handling of an irq
  *	@irq: Interrupt to enable
@@ -169,22 +189,7 @@ void enable_irq(unsigned int irq)
 		return;

 	spin_lock_irqsave(&desc->lock, flags);
-	switch (desc->depth) {
-	case 0:
-		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
-		WARN_ON(1);
-		break;
-	case 1: {
-		unsigned int status = desc->status & ~IRQ_DISABLED;
-
-		/* Prevent probing on this irq: */
-		desc->status = status | IRQ_NOPROBE;
-		check_irq_resend(desc, irq);
-		/* fall-through */
-	}
-	default:
-		desc->depth--;
-	}
+	__enable_irq(desc, irq);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -365,7 +370,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		compat_irq_chip_set_default_handler(desc);

 		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
-				  IRQ_INPROGRESS);
+				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
@@ -381,6 +386,16 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
+
+	/*
+	 * Check whether we disabled the irq via the spurious handler
+	 * before. Reenable it and give it another chance.
+	 */
+	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
+		desc->status &= ~IRQ_SPURIOUS_DISABLED;
+		__enable_irq(desc, irq);
+	}
+
 	spin_unlock_irqrestore(&desc->lock, flags);

 	new->irq = irq;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 088dabbf2d6a..c66d3f10e853 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -209,8 +209,8 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		 * Now kill the IRQ
 		 */
 		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-		desc->status |= IRQ_DISABLED;
-		desc->depth = 1;
+		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+		desc->depth++;
 		desc->chip->disable(irq);
 	}
 	desc->irqs_unhandled = 0;
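Both IRQ patches above revolve around desc->depth, the nested-disable count: __enable_irq() really unmasks the line only on the 1 -> 0 transition, and the spurious-interrupt detector now increments the count (and marks IRQ_SPURIOUS_DISABLED) instead of forcing it to 1, so the re-enable added to setup_irq() can undo exactly one level. A simplified, lock-free userspace sketch of that counting scheme (the my_* names and flat struct are invented for the example):

	#include <stdio.h>

	struct irq_desc { int depth; int disabled; };

	static void my_disable_irq(struct irq_desc *d)
	{
		if (d->depth++ == 0)
			d->disabled = 1;	/* really mask on 0 -> 1 */
	}

	static void my_enable_irq(struct irq_desc *d)
	{
		switch (d->depth) {
		case 0:
			fprintf(stderr, "Unbalanced enable\n");
			break;
		case 1:
			d->disabled = 0;	/* really unmask on 1 -> 0 */
			/* fall through */
		default:
			d->depth--;
		}
	}

	int main(void)
	{
		struct irq_desc d = { 0, 0 };

		my_disable_irq(&d);
		my_disable_irq(&d);	/* nested: one enable must not unmask */
		my_enable_irq(&d);
		printf("after one enable:  depth=%d disabled=%d\n",
		       d.depth, d.disabled);
		my_enable_irq(&d);
		printf("after two enables: depth=%d disabled=%d\n",
		       d.depth, d.disabled);
		return 0;
	}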
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cb85c79989b4..1c5fcacbcf33 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1217,7 +1217,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
 		}

 		/* match ? */
-		if (system_ram >= start && system_ram <= end) {
+		if (system_ram >= start && system_ram < end) {
 			*crash_size = size;
 			break;
 		}
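The kexec change makes the crashkernel range test half-open, [start, end): with the old inclusive <= end, a machine whose RAM exactly equalled a boundary matched two adjacent ranges. A tiny sketch with made-up ranges showing the corrected match:

	#include <stdio.h>

	struct range { unsigned long start, end, size; };

	int main(void)
	{
		/* e.g. crashkernel=0M-512M:64M,512M-1G:128M — boundaries touch */
		struct range ranges[] = {
			{ 0,         512 << 20, 64 << 20 },
			{ 512 << 20, 1UL << 30, 128 << 20 },
		};
		unsigned long system_ram = 512 << 20;	/* exactly on the boundary */
		unsigned long crash_size = 0;

		for (int i = 0; i < 2; i++) {
			/* half-open: start <= ram < end, so 512M matches once */
			if (system_ram >= ranges[i].start &&
			    system_ram < ranges[i].end) {
				crash_size = ranges[i].size;
				break;
			}
		}
		printf("crash_size=%luM\n", crash_size >> 20);	/* 128M */
		return 0;
	}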
diff --git a/kernel/module.c b/kernel/module.c
index 8d6cccc6c3cf..8674a390a2e8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -164,131 +164,140 @@ static const struct kernel_symbol *lookup_symbol(const char *name,
 	return NULL;
 }

-static void printk_unused_warning(const char *name)
+static bool always_ok(bool gplok, bool warn, const char *name)
 {
-	printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
-	       "however this module is using it.\n", name);
-	printk(KERN_WARNING "This symbol will go away in the future.\n");
-	printk(KERN_WARNING "Please evalute if this is the right api to use, "
-	       "and if it really is, submit a report the linux kernel "
-	       "mailinglist together with submitting your code for "
-	       "inclusion.\n");
+	return true;
 }

-/* Find a symbol, return value, crc and module which owns it */
-static unsigned long __find_symbol(const char *name,
-				   struct module **owner,
-				   const unsigned long **crc,
-				   int gplok)
+static bool printk_unused_warning(bool gplok, bool warn, const char *name)
 {
-	struct module *mod;
-	const struct kernel_symbol *ks;
-
-	/* Core kernel first. */
-	*owner = NULL;
-	ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
-	if (ks) {
-		*crc = symversion(__start___kcrctab, (ks - __start___ksymtab));
-		return ks->value;
+	if (warn) {
+		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
+		       "however this module is using it.\n", name);
+		printk(KERN_WARNING
+		       "This symbol will go away in the future.\n");
+		printk(KERN_WARNING
+		       "Please evalute if this is the right api to use and if "
+		       "it really is, submit a report the linux kernel "
+		       "mailinglist together with submitting your code for "
+		       "inclusion.\n");
 	}
-	if (gplok) {
-		ks = lookup_symbol(name, __start___ksymtab_gpl,
-				   __stop___ksymtab_gpl);
-		if (ks) {
-			*crc = symversion(__start___kcrctab_gpl,
-					  (ks - __start___ksymtab_gpl));
-			return ks->value;
-		}
-	}
-	ks = lookup_symbol(name, __start___ksymtab_gpl_future,
-			   __stop___ksymtab_gpl_future);
-	if (ks) {
-		if (!gplok) {
-			printk(KERN_WARNING "Symbol %s is being used "
-			       "by a non-GPL module, which will not "
-			       "be allowed in the future\n", name);
-			printk(KERN_WARNING "Please see the file "
-			       "Documentation/feature-removal-schedule.txt "
-			       "in the kernel source tree for more "
-			       "details.\n");
-		}
-		*crc = symversion(__start___kcrctab_gpl_future,
-				  (ks - __start___ksymtab_gpl_future));
-		return ks->value;
-	}
+	return true;
+}
+
+static bool gpl_only_unused_warning(bool gplok, bool warn, const char *name)
+{
+	if (!gplok)
+		return false;
+	return printk_unused_warning(gplok, warn, name);
+}
+
+static bool gpl_only(bool gplok, bool warn, const char *name)
+{
+	return gplok;
+}
+
+static bool warn_if_not_gpl(bool gplok, bool warn, const char *name)
+{
+	if (!gplok && warn) {
+		printk(KERN_WARNING "Symbol %s is being used "
+		       "by a non-GPL module, which will not "
+		       "be allowed in the future\n", name);
+		printk(KERN_WARNING "Please see the file "
+		       "Documentation/feature-removal-schedule.txt "
+		       "in the kernel source tree for more details.\n");
+	}
+	return true;
+}

-	ks = lookup_symbol(name, __start___ksymtab_unused,
-			   __stop___ksymtab_unused);
-	if (ks) {
-		printk_unused_warning(name);
-		*crc = symversion(__start___kcrctab_unused,
-				  (ks - __start___ksymtab_unused));
-		return ks->value;
+struct symsearch {
+	const struct kernel_symbol *start, *stop;
+	const unsigned long *crcs;
+	bool (*check)(bool gplok, bool warn, const char *name);
+};
+
+/* Look through this array of symbol tables for a symbol match which
+ * passes the check function. */
+static const struct kernel_symbol *search_symarrays(const struct symsearch *arr,
+						    unsigned int num,
+						    const char *name,
+						    bool gplok,
+						    bool warn,
+						    const unsigned long **crc)
+{
+	unsigned int i;
+	const struct kernel_symbol *ks;
+
+	for (i = 0; i < num; i++) {
+		ks = lookup_symbol(name, arr[i].start, arr[i].stop);
+		if (!ks || !arr[i].check(gplok, warn, name))
+			continue;
+
+		if (crc)
+			*crc = symversion(arr[i].crcs, ks - arr[i].start);
+		return ks;
 	}
+	return NULL;
+}
+
+/* Find a symbol, return value, (optional) crc and (optional) module
+ * which owns it */
+static unsigned long find_symbol(const char *name,
+				 struct module **owner,
+				 const unsigned long **crc,
+				 bool gplok,
+				 bool warn)
+{
+	struct module *mod;
+	const struct kernel_symbol *ks;
+	const struct symsearch arr[] = {
+		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
+		  always_ok },
+		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
+		  __start___kcrctab_gpl, gpl_only },
+		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
+		  __start___kcrctab_gpl_future, warn_if_not_gpl },
+		{ __start___ksymtab_unused, __stop___ksymtab_unused,
+		  __start___kcrctab_unused, printk_unused_warning },
+		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
+		  __start___kcrctab_unused_gpl, gpl_only_unused_warning },
+	};

-	if (gplok)
-		ks = lookup_symbol(name, __start___ksymtab_unused_gpl,
-				   __stop___ksymtab_unused_gpl);
+	/* Core kernel first. */
+	ks = search_symarrays(arr, ARRAY_SIZE(arr), name, gplok, warn, crc);
 	if (ks) {
-		printk_unused_warning(name);
-		*crc = symversion(__start___kcrctab_unused_gpl,
-				  (ks - __start___ksymtab_unused_gpl));
+		if (owner)
+			*owner = NULL;
 		return ks->value;
 	}

 	/* Now try modules. */
 	list_for_each_entry(mod, &modules, list) {
-		*owner = mod;
-		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
-		if (ks) {
-			*crc = symversion(mod->crcs, (ks - mod->syms));
-			return ks->value;
-		}
-
-		if (gplok) {
-			ks = lookup_symbol(name, mod->gpl_syms,
-					   mod->gpl_syms + mod->num_gpl_syms);
-			if (ks) {
-				*crc = symversion(mod->gpl_crcs,
-						  (ks - mod->gpl_syms));
-				return ks->value;
-			}
-		}
-		ks = lookup_symbol(name, mod->unused_syms, mod->unused_syms + mod->num_unused_syms);
+		struct symsearch arr[] = {
+			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
+			  always_ok },
+			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+			  mod->gpl_crcs, gpl_only },
+			{ mod->gpl_future_syms,
+			  mod->gpl_future_syms + mod->num_gpl_future_syms,
+			  mod->gpl_future_crcs, warn_if_not_gpl },
+			{ mod->unused_syms,
+			  mod->unused_syms + mod->num_unused_syms,
+			  mod->unused_crcs, printk_unused_warning },
+			{ mod->unused_gpl_syms,
+			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
+			  mod->unused_gpl_crcs, gpl_only_unused_warning },
+		};
+
+		ks = search_symarrays(arr, ARRAY_SIZE(arr),
+				      name, gplok, warn, crc);
 		if (ks) {
-			printk_unused_warning(name);
-			*crc = symversion(mod->unused_crcs, (ks - mod->unused_syms));
-			return ks->value;
-		}
-
-		if (gplok) {
-			ks = lookup_symbol(name, mod->unused_gpl_syms,
-					   mod->unused_gpl_syms + mod->num_unused_gpl_syms);
-			if (ks) {
-				printk_unused_warning(name);
-				*crc = symversion(mod->unused_gpl_crcs,
-						  (ks - mod->unused_gpl_syms));
-				return ks->value;
-			}
-		}
-		ks = lookup_symbol(name, mod->gpl_future_syms,
-				   (mod->gpl_future_syms +
-				    mod->num_gpl_future_syms));
-		if (ks) {
-			if (!gplok) {
-				printk(KERN_WARNING "Symbol %s is being used "
-				       "by a non-GPL module, which will not "
-				       "be allowed in the future\n", name);
-				printk(KERN_WARNING "Please see the file "
-				       "Documentation/feature-removal-schedule.txt "
-				       "in the kernel source tree for more "
-				       "details.\n");
-			}
-			*crc = symversion(mod->gpl_future_crcs,
-					  (ks - mod->gpl_future_syms));
+			if (owner)
+				*owner = mod;
 			return ks->value;
 		}
 	}
+
 	DEBUGP("Failed to find symbol %s\n", name);
 	return -ENOENT;
 }
@@ -736,12 +745,13 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
 	if (!forced && module_refcount(mod) != 0)
 		wait_for_zero_refcount(mod);

+	mutex_unlock(&module_mutex);
 	/* Final destruction now noone is using it. */
-	if (mod->exit != NULL) {
-		mutex_unlock(&module_mutex);
+	if (mod->exit != NULL)
 		mod->exit();
-		mutex_lock(&module_mutex);
-	}
+	blocking_notifier_call_chain(&module_notify_list,
+				     MODULE_STATE_GOING, mod);
+	mutex_lock(&module_mutex);
 	/* Store the name of the last unloaded module for diagnostic purposes */
 	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
 	free_module(mod);
@@ -777,10 +787,9 @@ static void print_unload_info(struct seq_file *m, struct module *mod)
 void __symbol_put(const char *symbol)
 {
 	struct module *owner;
-	const unsigned long *crc;

 	preempt_disable();
-	if (IS_ERR_VALUE(__find_symbol(symbol, &owner, &crc, 1)))
+	if (IS_ERR_VALUE(find_symbol(symbol, &owner, NULL, true, false)))
 		BUG();
 	module_put(owner);
 	preempt_enable();
@@ -924,13 +933,10 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 					  struct module *mod)
 {
 	const unsigned long *crc;
-	struct module *owner;

-	if (IS_ERR_VALUE(__find_symbol("struct_module",
-				       &owner, &crc, 1)))
+	if (IS_ERR_VALUE(find_symbol("struct_module", NULL, &crc, true, false)))
 		BUG();
-	return check_version(sechdrs, versindex, "struct_module", mod,
-			     crc);
+	return check_version(sechdrs, versindex, "struct_module", mod, crc);
 }

 /* First part is kernel version, which we ignore. */
@@ -974,8 +980,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
 	unsigned long ret;
 	const unsigned long *crc;

-	ret = __find_symbol(name, &owner, &crc,
-			    !(mod->taints & TAINT_PROPRIETARY_MODULE));
+	ret = find_symbol(name, &owner, &crc,
+			  !(mod->taints & TAINT_PROPRIETARY_MODULE), true);
 	if (!IS_ERR_VALUE(ret)) {
 		/* use_module can fail due to OOM,
 		   or module initialization or unloading */
@@ -991,6 +997,20 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
  *	J. Corbet <corbet@lwn.net>
  */
 #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+struct module_sect_attr
+{
+	struct module_attribute mattr;
+	char *name;
+	unsigned long address;
+};
+
+struct module_sect_attrs
+{
+	struct attribute_group grp;
+	unsigned int nsections;
+	struct module_sect_attr attrs[0];
+};
+
 static ssize_t module_sect_show(struct module_attribute *mattr,
 				struct module *mod, char *buf)
 {
@@ -1001,7 +1021,7 @@ static ssize_t module_sect_show(struct module_attribute *mattr,

 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
 {
-	int section;
+	unsigned int section;

 	for (section = 0; section < sect_attrs->nsections; section++)
 		kfree(sect_attrs->attrs[section].name);
@@ -1362,10 +1382,9 @@ void *__symbol_get(const char *symbol)
 {
 	struct module *owner;
 	unsigned long value;
-	const unsigned long *crc;

 	preempt_disable();
-	value = __find_symbol(symbol, &owner, &crc, 1);
+	value = find_symbol(symbol, &owner, NULL, true, true);
 	if (IS_ERR_VALUE(value))
 		value = 0;
 	else if (strong_try_module_get(owner))
@@ -1382,33 +1401,33 @@ EXPORT_SYMBOL_GPL(__symbol_get);
  */
 static int verify_export_symbols(struct module *mod)
 {
-	const char *name = NULL;
-	unsigned long i, ret = 0;
+	unsigned int i;
 	struct module *owner;
-	const unsigned long *crc;
-
-	for (i = 0; i < mod->num_syms; i++)
-		if (!IS_ERR_VALUE(__find_symbol(mod->syms[i].name,
-						&owner, &crc, 1))) {
-			name = mod->syms[i].name;
-			ret = -ENOEXEC;
-			goto dup;
-		}
+	const struct kernel_symbol *s;
+	struct {
+		const struct kernel_symbol *sym;
+		unsigned int num;
+	} arr[] = {
+		{ mod->syms, mod->num_syms },
+		{ mod->gpl_syms, mod->num_gpl_syms },
+		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
+		{ mod->unused_syms, mod->num_unused_syms },
+		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
+	};

-	for (i = 0; i < mod->num_gpl_syms; i++)
-		if (!IS_ERR_VALUE(__find_symbol(mod->gpl_syms[i].name,
-						&owner, &crc, 1))) {
-			name = mod->gpl_syms[i].name;
-			ret = -ENOEXEC;
-			goto dup;
-		}
-
-dup:
-	if (ret)
-		printk(KERN_ERR "%s: exports duplicate symbol %s (owned by %s)\n",
-		       mod->name, name, module_name(owner));
-
-	return ret;
+	for (i = 0; i < ARRAY_SIZE(arr); i++) {
+		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
+			if (!IS_ERR_VALUE(find_symbol(s->name, &owner,
+						      NULL, true, false))) {
+				printk(KERN_ERR
+				       "%s: exports duplicate symbol %s"
+				       " (owned by %s)\n",
+				       mod->name, s->name, module_name(owner));
+				return -ENOEXEC;
+			}
+		}
+	}
+	return 0;
 }

 /* Change all symbols so that st_value encodes the pointer directly. */
@@ -1814,8 +1833,9 @@ static struct module *load_module(void __user *umod,
 	unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME);
 #endif

-	/* Don't keep modinfo section */
+	/* Don't keep modinfo and version sections. */
 	sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
+	sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 #ifdef CONFIG_KALLSYMS
 	/* Keep symbol and string tables for decoding later. */
 	sechdrs[symindex].sh_flags |= SHF_ALLOC;
@@ -1977,7 +1997,8 @@ static struct module *load_module(void __user *umod,
 	mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr;
 	mod->unused_gpl_syms = (void *)sechdrs[unusedgplindex].sh_addr;
 	if (unusedgplcrcindex)
-		mod->unused_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr;
+		mod->unused_gpl_crcs
+			= (void *)sechdrs[unusedgplcrcindex].sh_addr;

 #ifdef CONFIG_MODVERSIONS
 	if ((mod->num_syms && !crcindex) ||
@@ -2171,6 +2192,8 @@ sys_init_module(void __user *umod,
 		mod->state = MODULE_STATE_GOING;
 		synchronize_sched();
 		module_put(mod);
+		blocking_notifier_call_chain(&module_notify_list,
+					     MODULE_STATE_GOING, mod);
 		mutex_lock(&module_mutex);
 		free_module(mod);
 		mutex_unlock(&module_mutex);
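The heart of the module.c changes is the find_symbol() refactor at the top of this diff: five near-identical lookup blocks collapse into a symsearch table whose per-entry check callback decides whether a hit is usable (and whether to warn) given the importer's licence. A standalone sketch of the same table-plus-predicate pattern, with simplified types, linear search in place of the kernel's lookup_symbol(), and no CRC handling:

	#include <stdio.h>
	#include <string.h>
	#include <stdbool.h>

	struct sym { const char *name; unsigned long value; };

	struct symsearch {
		const struct sym *start, *stop;
		bool (*check)(bool gplok, const char *name);
	};

	static bool always_ok(bool gplok, const char *name) { return true; }
	static bool gpl_only(bool gplok, const char *name) { return gplok; }

	static const struct sym *search(const struct symsearch *arr, unsigned n,
					const char *name, bool gplok)
	{
		for (unsigned i = 0; i < n; i++)
			for (const struct sym *s = arr[i].start;
			     s < arr[i].stop; s++)
				/* a hit only counts if the table's
				 * predicate accepts this importer */
				if (!strcmp(s->name, name) &&
				    arr[i].check(gplok, name))
					return s;
		return NULL;
	}

	static const struct sym plain[] = { { "printk", 0x1000 } };
	static const struct sym gpl[]   = { { "kthread_run", 0x2000 } };

	int main(void)
	{
		const struct symsearch arr[] = {
			{ plain, plain + 1, always_ok },
			{ gpl,   gpl + 1,   gpl_only },
		};
		const struct sym *s = search(arr, 2, "kthread_run", false);
		printf("non-GPL lookup of kthread_run: %s\n",
		       s ? "found" : "denied");
		return 0;
	}

Adding a new export class then means adding one table entry and, at most, one small predicate, instead of another copy of the lookup logic.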
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index ae5c6c147c4b..f1525ad06cb3 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -4,8 +4,9 @@

 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>

 static int check_clock(const clockid_t which_clock)
 {
@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
 			       union cpu_time_count cpu,
 			       struct timespec *tp)
 {
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		tp->tv_sec = div_long_long_rem(cpu.sched,
-					       NSEC_PER_SEC, &tp->tv_nsec);
-	} else {
+	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+		*tp = ns_to_timespec(cpu.sched);
+	else
 		cputime_to_timespec(cpu.cpu, tp);
-	}
 }

 static inline int cpu_time_before(const clockid_t which_clock,
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index dcc199c43a12..6c19e94fd0a5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -534,7 +534,6 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
 #define arch_ptrace_attach(child)	do { } while (0)
 #endif

-#ifndef __ARCH_SYS_PTRACE
 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
 {
 	struct task_struct *child;
@@ -582,7 +581,6 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
 	unlock_kernel();
 	return ret;
 }
-#endif /* __ARCH_SYS_PTRACE */

 int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
 {
diff --git a/kernel/sched.c b/kernel/sched.c
index e2f7f5acc807..34bcc5bc120e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8025,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,

 	se->my_q = cfs_rq;
 	se->load.weight = tg->shares;
-	se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+	se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
 	se->parent = parent;
 }
 #endif
@@ -8692,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
 		dequeue_entity(cfs_rq, se, 0);

 	se->load.weight = shares;
-	se->load.inv_weight = div64_64((1ULL<<32), shares);
+	se->load.inv_weight = div64_u64((1ULL<<32), shares);

 	if (on_rq)
 		enqueue_entity(cfs_rq, se, 0);
@@ -8787,7 +8787,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;

-	return div64_64(runtime << 16, period);
+	return div64_u64(runtime << 16, period);
 }

 #ifdef CONFIG_CGROUP_SCHED
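The three sched.c hunks are a straight rename — div64_64() became div64_u64() when the 64-by-64 division helpers were consolidated in linux/math64.h — but the first also shows what the quotient is for: inv_weight caches 2^32/weight so later scaling is a multiply and a shift instead of a divide. A small sketch of that fixed-point reciprocal trick (the values are illustrative, not scheduler constants):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t weight = 1024;		/* an entity's load weight */
		/* what div64_u64(1ULL << 32, weight) computes */
		uint64_t inv_weight = (1ULL << 32) / weight;

		/* delta/weight as (delta * inv_weight) >> 32: one multiply
		 * per event instead of a 64-bit divide */
		uint64_t delta = 3000000;	/* ns of runtime to scale */
		uint64_t scaled = (delta * inv_weight) >> 32;

		printf("exact=%llu approx=%llu\n",
		       (unsigned long long)(delta / weight),
		       (unsigned long long)scaled);
		return 0;
	}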
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 8a9498e7c831..6b4a12558e88 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -357,8 +357,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)

 		avg_per_cpu = p->se.sum_exec_runtime;
 		if (p->se.nr_migrations) {
-			avg_per_cpu = div64_64(avg_per_cpu,
-					       p->se.nr_migrations);
+			avg_per_cpu = div64_u64(avg_per_cpu,
+						p->se.nr_migrations);
 		} else {
 			avg_per_cpu = -1LL;
 		}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3c44956ee7e2..36e061740047 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -589,16 +589,20 @@ static void takeover_tasklets(unsigned int cpu)
 	local_irq_disable();

 	/* Find end, append list for that CPU. */
-	*__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
-	__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
-	per_cpu(tasklet_vec, cpu).head = NULL;
-	per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
+		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
+		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+		per_cpu(tasklet_vec, cpu).head = NULL;
+		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+	}
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);

-	*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-	__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
-	per_cpu(tasklet_hi_vec, cpu).head = NULL;
-	per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
+		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
+		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+		per_cpu(tasklet_hi_vec, cpu).head = NULL;
+		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+	}
 	raise_softirq_irqoff(HI_SOFTIRQ);

 	local_irq_enable();
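takeover_tasklets() splices a dead CPU's pending tasklets onto the local list through tail pointers, and the fix skips the splice when the source list is empty: an empty list's tail points back at its own head, so splicing it would hand the local CPU a tail pointer into the other CPU's (now cleared) list head, corrupting later appends. A single-threaded userspace sketch of the guarded tail-pointer splice:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int v; struct node *next; };
	struct list { struct node *head; struct node **tail; };

	static void list_init(struct list *l)
	{
		l->head = NULL;
		l->tail = &l->head;	/* empty: tail points at head */
	}

	static void list_add(struct list *l, int v)
	{
		struct node *n = malloc(sizeof(*n));
		n->v = v;
		n->next = NULL;
		*l->tail = n;		/* append in O(1) via tail pointer */
		l->tail = &n->next;
	}

	static void splice(struct list *dst, struct list *src)
	{
		/* the guard the patch adds: an empty src has
		 * tail == &head, and splicing it would clobber
		 * dst->tail with a pointer into src */
		if (src->tail == &src->head)
			return;
		*dst->tail = src->head;
		dst->tail = src->tail;
		list_init(src);
	}

	int main(void)
	{
		struct list a, b;
		list_init(&a);
		list_init(&b);
		list_add(&a, 1);
		splice(&a, &b);		/* b is empty: must be a no-op */
		list_add(&a, 2);	/* still appends correctly */
		for (struct node *n = a.head; n; n = n->next)
			printf("%d\n", n->v);
		return 0;
	}

Without the guard, the post-splice append would land on the defunct list and the element would silently vanish — the userspace analogue of the lost-tasklet bug.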
diff --git a/kernel/time.c b/kernel/time.c
index 86729042e4cd..cbe0d5a222ff 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -36,6 +36,7 @@
 #include <linux/security.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
+#include <linux/math64.h>

 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -391,13 +392,17 @@ EXPORT_SYMBOL(set_normalized_timespec);
 struct timespec ns_to_timespec(const s64 nsec)
 {
 	struct timespec ts;
+	s32 rem;

 	if (!nsec)
 		return (struct timespec) {0, 0};

-	ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
-	if (unlikely(nsec < 0))
-		set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
+	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+	if (unlikely(rem < 0)) {
+		ts.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	ts.tv_nsec = rem;

 	return ts;
 }
@@ -527,8 +532,10 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+	u32 rem;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_nsec = rem;
 }
 EXPORT_SYMBOL(jiffies_to_timespec);

@@ -566,12 +573,11 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	long tv_usec;
+	u32 rem;

-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
-	tv_usec /= NSEC_PER_USEC;
-	value->tv_usec = tv_usec;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_usec = rem / NSEC_PER_USEC;
 }
 EXPORT_SYMBOL(jiffies_to_timeval);

@@ -587,9 +593,7 @@ clock_t jiffies_to_clock_t(long x)
 	return x / (HZ / USER_HZ);
 # endif
 #else
-	u64 tmp = (u64)x * TICK_NSEC;
-	do_div(tmp, (NSEC_PER_SEC / USER_HZ));
-	return (long)tmp;
+	return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(jiffies_to_clock_t);
@@ -601,16 +605,12 @@ unsigned long clock_t_to_jiffies(unsigned long x)
 		return ~0UL;
 	return x * (HZ / USER_HZ);
 #else
-	u64 jif;
-
 	/* Don't worry about loss of precision here .. */
 	if (x >= ~0UL / HZ * USER_HZ)
 		return ~0UL;

 	/* .. but do try to contain it here */
-	jif = x * (u64) HZ;
-	do_div(jif, USER_HZ);
-	return jif;
+	return div_u64((u64)x * HZ, USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(clock_t_to_jiffies);
@@ -619,10 +619,9 @@ u64 jiffies_64_to_clock_t(u64 x)
 {
 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
 # if HZ < USER_HZ
-	x *= USER_HZ;
-	do_div(x, HZ);
+	x = div_u64(x * USER_HZ, HZ);
 # elif HZ > USER_HZ
-	do_div(x, HZ / USER_HZ);
+	x = div_u64(x, HZ / USER_HZ);
 # else
 	/* Nothing to do */
 # endif
@@ -632,8 +631,7 @@ u64 jiffies_64_to_clock_t(u64 x)
 	 * but even this doesn't overflow in hundreds of years
 	 * in 64 bits, so..
 	 */
-	x *= TICK_NSEC;
-	do_div(x, (NSEC_PER_SEC / USER_HZ));
+	x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
 #endif
 	return x;
 }
@@ -642,21 +640,17 @@ EXPORT_SYMBOL(jiffies_64_to_clock_t);
 u64 nsec_to_clock_t(u64 x)
 {
 #if (NSEC_PER_SEC % USER_HZ) == 0
-	do_div(x, (NSEC_PER_SEC / USER_HZ));
+	return div_u64(x, NSEC_PER_SEC / USER_HZ);
 #elif (USER_HZ % 512) == 0
-	x *= USER_HZ/512;
-	do_div(x, (NSEC_PER_SEC / 512));
+	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
 #else
 	/*
 	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
 	 * overflow after 64.99 years.
 	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
 	 */
-	x *= 9;
-	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2)) /
-				  USER_HZ));
+	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
 #endif
-	return x;
 }

 #if (BITS_PER_LONG < 64)
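Every time.c hunk swaps an open-coded do_div() sequence for the linux/math64.h helpers: div_u64() returns the quotient, div_u64_rem() also produces a 32-bit remainder, and div_s64_rem() is the signed variant used by ns_to_timespec(), whose fixup keeps tv_nsec in [0, NSEC_PER_SEC) when the input is negative. A userspace sketch of that normalization; the div_s64_rem() here is a stand-in built from plain C division, which truncates toward zero:

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000L

	struct timespec_s { int64_t tv_sec; long tv_nsec; };

	/* Stand-in for the kernel helper: C division truncates toward
	 * zero, so a negative dividend yields a negative remainder. */
	static int64_t div_s64_rem(int64_t dividend, int32_t divisor,
				   int32_t *rem)
	{
		*rem = dividend % divisor;
		return dividend / divisor;
	}

	static struct timespec_s ns_to_timespec(int64_t nsec)
	{
		struct timespec_s ts;
		int32_t rem;

		ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
		if (rem < 0) {	/* normalize: 0 <= tv_nsec < NSEC_PER_SEC */
			ts.tv_sec--;
			rem += NSEC_PER_SEC;
		}
		ts.tv_nsec = rem;
		return ts;
	}

	int main(void)
	{
		struct timespec_s ts = ns_to_timespec(-1500000000LL); /* -1.5s */
		/* prints sec=-2 nsec=500000000 */
		printf("sec=%lld nsec=%ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}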
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5fd9b9469770..5125ddd8196b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -15,7 +15,8 @@
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
+#include <linux/clocksource.h>
 #include <asm/timex.h>

 /*
@@ -23,11 +24,14 @@
  */
 unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
 unsigned long tick_nsec;			/* ACTHZ period (nsec) */
-static u64 tick_length, tick_length_base;
+u64 tick_length;
+static u64 tick_length_base;
+
+static struct hrtimer leap_timer;

 #define MAX_TICKADJ		500		/* microsecs */
 #define MAX_TICKADJ_SCALED	(((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-				  TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ)
+				  NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)

 /*
  * phase-lock loop variables
@@ -35,11 +39,12 @@
 /* TIME_ERROR prevents overwriting the CMOS clock */
 static int time_state = TIME_OK;	/* clock synchronization status	*/
 int time_status = STA_UNSYNC;		/* clock status bits		*/
-static s64 time_offset;			/* time adjustment (ns)		*/
+static long time_tai;			/* TAI offset (s)		*/
+static s64 time_offset;			/* time adjustment (ns)		*/
 static long time_constant = 2;		/* pll time constant		*/
 long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
 long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
-long time_freq;				/* frequency offset (scaled ppm)*/
+static s64 time_freq;			/* frequency offset (scaled ns/s)*/
 static long time_reftime;		/* time at last adjustment (s)	*/
 long time_adjust;
 static long ntp_tick_adj;
@@ -47,16 +52,56 @@ static long ntp_tick_adj;
 static void ntp_update_frequency(void)
 {
 	u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-				<< TICK_LENGTH_SHIFT;
-	second_length += (s64)ntp_tick_adj << TICK_LENGTH_SHIFT;
-	second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC);
+				<< NTP_SCALE_SHIFT;
+	second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
+	second_length += time_freq;

 	tick_length_base = second_length;

-	do_div(second_length, HZ);
-	tick_nsec = second_length >> TICK_LENGTH_SHIFT;
+	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+	tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+}
+
+static void ntp_update_offset(long offset)
+{
+	long mtemp;
+	s64 freq_adj;
+
+	if (!(time_status & STA_PLL))
+		return;

-	do_div(tick_length_base, NTP_INTERVAL_FREQ);
+	if (!(time_status & STA_NANO))
+		offset *= NSEC_PER_USEC;
+
+	/*
+	 * Scale the phase adjustment and
+	 * clamp to the operating range.
+	 */
+	offset = min(offset, MAXPHASE);
+	offset = max(offset, -MAXPHASE);
+
+	/*
+	 * Select how the frequency is to be controlled
+	 * and in which mode (PLL or FLL).
+	 */
+	if (time_status & STA_FREQHOLD || time_reftime == 0)
+		time_reftime = xtime.tv_sec;
+	mtemp = xtime.tv_sec - time_reftime;
+	time_reftime = xtime.tv_sec;
+
+	freq_adj = (s64)offset * mtemp;
+	freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
+	time_status &= ~STA_MODE;
+	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
+		freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
+				    mtemp);
+		time_status |= STA_MODE;
+	}
+	freq_adj += time_freq;
+	freq_adj = min(freq_adj, MAXFREQ_SCALED);
+	time_freq = max(freq_adj, -MAXFREQ_SCALED);
+
+	time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }

 /**
@@ -78,62 +123,70 @@ void ntp_clear(void)
 }

 /*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+ * state, the system clock is set ahead one second.
  */
-void second_overflow(void)
+static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
-	long time_adj;
+	enum hrtimer_restart res = HRTIMER_NORESTART;

-	/* Bump the maxerror field */
-	time_maxerror += MAXFREQ >> SHIFT_USEC;
-	if (time_maxerror > NTP_PHASE_LIMIT) {
-		time_maxerror = NTP_PHASE_LIMIT;
-		time_status |= STA_UNSYNC;
-	}
+	write_seqlock_irq(&xtime_lock);

-	/*
-	 * Leap second processing. If in leap-insert state at the end of the
-	 * day, the system clock is set back one second; if in leap-delete
-	 * state, the system clock is set ahead one second. The microtime()
-	 * routine or external clock driver will insure that reported time is
-	 * always monotonic. The ugly divides should be replaced.
-	 */
 	switch (time_state) {
 	case TIME_OK:
-		if (time_status & STA_INS)
-			time_state = TIME_INS;
-		else if (time_status & STA_DEL)
-			time_state = TIME_DEL;
 		break;
 	case TIME_INS:
-		if (xtime.tv_sec % 86400 == 0) {
-			xtime.tv_sec--;
-			wall_to_monotonic.tv_sec++;
-			time_state = TIME_OOP;
-			printk(KERN_NOTICE "Clock: inserting leap second "
-					"23:59:60 UTC\n");
-		}
+		xtime.tv_sec--;
+		wall_to_monotonic.tv_sec++;
+		time_state = TIME_OOP;
+		printk(KERN_NOTICE "Clock: "
+		       "inserting leap second 23:59:60 UTC\n");
+		leap_timer.expires = ktime_add_ns(leap_timer.expires,
+						  NSEC_PER_SEC);
+		res = HRTIMER_RESTART;
 		break;
 	case TIME_DEL:
-		if ((xtime.tv_sec + 1) % 86400 == 0) {
-			xtime.tv_sec++;
-			wall_to_monotonic.tv_sec--;
-			time_state = TIME_WAIT;
-			printk(KERN_NOTICE "Clock: deleting leap second "
-					"23:59:59 UTC\n");
-		}
+		xtime.tv_sec++;
+		time_tai--;
+		wall_to_monotonic.tv_sec--;
+		time_state = TIME_WAIT;
+		printk(KERN_NOTICE "Clock: "
+		       "deleting leap second 23:59:59 UTC\n");
 		break;
 	case TIME_OOP:
+		time_tai++;
 		time_state = TIME_WAIT;
-		break;
+		/* fall through */
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
 			time_state = TIME_OK;
+		break;
+	}
+	update_vsyscall(&xtime, clock);
+
+	write_sequnlock_irq(&xtime_lock);
+
+	return res;
+}
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ */
+void second_overflow(void)
+{
+	s64 time_adj;
+
+	/* Bump the maxerror field */
+	time_maxerror += MAXFREQ / NSEC_PER_USEC;
+	if (time_maxerror > NTP_PHASE_LIMIT) {
+		time_maxerror = NTP_PHASE_LIMIT;
+		time_status |= STA_UNSYNC;
 	}

 	/*
@@ -143,7 +196,7 @@ void second_overflow(void)
 	tick_length = tick_length_base;
 	time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
 	time_offset -= time_adj;
-	tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE);
+	tick_length += time_adj;

 	if (unlikely(time_adjust)) {
 		if (time_adjust > MAX_TICKADJ) {
@@ -154,25 +207,12 @@ void second_overflow(void)
 			tick_length -= MAX_TICKADJ_SCALED;
 		} else {
 			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-					NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT;
+					NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
 			time_adjust = 0;
 		}
 	}
 }

-/*
- * Return how long ticks are at the moment, that is, how much time
- * update_wall_time_one_tick will add to xtime next time we call it
- * (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds shifted by the
- * specified number of bits to the right of the binary point.
- * This function has no side-effects.
- */
-u64 current_tick_length(void)
-{
-	return tick_length;
-}
-
 #ifdef CONFIG_GENERIC_CMOS_UPDATE

 /* Disable the cmos update - used by virtualization and embedded */
@@ -236,8 +276,8 @@
  */
 int do_adjtimex(struct timex *txc)
 {
-	long mtemp, save_adjust, rem;
-	s64 freq_adj, temp64;
+	struct timespec ts;
+	long save_adjust, sec;
 	int result;

 	/* In order to modify anything, you gotta be super-user! */
@@ -247,147 +287,132 @@ int do_adjtimex(struct timex *txc)
 	/* Now we validate the data before disabling interrupts */

 	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
 		/* singleshot must not be used with any other mode bits */
-		if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
-		    txc->modes != ADJ_OFFSET_SS_READ)
+		if (txc->modes & ~ADJ_OFFSET_SS_READ)
 			return -EINVAL;
 	}

-	if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
-	/* adjustment Offset limited to +- .512 seconds */
-		if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
-			return -EINVAL;
-
 	/* if the quartz is off by more than 10% something is VERY wrong ! */
 	if (txc->modes & ADJ_TICK)
 		if (txc->tick <  900000/USER_HZ ||
 		    txc->tick > 1100000/USER_HZ)
 			return -EINVAL;

+	if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
+		hrtimer_cancel(&leap_timer);
+	getnstimeofday(&ts);
+
 	write_seqlock_irq(&xtime_lock);
-	result = time_state;	/* mostly `TIME_OK' */

 	/* Save for later - semantics of adjtime is to return old value */
 	save_adjust = time_adjust;

-#if 0	/* STA_CLOCKERR is never set yet */
-	time_status &= ~STA_CLOCKERR;		/* reset STA_CLOCKERR */
-#endif
 	/* If there are input parameters, then process them */
-	if (txc->modes)
-	{
-	    if (txc->modes & ADJ_STATUS)	/* only set allowed bits */
-		time_status =  (txc->status & ~STA_RONLY) |
-			      (time_status & STA_RONLY);
-
-	    if (txc->modes & ADJ_FREQUENCY) {	/* p. 22 */
-		if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
-		    result = -EINVAL;
-		    goto leave;
-		}
-		time_freq = ((s64)txc->freq * NSEC_PER_USEC)
-				>> (SHIFT_USEC - SHIFT_NSEC);
-	    }
-
-	    if (txc->modes & ADJ_MAXERROR) {
-		if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
-		    result = -EINVAL;
-		    goto leave;
+	if (txc->modes) {
+		if (txc->modes & ADJ_STATUS) {
+			if ((time_status & STA_PLL) &&
+			    !(txc->status & STA_PLL)) {
+				time_state = TIME_OK;
+				time_status = STA_UNSYNC;
+			}
+			/* only set allowed bits */
+			time_status &= STA_RONLY;
+			time_status |= txc->status & ~STA_RONLY;
+
+			switch (time_state) {
+			case TIME_OK:
+			start_timer:
+				sec = ts.tv_sec;
+				if (time_status & STA_INS) {
+					time_state = TIME_INS;
+					sec += 86400 - sec % 86400;
+					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+				} else if (time_status & STA_DEL) {
+					time_state = TIME_DEL;
+					sec += 86400 - (sec + 1) % 86400;
+					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+				}
+				break;
+			case TIME_INS:
+			case TIME_DEL:
+				time_state = TIME_OK;
+				goto start_timer;
+				break;
+			case TIME_WAIT:
+				if (!(time_status & (STA_INS | STA_DEL)))
+					time_state = TIME_OK;
+				break;
+			case TIME_OOP:
+				hrtimer_restart(&leap_timer);
+				break;
+			}
 		}
-		time_maxerror = txc->maxerror;
-	    }

-	    if (txc->modes & ADJ_ESTERROR) {
-		if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
-		    result = -EINVAL;
-		    goto leave;
+		if (txc->modes & ADJ_NANO)
+			time_status |= STA_NANO;
+		if (txc->modes & ADJ_MICRO)
+			time_status &= ~STA_NANO;
+
+		if (txc->modes & ADJ_FREQUENCY) {
+			time_freq = (s64)txc->freq * PPM_SCALE;
+			time_freq = min(time_freq, MAXFREQ_SCALED);
+			time_freq = max(time_freq, -MAXFREQ_SCALED);
 		}
-		time_esterror = txc->esterror;
-	    }

-	    if (txc->modes & ADJ_TIMECONST) {	/* p. 24 */
-		if (txc->constant < 0) {	/* NTP v4 uses values > 6 */
-		    result = -EINVAL;
-		    goto leave;
+		if (txc->modes & ADJ_MAXERROR)
+			time_maxerror = txc->maxerror;
+		if (txc->modes & ADJ_ESTERROR)
+			time_esterror = txc->esterror;
+
+		if (txc->modes & ADJ_TIMECONST) {
+			time_constant = txc->constant;
+			if (!(time_status & STA_NANO))
+				time_constant += 4;
+			time_constant = min(time_constant, (long)MAXTC);
+			time_constant = max(time_constant, 0l);
 		}
-		time_constant = min(txc->constant + 4, (long)MAXTC);
-	    }

-	    if (txc->modes & ADJ_OFFSET) {	/* values checked earlier */
-		if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
-		    /* adjtime() is independent from ntp_adjtime() */
-		    time_adjust = txc->offset;
+		if (txc->modes & ADJ_TAI && txc->constant > 0)
+			time_tai = txc->constant;
+
+		if (txc->modes & ADJ_OFFSET) {
+			if (txc->modes == ADJ_OFFSET_SINGLESHOT)
+				/* adjtime() is independent from ntp_adjtime() */
+				time_adjust = txc->offset;
+			else
+				ntp_update_offset(txc->offset);
 		}
-		else if (time_status & STA_PLL) {
-		    time_offset = txc->offset * NSEC_PER_USEC;
+		if (txc->modes & ADJ_TICK)
+			tick_usec = txc->tick;

-		    /*
-		     * Scale the phase adjustment and
-		     * clamp to the operating range.
-		     */
-		    time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
-		    time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
-
-		    /*
-		     * Select whether the frequency is to be controlled
-		     * and in which mode (PLL or FLL). Clamp to the operating
-		     * range. Ugly multiply/divide should be replaced someday.
-		     */
-
-		    if (time_status & STA_FREQHOLD || time_reftime == 0)
-			time_reftime = xtime.tv_sec;
-		    mtemp = xtime.tv_sec - time_reftime;
-		    time_reftime = xtime.tv_sec;
-
-		    freq_adj = time_offset * mtemp;
-		    freq_adj = shift_right(freq_adj, time_constant * 2 +
-					   (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
-		    if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-			u64 utemp64;
-			temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
-			if (time_offset < 0) {
-			    utemp64 = -temp64;
-			    do_div(utemp64, mtemp);
-			    freq_adj -= utemp64;
-			} else {
-			    utemp64 = temp64;
-			    do_div(utemp64, mtemp);
-			    freq_adj += utemp64;
-			}
-		    }
-		    freq_adj += time_freq;
-		    freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
-		    time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-		    time_offset = div_long_long_rem_signed(time_offset,
-							   NTP_INTERVAL_FREQ,
-							   &rem);
-		    time_offset <<= SHIFT_UPDATE;
-		} /* STA_PLL */
-	    } /* txc->modes & ADJ_OFFSET */
-	    if (txc->modes & ADJ_TICK)
-		tick_usec = txc->tick;
-
-	    if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
-		ntp_update_frequency();
-	} /* txc->modes */
-leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
+		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+			ntp_update_frequency();
+	}
+
+	result = time_state;	/* mostly `TIME_OK' */
+	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;

 	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
 	    (txc->modes == ADJ_OFFSET_SS_READ))
 		txc->offset = save_adjust;
-	else
-		txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
-				NTP_INTERVAL_FREQ / 1000;
-	txc->freq	   = (time_freq / NSEC_PER_USEC) <<
-				(SHIFT_USEC - SHIFT_NSEC);
+	else {
+		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+					  NTP_SCALE_SHIFT);
+		if (!(time_status & STA_NANO))
+			txc->offset /= NSEC_PER_USEC;
+	}
+	txc->freq	   = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
+					 (s64)PPM_SCALE_INV,
+					 NTP_SCALE_SHIFT);
 	txc->maxerror	   = time_maxerror;
 	txc->esterror	   = time_esterror;
386 | txc->status = time_status; | 410 | txc->status = time_status; |
387 | txc->constant = time_constant; | 411 | txc->constant = time_constant; |
388 | txc->precision = 1; | 412 | txc->precision = 1; |
389 | txc->tolerance = MAXFREQ; | 413 | txc->tolerance = MAXFREQ_SCALED / PPM_SCALE; |
390 | txc->tick = tick_usec; | 414 | txc->tick = tick_usec; |
415 | txc->tai = time_tai; | ||
391 | 416 | ||
392 | /* PPS is not implemented, so these are zero */ | 417 | /* PPS is not implemented, so these are zero */ |
393 | txc->ppsfreq = 0; | 418 | txc->ppsfreq = 0; |
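When STA_INS or STA_DEL is set while the state machine is in TIME_OK, the rewritten do_adjtimex() arms leap_timer at the next UTC day boundary: for an insertion the timer fires exactly at 00:00:00, for a deletion one second earlier, on the second that gets skipped. A standalone sketch of that arithmetic (plain C; the epoch value is arbitrary):

#include <stdio.h>

#define SECS_PER_DAY 86400LL

int main(void)
{
	/* arbitrary instant 43200 s (12:00:00) into some UTC day */
	long long sec = 1209600000LL + 43200;

	/* insertion: fire exactly at the next 00:00:00 UTC */
	long long ins = sec + (SECS_PER_DAY - sec % SECS_PER_DAY);
	/* deletion: fire at the next 23:59:59 UTC, the second skipped */
	long long del = sec + (SECS_PER_DAY - (sec + 1) % SECS_PER_DAY);

	printf("insert fires at +%lld s -> %lld s past midnight\n",
	       ins - sec, ins % SECS_PER_DAY);
	printf("delete fires at +%lld s -> %lld s past midnight\n",
	       del - sec, del % SECS_PER_DAY);
	return 0;
}

For the sample instant this prints +43200 s landing 0 s past midnight for the insertion and +43199 s landing 86399 s past midnight for the deletion, matching the two expiry expressions in the TIME_OK case above.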
@@ -399,9 +424,15 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) | |||
399 | txc->errcnt = 0; | 424 | txc->errcnt = 0; |
400 | txc->stbcnt = 0; | 425 | txc->stbcnt = 0; |
401 | write_sequnlock_irq(&xtime_lock); | 426 | write_sequnlock_irq(&xtime_lock); |
402 | do_gettimeofday(&txc->time); | 427 | |
428 | txc->time.tv_sec = ts.tv_sec; | ||
429 | txc->time.tv_usec = ts.tv_nsec; | ||
430 | if (!(time_status & STA_NANO)) | ||
431 | txc->time.tv_usec /= NSEC_PER_USEC; | ||
432 | |||
403 | notify_cmos_timer(); | 433 | notify_cmos_timer(); |
404 | return(result); | 434 | |
435 | return result; | ||
405 | } | 436 | } |
406 | 437 | ||
407 | static int __init ntp_tick_adj_setup(char *str) | 438 | static int __init ntp_tick_adj_setup(char *str) |
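Note that ADJ_FREQUENCY no longer rejects out-of-range requests with -EINVAL; the value is clamped to +/-MAXFREQ_SCALED instead, and both time_freq and the txc->freq readback now use a single fixed-point format, nanoseconds per second with NTP_SCALE_SHIFT fractional bits. Userspace continues to speak "scaled ppm" (ppm with 16 fractional bits); PPM_SCALE and PPM_SCALE_INV convert between the two. A userspace model of the round trip follows; the constant values are assumed from include/linux/timex.h as of this series and should be checked against the tree:

#include <stdio.h>
#include <stdint.h>

/* assumed values, per include/linux/timex.h in this series */
#define NTP_SCALE_SHIFT     32
#define SHIFT_USEC          16
#define NSEC_PER_USEC       1000LL
#define PPM_SCALE           (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
#define PPM_SCALE_INV_SHIFT 19
#define PPM_SCALE_INV       ((1LL << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) \
			     / PPM_SCALE + 1)

int main(void)
{
	long user_freq = 12L << SHIFT_USEC;   /* 12 ppm in scaled-ppm units */

	/* ADJ_FREQUENCY path: scaled ppm -> ns/s with 32 fractional bits */
	int64_t time_freq = (int64_t)user_freq * PPM_SCALE;

	/* mirror of the txc->freq readback in the patch; plain >> stands in
	 * for shift_right(), which is fine for non-negative values */
	long back = (long)(((int32_t)(time_freq >> PPM_SCALE_INV_SHIFT) *
			    (int64_t)PPM_SCALE_INV) >> NTP_SCALE_SHIFT);

	printf("in = %ld, back = %ld (scaled ppm)\n", user_freq, back);
	return 0;
}

The multiply-by-inverse readback recovers the original scaled-ppm value exactly for this input; the +1 in PPM_SCALE_INV biases the truncated division so the round trip does not drift low.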
@@ -411,3 +442,10 @@ static int __init ntp_tick_adj_setup(char *str) | |||
411 | } | 442 | } |
412 | 443 | ||
413 | __setup("ntp_tick_adj=", ntp_tick_adj_setup); | 444 | __setup("ntp_tick_adj=", ntp_tick_adj_setup); |
445 | |||
446 | void __init ntp_init(void) | ||
447 | { | ||
448 | ntp_clear(); | ||
449 | hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | ||
450 | leap_timer.function = ntp_leap_second; | ||
451 | } | ||
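The new ntp_init() replaces the bare ntp_clear() at boot (timekeeping_init() below is updated to match) and configures leap_timer as an absolute CLOCK_REALTIME hrtimer, so its expiry follows wall-clock time across settimeofday() jumps. The handler ntp_leap_second() itself lies outside the hunks shown here; the following is only a plausible skeleton, inferred from the do_adjtimex() state machine above rather than copied from the patch (xtime, wall_to_monotonic, time_tai and friends are the usual timekeeping globals, and the timer->expires field access is era-specific):

/* sketch only: see kernel/time/ntp.c in the tree for the real handler */
static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
{
	enum hrtimer_restart res = HRTIMER_NORESTART;

	write_seqlock(&xtime_lock);

	switch (time_state) {
	case TIME_INS:
		/* midnight repeats once: step the clock back a second */
		xtime.tv_sec--;
		wall_to_monotonic.tv_sec++;
		time_state = TIME_OOP;
		/* fire once more, after the inserted second has passed */
		timer->expires = ktime_add_ns(timer->expires, NSEC_PER_SEC);
		res = HRTIMER_RESTART;
		break;
	case TIME_DEL:
		/* 23:59:59 is skipped: step the clock forward a second */
		xtime.tv_sec++;
		time_tai--;
		wall_to_monotonic.tv_sec--;
		time_state = TIME_WAIT;
		break;
	case TIME_OOP:
		time_tai++;
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
		break;
	}

	write_sequnlock(&xtime_lock);
	return res;
}

The TIME_OOP restart in do_adjtimex() (hrtimer_restart(&leap_timer) above) re-arms this same timer if userspace touches the status bits while the inserted second is in progress.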
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 2d6087c7cf98..e91c29f961c9 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -53,7 +53,7 @@ void update_xtime_cache(u64 nsec) | |||
53 | timespec_add_ns(&xtime_cache, nsec); | 53 | timespec_add_ns(&xtime_cache, nsec); |
54 | } | 54 | } |
55 | 55 | ||
56 | static struct clocksource *clock; /* pointer to current clocksource */ | 56 | struct clocksource *clock; |
57 | 57 | ||
58 | 58 | ||
59 | #ifdef CONFIG_GENERIC_TIME | 59 | #ifdef CONFIG_GENERIC_TIME |
@@ -246,7 +246,7 @@ void __init timekeeping_init(void) | |||
246 | 246 | ||
247 | write_seqlock_irqsave(&xtime_lock, flags); | 247 | write_seqlock_irqsave(&xtime_lock, flags); |
248 | 248 | ||
249 | ntp_clear(); | 249 | ntp_init(); |
250 | 250 | ||
251 | clock = clocksource_get_next(); | 251 | clock = clocksource_get_next(); |
252 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 252 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); |
@@ -371,7 +371,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, | |||
371 | * here. This is tuned so that an error of about 1 msec is adjusted | 371 | * here. This is tuned so that an error of about 1 msec is adjusted |
372 | * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). | 372 | * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). |
373 | */ | 373 | */ |
374 | error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ); | 374 | error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); |
375 | error2 = abs(error2); | 375 | error2 = abs(error2); |
376 | for (look_ahead = 0; error2 > 0; look_ahead++) | 376 | for (look_ahead = 0; error2 > 0; look_ahead++) |
377 | error2 >>= 2; | 377 | error2 >>= 2; |
@@ -380,8 +380,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, | |||
380 | * Now calculate the error in (1 << look_ahead) ticks, but first | 380 | * Now calculate the error in (1 << look_ahead) ticks, but first |
381 | * remove the single look ahead already included in the error. | 381 | * remove the single look ahead already included in the error. |
382 | */ | 382 | */ |
383 | tick_error = current_tick_length() >> | 383 | tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1); |
384 | (TICK_LENGTH_SHIFT - clock->shift + 1); | ||
385 | tick_error -= clock->xtime_interval >> 1; | 384 | tick_error -= clock->xtime_interval >> 1; |
386 | error = ((error - tick_error) >> look_ahead) + tick_error; | 385 | error = ((error - tick_error) >> look_ahead) + tick_error; |
387 | 386 | ||
@@ -412,7 +411,7 @@ static void clocksource_adjust(s64 offset) | |||
412 | s64 error, interval = clock->cycle_interval; | 411 | s64 error, interval = clock->cycle_interval; |
413 | int adj; | 412 | int adj; |
414 | 413 | ||
415 | error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1); | 414 | error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1); |
416 | if (error > interval) { | 415 | if (error > interval) { |
417 | error >>= 2; | 416 | error >>= 2; |
418 | if (likely(error <= interval)) | 417 | if (likely(error <= interval)) |
@@ -434,7 +433,7 @@ static void clocksource_adjust(s64 offset) | |||
434 | clock->xtime_interval += interval; | 433 | clock->xtime_interval += interval; |
435 | clock->xtime_nsec -= offset; | 434 | clock->xtime_nsec -= offset; |
436 | clock->error -= (interval - offset) << | 435 | clock->error -= (interval - offset) << |
437 | (TICK_LENGTH_SHIFT - clock->shift); | 436 | (NTP_SCALE_SHIFT - clock->shift); |
438 | } | 437 | } |
439 | 438 | ||
440 | /** | 439 | /** |
@@ -473,8 +472,8 @@ void update_wall_time(void) | |||
473 | } | 472 | } |
474 | 473 | ||
475 | /* accumulate error between NTP and clock interval */ | 474 | /* accumulate error between NTP and clock interval */ |
476 | clock->error += current_tick_length(); | 475 | clock->error += tick_length; |
477 | clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift); | 476 | clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift); |
478 | } | 477 | } |
479 | 478 | ||
480 | /* correct the clock when NTP error is too big */ | 479 | /* correct the clock when NTP error is too big */ |
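The timekeeping side of the series is mostly the TICK_LENGTH_SHIFT to NTP_SCALE_SHIFT rename plus replacing the current_tick_length() accessor with direct reads of tick_length: the NTP tick length and the accumulated clocksource error now visibly share one fixed-point format, nanoseconds carrying NTP_SCALE_SHIFT fractional bits. A toy userspace model of the error bookkeeping in update_wall_time(), with numbers invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define NTP_SCALE_SHIFT 32

int main(void)
{
	/* toy figures, not taken from a real clocksource */
	unsigned int shift = 22;                      /* clocksource shift */
	int64_t xtime_interval = 999999LL << shift;   /* ns/tick, shift frac bits */
	int64_t tick_length = 1000000LL << NTP_SCALE_SHIFT; /* ns/tick, 32 frac bits */
	int64_t error = 0;

	for (int tick = 1; tick <= 5; tick++) {
		/* same bookkeeping as update_wall_time() after this patch:
		 * bring the interval up to NTP_SCALE_SHIFT precision first */
		error += tick_length;
		error -= xtime_interval << (NTP_SCALE_SHIFT - shift);
		printf("tick %d: error = %lld ns\n",
		       tick, (long long)(error >> NTP_SCALE_SHIFT));
	}
	return 0;
}

With the clocksource running 1 ns/tick slow against NTP, the accumulated error grows by one whole nanosecond per tick; clocksource_adjust() then consumes this error by nudging the clock's multiplier.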
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 721093a22561..29fc39f1029c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -195,7 +195,6 @@ static void delayed_work_timer_fn(unsigned long __data) | |||
195 | int queue_delayed_work(struct workqueue_struct *wq, | 195 | int queue_delayed_work(struct workqueue_struct *wq, |
196 | struct delayed_work *dwork, unsigned long delay) | 196 | struct delayed_work *dwork, unsigned long delay) |
197 | { | 197 | { |
198 | timer_stats_timer_set_start_info(&dwork->timer); | ||
199 | if (delay == 0) | 198 | if (delay == 0) |
200 | return queue_work(wq, &dwork->work); | 199 | return queue_work(wq, &dwork->work); |
201 | 200 | ||
@@ -219,11 +218,12 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
219 | struct timer_list *timer = &dwork->timer; | 218 | struct timer_list *timer = &dwork->timer; |
220 | struct work_struct *work = &dwork->work; | 219 | struct work_struct *work = &dwork->work; |
221 | 220 | ||
222 | timer_stats_timer_set_start_info(&dwork->timer); | ||
223 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { | 221 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { |
224 | BUG_ON(timer_pending(timer)); | 222 | BUG_ON(timer_pending(timer)); |
225 | BUG_ON(!list_empty(&work->entry)); | 223 | BUG_ON(!list_empty(&work->entry)); |
226 | 224 | ||
225 | timer_stats_timer_set_start_info(&dwork->timer); | ||
226 | |||
227 | /* This stores cwq for the moment, for the timer_fn */ | 227 | /* This stores cwq for the moment, for the timer_fn */ |
228 | set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id())); | 228 | set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id())); |
229 | timer->expires = jiffies + delay; | 229 | timer->expires = jiffies + delay; |
@@ -564,7 +564,6 @@ EXPORT_SYMBOL(schedule_work); | |||
564 | int schedule_delayed_work(struct delayed_work *dwork, | 564 | int schedule_delayed_work(struct delayed_work *dwork, |
565 | unsigned long delay) | 565 | unsigned long delay) |
566 | { | 566 | { |
567 | timer_stats_timer_set_start_info(&dwork->timer); | ||
568 | return queue_delayed_work(keventd_wq, dwork, delay); | 567 | return queue_delayed_work(keventd_wq, dwork, delay); |
569 | } | 568 | } |
570 | EXPORT_SYMBOL(schedule_delayed_work); | 569 | EXPORT_SYMBOL(schedule_delayed_work); |
@@ -581,7 +580,6 @@ EXPORT_SYMBOL(schedule_delayed_work); | |||
581 | int schedule_delayed_work_on(int cpu, | 580 | int schedule_delayed_work_on(int cpu, |
582 | struct delayed_work *dwork, unsigned long delay) | 581 | struct delayed_work *dwork, unsigned long delay) |
583 | { | 582 | { |
584 | timer_stats_timer_set_start_info(&dwork->timer); | ||
585 | return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); | 583 | return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); |
586 | } | 584 | } |
587 | EXPORT_SYMBOL(schedule_delayed_work_on); | 585 | EXPORT_SYMBOL(schedule_delayed_work_on); |
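In the workqueue hunks, the duplicated timer_stats_timer_set_start_info() calls in schedule_delayed_work(), schedule_delayed_work_on() and queue_delayed_work() are dropped, and the one call left in queue_delayed_work_on() moves inside the WORK_STRUCT_PENDING test: start info is now recorded exactly once, and only when the delayed work actually arms its timer (with delay == 0, queue_delayed_work() funnels straight into queue_work() and never touches the timer). For context, a minimal, hypothetical user of this API, not part of the patch:

#include <linux/module.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	pr_info("delayed work ran\n");
}

static DECLARE_DELAYED_WORK(example_work, example_fn);

static int __init example_init(void)
{
	/* runs example_fn() on keventd roughly one second from now; the
	 * timer-stats start info is now recorded by queue_delayed_work_on()
	 * when the timer is armed, not by this wrapper */
	schedule_delayed_work(&example_work, HZ);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");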