 arch/x86/xen/time.c        | 13 +++----------
 drivers/video/fsl-diu-fb.c |  4 ++--
 include/linux/math64.h     | 21 +++++++++++++++++++++
 include/linux/time.h       | 16 ++++++----------
 lib/div64.c                | 10 ++++++++++
 mm/nommu.c                 | 21 +++------------------
 6 files changed, 45 insertions(+), 40 deletions(-)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c39e1a5aa241..52b2e3856980 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -12,6 +12,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/kernel_stat.h>
+#include <linux/math64.h>
 
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
@@ -150,11 +151,7 @@ static void do_stolen_accounting(void)
 	if (stolen < 0)
 		stolen = 0;
 
-	ticks = 0;
-	while (stolen >= NS_PER_TICK) {
-		ticks++;
-		stolen -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__get_cpu_var(residual_stolen) = stolen;
 	account_steal_time(NULL, ticks);
 
@@ -166,11 +163,7 @@ static void do_stolen_accounting(void)
 	if (blocked < 0)
 		blocked = 0;
 
-	ticks = 0;
-	while (blocked >= NS_PER_TICK) {
-		ticks++;
-		blocked -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 	__get_cpu_var(residual_blocked) = blocked;
 	account_steal_time(idle_task(smp_processor_id()), ticks);
 }
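
Illustration (not part of the patch): a minimal userspace sketch checking that the single iter_div_u64_rem() call computes the same ticks/remainder pair as the open-coded loop it replaces. The NS_PER_TICK value is a stand-in assuming HZ=250; the kernel derives the real value from its tick rate.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NS_PER_TICK (1000000000ULL / 250)	/* stand-in, assumes HZ=250 */

/* same algorithm as the kernel helper, minus the asm() barrier */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
				 uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	uint64_t stolen = 3 * NS_PER_TICK + 12345;	/* arbitrary sample */
	uint64_t rem_old = stolen, rem_new;
	uint32_t ticks_old = 0, ticks_new;

	/* the removed open-coded loop */
	while (rem_old >= NS_PER_TICK) {
		ticks_old++;
		rem_old -= NS_PER_TICK;
	}

	/* the replacement */
	ticks_new = iter_div_u64_rem(stolen, NS_PER_TICK, &rem_new);

	assert(ticks_old == ticks_new && rem_old == rem_new);
	printf("ticks=%u residual=%llu ns\n", ticks_new,
	       (unsigned long long)rem_new);
	return 0;
}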
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index b50bb03cb5ab..0a2785361ca3 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1320,7 +1320,7 @@ static void free_irq_local(int irq)
  * Power management hooks. Note that we won't be called from IRQ context,
  * unlike the blank functions above, so we may sleep.
  */
-static int fsl_diu_suspend(struct of_device *dev, pm_message_t state)
+static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state)
 {
 	struct fsl_diu_data *machine_data;
 
@@ -1330,7 +1330,7 @@ static int fsl_diu_suspend(struct of_device *dev, pm_message_t state)
 	return 0;
 }
 
-static int fsl_diu_resume(struct of_device *dev)
+static int fsl_diu_resume(struct of_device *ofdev)
 {
 	struct fsl_diu_data *machine_data;
 
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c1a5f81501ff..c87f1528703a 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -81,4 +81,25 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
 }
 #endif
 
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation. */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
 #endif /* _LINUX_MATH64_H */
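
The empty asm() with the "+rm" constraint makes the loop variable opaque to the optimizer on each iteration, so the compiler cannot strength-reduce the subtraction loop into a full 64-bit divide; the helper is only meant for dividends a few multiples of the divisor. Below is a compilable userspace copy of the same construct (an illustrative sketch, not kernel code) for inspecting the generated assembly:

#include <stdint.h>
#include <stdio.h>

static uint32_t iter_div(uint64_t dividend, uint32_t divisor,
			 uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		/* Empty asm marks 'dividend' as read and written,
		   hiding it from the optimizer so the loop stays a
		   loop instead of becoming a divide. */
		asm("" : "+rm"(dividend));
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	uint64_t rem;
	uint32_t q = iter_div(1000000007ULL, 1000000000U, &rem);

	printf("q=%u rem=%llu\n", q, (unsigned long long)rem);	/* q=1 rem=7 */
	return 0;
}

Compiling this with and without the asm line (e.g. gcc -O2 -S) and comparing the output shows the effect: without the barrier, GCC typically collapses the loop into a division instruction.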
diff --git a/include/linux/time.h b/include/linux/time.h
index d32ef0ad4c0a..e15206a7e82e 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -6,6 +6,7 @@
 #ifdef __KERNEL__
 # include <linux/cache.h>
 # include <linux/seqlock.h>
+# include <linux/math64.h>
 #endif
 
 #ifndef _STRUCT_TIMESPEC
@@ -169,18 +170,13 @@ extern struct timeval ns_to_timeval(const s64 nsec);
  * timespec_add_ns - Adds nanoseconds to a timespec
  * @a: pointer to timespec to be incremented
  * @ns: unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because it is used from the x86-64 vdso,
+ * which cannot call other kernel functions.
  */
-static inline void timespec_add_ns(struct timespec *a, u64 ns)
+static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
 {
-	ns += a->tv_nsec;
-	while(unlikely(ns >= NSEC_PER_SEC)) {
-		/* The following asm() prevents the compiler from
-		 * optimising this loop into a modulo operation. */
-		asm("" : "+r"(ns));
-
-		ns -= NSEC_PER_SEC;
-		a->tv_sec++;
-	}
+	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
 	a->tv_nsec = ns;
 }
 #endif /* __KERNEL__ */
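
What the rewritten timespec_add_ns() does, as a standalone userspace sketch (struct ts and the local helpers are stand-ins for the kernel's struct timespec and __iter_div_u64_rem): whole seconds are carried out of the nanosecond total by the iterative divide, which is cheap here because a single call rarely adds more than a few seconds' worth of nanoseconds.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ts { int64_t tv_sec; long tv_nsec; };

static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
				 uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

static void ts_add_ns(struct ts *a, uint64_t ns)
{
	/* carry whole seconds out of the nanosecond field */
	a->tv_sec += iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}

int main(void)
{
	struct ts t = { .tv_sec = 10, .tv_nsec = 900000000 };

	ts_add_ns(&t, 250000000);	/* crosses one second boundary */
	printf("%lld.%09ld\n", (long long)t.tv_sec, t.tv_nsec);	/* 11.150000000 */
	return 0;
}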
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f030..a111eb8de9cf 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
diff --git a/mm/nommu.c b/mm/nommu.c
index 3abd0845bda4..4462b6a3fcb9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -104,21 +104,15 @@ EXPORT_SYMBOL(vmtruncate);
 unsigned int kobjsize(const void *objp)
 {
 	struct page *page;
-	int order = 0;
 
 	/*
 	 * If the object we have should not have ksize performed on it,
 	 * return size of 0
 	 */
-	if (!objp)
-		return 0;
-
-	if ((unsigned long)objp >= memory_end)
+	if (!objp || !virt_addr_valid(objp))
 		return 0;
 
 	page = virt_to_head_page(objp);
-	if (!page)
-		return 0;
 
 	/*
 	 * If the allocator sets PageSlab, we know the pointer came from
@@ -129,18 +123,9 @@ unsigned int kobjsize(const void *objp)
 
 	/*
 	 * The ksize() function is only guaranteed to work for pointers
-	 * returned by kmalloc(). So handle arbitrary pointers, that we expect
-	 * always to be compound pages, here.
-	 */
-	if (PageCompound(page))
-		order = compound_order(page);
-
-	/*
-	 * Finally, handle arbitrary pointers that don't set PageSlab.
-	 * Default to 0-order in the case when we're unable to ksize()
-	 * the object.
+	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << order;
+	return PAGE_SIZE << compound_order(page);
 }
 
 /*
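
For reference (an illustrative sketch, assuming 4 KiB pages): compound_order() returns 0 for an ordinary non-compound page, which is why the PageCompound() test can be dropped; plain pages still report order-0 size while genuine compound allocations are sized by their order.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumes 4 KiB pages */

int main(void)
{
	/* order 0 is a plain page; higher orders are compound pages */
	for (unsigned int order = 0; order <= 3; order++)
		printf("order %u -> %lu bytes\n", order, PAGE_SIZE << order);
	return 0;
}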