path: root/include/linux/sched.h
author    Christoph Lameter <clameter@engr.sgi.com>    2006-01-06 03:11:20 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>        2006-01-06 11:33:29 -0500
commit    d3cb487149bd706aa6aeb02042332a450978dc1c (patch)
tree      69051e0f9853314cf275e4e800faad950e3053c3 /include/linux/sched.h
parent    070f80326a215d8e6c4fd6f175e28eb446c492bc (diff)
[PATCH] atomic_long_t & include/asm-generic/atomic.h V2
Several counters already need 64-bit atomic variables on 64-bit platforms (see mm_counter_t in sched.h), and we have to do ugly ifdefs to fall back to 32-bit atomics on 32-bit platforms. The VM statistics patch that I am working on will also make more extensive use of atomic64.

This patch introduces a new type, atomic_long_t, by providing definitions in asm-generic/atomic.h that work like the C "long" type: it is 32 bits on 32-bit platforms and 64 bits on 64-bit platforms. It also cleans up the determination of mm_counter_t in sched.h.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
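The asm-generic/atomic.h half of the patch is not shown on this page. As a rough sketch of the mechanism the message describes — keyed off BITS_PER_LONG, abbreviated, and not the verbatim patch:

/* Sketch only: atomic_long_t tracks the width of the C "long" type,
 * so callers get a 64-bit atomic on 64-bit platforms and a 32-bit
 * atomic on 32-bit platforms without any ifdefs of their own.
 */
#if BITS_PER_LONG == 64

typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)

static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic64_read((atomic64_t *)l);
}

static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic64_inc((atomic64_t *)l);
}

/* ...set/add/dec wrappers follow the same pattern... */

#else /* BITS_PER_LONG == 32 */

typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)

static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic_read((atomic_t *)l);
}

static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic_inc((atomic_t *)l);
}

/* ...set/add/dec wrappers follow the same pattern... */

#endif /* BITS_PER_LONG */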
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 25 ++++++-------------------
1 file changed, 6 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b0ad6f30679e..7da33619d5d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -254,25 +254,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
  */
-#ifdef ATOMIC64_INIT
-#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
-typedef atomic64_t mm_counter_t;
-#else /* !ATOMIC64_INIT */
-/*
- * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
- * that is, at 16TB if using 4kB page size.
- */
-#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
-typedef atomic_t mm_counter_t;
-#endif /* !ATOMIC64_INIT */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+typedef atomic_long_t mm_counter_t;
 
 #else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
 /*
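With the ifdef block gone, the counter helpers expand the same way on every architecture. A hypothetical call site (illustrative, not part of this patch; rss is one of the mm counters of this era):

/* Illustrative only: the ##member paste turns the counter name into
 * the _rss field, so this expands to atomic_long_inc(&mm->_rss).
 */
inc_mm_counter(mm, rss);
unsigned long pages = get_mm_counter(mm, rss);	/* atomic_long_read */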