From fce22848a1e9887d92c2a975494b69149808750e Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 3 Oct 2009 19:48:22 +0900
Subject: this_cpu: Use this_cpu operations for NFS statistics

Simplify NFS statistics and allow the use of optimized arch instructions.

Acked-by: Tejun Heo
CC: Trond Myklebust
Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 fs/nfs/iostat.h | 24 +++---------------------
 1 file changed, 3 insertions(+), 21 deletions(-)

(limited to 'fs')

diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index ceda50aad73c..46d779abafd3 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -25,13 +25,7 @@ struct nfs_iostats {
 static inline void nfs_inc_server_stats(const struct nfs_server *server,
 					enum nfs_stat_eventcounters stat)
 {
-	struct nfs_iostats *iostats;
-	int cpu;
-
-	cpu = get_cpu();
-	iostats = per_cpu_ptr(server->io_stats, cpu);
-	iostats->events[stat]++;
-	put_cpu();
+	this_cpu_inc(server->io_stats->events[stat]);
 }
 
 static inline void nfs_inc_stats(const struct inode *inode,
@@ -44,13 +38,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
 					enum nfs_stat_bytecounters stat,
 					unsigned long addend)
 {
-	struct nfs_iostats *iostats;
-	int cpu;
-
-	cpu = get_cpu();
-	iostats = per_cpu_ptr(server->io_stats, cpu);
-	iostats->bytes[stat] += addend;
-	put_cpu();
+	this_cpu_add(server->io_stats->bytes[stat], addend);
 }
 
 static inline void nfs_add_stats(const struct inode *inode,
@@ -65,13 +53,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
 					 enum nfs_stat_fscachecounters stat,
 					 unsigned long addend)
 {
-	struct nfs_iostats *iostats;
-	int cpu;
-
-	cpu = get_cpu();
-	iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
-	iostats->fscache[stat] += addend;
-	put_cpu();
+	this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
 }
 
 #endif
--
cgit v1.2.2


From ca0c9584b1f16bd5911893647cb7f1be82e60554 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 3 Oct 2009 19:48:22 +0900
Subject: this_cpu: Straight transformations

Use this_cpu_ptr and __this_cpu_ptr in locations where straight
transformations are possible because per_cpu_ptr is used with either
smp_processor_id() or raw_smp_processor_id().

cc: David Howells
Acked-by: Tejun Heo
cc: Ingo Molnar
cc: Rusty Russell
cc: Eric Dumazet
Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 fs/ext4/mballoc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs')

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index bba12824defa..d527fd384582 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3932,7 +3932,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
 	 * per cpu locality group is to reduce the contention between block
 	 * request from multiple CPUs.
 	 */
-	ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
+	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
 
 	/* we're going to use group allocation */
 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
--
cgit v1.2.2


From 7a9e02d6bb05b268dc403d7ee87ce4198062f838 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 3 Oct 2009 19:48:23 +0900
Subject: this_cpu: xfs_icsb_modify_counters does not need "cpu" variable

The xfs_icsb_modify_counters() function no longer needs the cpu variable
if we use this_cpu_ptr() and we can get rid of get/put_cpu().
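
For illustration only (the identifiers below are placeholders, not xfs
code, and the real xfs_icsb_modify_counters() also takes a per-counter
lock inside this window), the shape of the conversion is roughly:

	#include <linux/percpu.h>
	#include <linux/preempt.h>
	#include <linux/smp.h>

	/* sketch: a per-cpu block obtained from alloc_percpu() */
	struct foo_counts {
		long val;
	};

	/* old pattern: explicit cpu number from get_cpu()/put_cpu() */
	static void foo_add_old(struct foo_counts *counts, long delta)
	{
		struct foo_counts *c;
		int cpu;

		cpu = get_cpu();		/* disables preemption */
		c = per_cpu_ptr(counts, cpu);
		c->val += delta;
		put_cpu();			/* re-enables preemption */
	}

	/* new pattern: no cpu variable, preemption handled directly */
	static void foo_add_new(struct foo_counts *counts, long delta)
	{
		struct foo_counts *c;

		preempt_disable();
		c = this_cpu_ptr(counts);	/* this cpu's instance */
		c->val += delta;
		preempt_enable();
	}
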
Acked-by: Tejun Heo
Reviewed-by: Christoph Hellwig
Acked-by: Olaf Weber
Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 fs/xfs/xfs_mount.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'fs')

diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 8b6c9e807efb..ccafe8ef7ad5 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -2389,12 +2389,12 @@ xfs_icsb_modify_counters(
 {
 	xfs_icsb_cnts_t	*icsbp;
 	long long	lcounter;	/* long counter for 64 bit fields */
-	int		cpu, ret = 0;
+	int		ret = 0;
 
 	might_sleep();
 again:
-	cpu = get_cpu();
-	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+	preempt_disable();
+	icsbp = this_cpu_ptr(mp->m_sb_cnts);
 
 	/*
 	 * if the counter is disabled, go to slow path
@@ -2438,11 +2438,11 @@ again:
 		break;
 	}
 	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
+	preempt_enable();
 	return 0;
 
 slow_path:
-	put_cpu();
+	preempt_enable();
 
 	/*
 	 * serialise with a mutex so we don't burn lots of cpu on
@@ -2490,7 +2490,7 @@ slow_path:
 
 balance_counter:
 	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
+	preempt_enable();
 
 	/*
 	 * We may have multiple threads here if multiple per-cpu
--
cgit v1.2.2
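
A minimal sketch of the simpler conversions used in the NFS and ext4
patches above (all names are placeholders, not kernel identifiers):

	#include <linux/percpu.h>

	/* placeholder per-cpu statistics block; "stats" would come from
	 * alloc_percpu(struct foo_stats) */
	struct foo_stats {
		unsigned long events;
		unsigned long bytes;
	};

	static void foo_account(struct foo_stats *stats, unsigned long addend)
	{
		/* replaces get_cpu()/per_cpu_ptr()/counter++/put_cpu() */
		this_cpu_inc(stats->events);

		/* same replacement for an arbitrary increment */
		this_cpu_add(stats->bytes, addend);
	}

	static struct foo_stats *foo_local(struct foo_stats *stats)
	{
		/* replaces per_cpu_ptr(stats, raw_smp_processor_id());
		 * no preemption check, any cpu's instance is acceptable */
		return __this_cpu_ptr(stats);
	}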