Diffstat (limited to 'include/linux/blockgroup_lock.h')
-rw-r--r--	include/linux/blockgroup_lock.h	60
1 file changed, 60 insertions, 0 deletions
diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h
new file mode 100644
index 000000000000..0137ee5dd43c
--- /dev/null
+++ b/include/linux/blockgroup_lock.h
@@ -0,0 +1,60 @@
#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * We want a power-of-two. Is there a better way than this?
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS 128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS 64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS 32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS 16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS 8
#else
#define NR_BG_LOCKS 4
#endif

#else /* CONFIG_SMP */
#define NR_BG_LOCKS 1
#endif /* CONFIG_SMP */

struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
	int i;

	for (i = 0; i < NR_BG_LOCKS; i++)
		spin_lock_init(&bgl->locks[i].lock);
}

/*
 * The accessor is a macro so we can embed a blockgroup_lock into different
 * superblock types
 */
#define sb_bgl_lock(sb, block_group) \
	(&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock)

#endif
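
For context, a minimal usage sketch follows. It is not part of the patch: the struct my_sb_info container and the my_fill_super/my_update_group functions are hypothetical stand-ins for a filesystem's private superblock info and its block-group update path, showing how the embedded s_blockgroup_lock field, bgl_lock_init() and sb_bgl_lock() fit together under that assumption.

/*
 * Hypothetical usage sketch (not part of this patch): a filesystem-private
 * superblock structure embeds a blockgroup_lock, initialises it once at
 * mount time, and takes the hashed per-group spinlock around updates to a
 * block group's bitmaps or counters.  sb_bgl_lock() picks the bucket with
 * "block_group & (NR_BG_LOCKS - 1)", which is why NR_BG_LOCKS is kept a
 * power of two above.
 */
#include <linux/blockgroup_lock.h>
#include <linux/spinlock.h>

struct my_sb_info {				/* hypothetical per-sb info */
	struct blockgroup_lock s_blockgroup_lock;
	/* ... other per-superblock fields ... */
};

static void my_fill_super(struct my_sb_info *sbi)
{
	/* One-time initialisation of all NR_BG_LOCKS spinlocks at mount. */
	bgl_lock_init(&sbi->s_blockgroup_lock);
}

static void my_update_group(struct my_sb_info *sbi, unsigned int block_group)
{
	/* Groups that hash to the same bucket share a spinlock. */
	spin_lock(sb_bgl_lock(sbi, block_group));
	/* ... modify this group's bitmap / free-block counters ... */
	spin_unlock(sb_bgl_lock(sbi, block_group));
}

Because the lock is chosen by masking the group number, distinct block groups can share a spinlock on small configurations; the critical sections here are short counter and bitmap updates, so the coarser sharing trades a little contention for a fixed, cache-aligned lock array.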