author    | Al Viro <viro@zeniv.linux.org.uk> | 2009-06-24 02:05:18 -0400
committer | Al Viro <viro@zeniv.linux.org.uk> | 2009-06-24 08:15:25 -0400
commit    | c63e09ecccb50f930e899d7005edc5411ee86d4f (patch)
tree      | aafa5169cde98a2bb8327fcc948c76dba0746322 /fs
parent    | 7e325d3a6b117c7288bfc0755410e9d9d2b71326 (diff)
Make allocation of anon devices cheaper
Standard trick - add a new variable (start) such that
every n < start is known to be busy. Allocation can
skip checking everything in [0..start), and when it
returns n we can set start to n + 1. Freeing below
start sets start to the id we've just freed.
Of course, it still sucks if we do something like
free 0
allocate
allocate
in a loop - still O(n^2) time. However, on saner loads it
improves things a lot, and the whole thing is not worth
the trouble of switching to something with better worst-case
behaviour.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
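For readers who want to see the trick in isolation, below is a minimal, self-contained sketch of an id allocator carrying the same kind of start hint. This is not the fs/super.c code (which uses the kernel's IDA under unnamed_dev_lock); the allocator, the fixed-size array, and names such as alloc_id/free_id/NR_IDS are invented for illustration.

```c
/*
 * Illustrative sketch only: a toy id allocator with the "start" hint
 * described in the commit message above. Not kernel code.
 */
#include <stdio.h>

#define NR_IDS 64

static unsigned char busy[NR_IDS];  /* 1 = id in use */
static int start;                   /* every id below this is known busy */

/* Return the lowest free id at or above the hint, or -1 if none. */
static int alloc_id(void)
{
	for (int n = start; n < NR_IDS; n++) {
		if (!busy[n]) {
			busy[n] = 1;
			start = n + 1;  /* n and everything below it are busy */
			return n;
		}
	}
	return -1;
}

/* Free an id and pull the hint back so the slot can be found again. */
static void free_id(int n)
{
	busy[n] = 0;
	if (n < start)
		start = n;
}

int main(void)
{
	int a = alloc_id(), b = alloc_id(), c = alloc_id();  /* 0, 1, 2 */
	free_id(a);                     /* start drops back to 0 */
	printf("got %d %d %d, after free(0) reuse %d\n", a, b, c, alloc_id());
	return 0;
}
```

The invariant is that every id below start is busy, so allocation can never miss a free id as long as the free path pulls the hint back down; a hint that lags behind only costs extra probing, which is exactly the trade-off the message accepts.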
Diffstat (limited to 'fs')
-rw-r--r-- | fs/super.c | 7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/fs/super.c b/fs/super.c
index d40d53a22fb5..808ffd59e01b 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -608,6 +608,7 @@ void emergency_remount(void)
 
 static DEFINE_IDA(unnamed_dev_ida);
 static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
+static int unnamed_dev_start = 0; /* don't bother trying below it */
 
 int set_anon_super(struct super_block *s, void *data)
 {
@@ -618,7 +619,8 @@ int set_anon_super(struct super_block *s, void *data)
 	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
 		return -ENOMEM;
 	spin_lock(&unnamed_dev_lock);
-	error = ida_get_new(&unnamed_dev_ida, &dev);
+	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
+	unnamed_dev_start = dev + 1;
 	spin_unlock(&unnamed_dev_lock);
 	if (error == -EAGAIN)
 		/* We raced and lost with another CPU. */
@@ -629,6 +631,7 @@ int set_anon_super(struct super_block *s, void *data)
 	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
 		spin_lock(&unnamed_dev_lock);
 		ida_remove(&unnamed_dev_ida, dev);
+		unnamed_dev_start = dev;
 		spin_unlock(&unnamed_dev_lock);
 		return -EMFILE;
 	}
@@ -645,6 +648,8 @@ void kill_anon_super(struct super_block *sb)
 	generic_shutdown_super(sb);
 	spin_lock(&unnamed_dev_lock);
 	ida_remove(&unnamed_dev_ida, slot);
+	if (slot < unnamed_dev_start)
+		unnamed_dev_start = slot;
 	spin_unlock(&unnamed_dev_lock);
 }
 
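To make the worst-case caveat in the commit message concrete, here is the same toy allocator as in the sketch above (again illustrative only, not kernel code), driven by the "free 0; allocate; allocate" pattern and counting probes: freeing id 0 drags the hint back to the bottom, so the second allocation of every iteration has to walk the whole busy prefix, and the total work grows quadratically.

```c
/*
 * Illustrative sketch only: the "free 0 / allocate / allocate" worst
 * case from the commit message, using the same toy hint allocator.
 */
#include <stdio.h>

#define NR_IDS 1024

static unsigned char busy[NR_IDS];
static int start;          /* every id below this is known busy */
static long probes;        /* slots inspected across all allocations */

static int alloc_id(void)
{
	for (int n = start; n < NR_IDS; n++) {
		probes++;
		if (!busy[n]) {
			busy[n] = 1;
			start = n + 1;
			return n;
		}
	}
	return -1;
}

static void free_id(int n)
{
	busy[n] = 0;
	if (n < start)
		start = n;
}

int main(void)
{
	int iters = 400;

	alloc_id();                     /* occupy id 0 to start with */
	for (int i = 0; i < iters; i++) {
		free_id(0);             /* hint falls back to 0 */
		alloc_id();             /* cheap: reuses id 0 immediately */
		alloc_id();             /* expensive: scans the busy prefix */
	}
	printf("%d allocations took %ld probes (~quadratic)\n",
	       2 * iters + 1, probes);
	return 0;
}
```

On the "saner" loads the message refers to, where frees are rare or cluster near the top of the allocated range, the hint keeps most allocations down to a handful of probes, which is the case the patch is optimizing for.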