Diffstat (limited to 'fs'): 75 files changed, 649 insertions, 802 deletions
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index ea1134eb47c8..8e8356c1c229 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -31,6 +31,7 @@
 #include <linux/poll.h>
 #include <linux/kthread.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 
 #include "debug.h"
 #include "v9fs.h"
@@ -110,7 +111,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
 static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);
 
-static DECLARE_MUTEX(v9fs_mux_task_lock);
+static DEFINE_MUTEX(v9fs_mux_task_lock);
 static struct workqueue_struct *v9fs_mux_wq;
 
 static int v9fs_mux_num;
@@ -166,7 +167,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
 
 	dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
 		v9fs_mux_poll_task_num);
-	up(&v9fs_mux_task_lock);
+	mutex_lock(&v9fs_mux_task_lock);
 
 	n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
 	if (n > v9fs_mux_poll_task_num) {
@@ -225,7 +226,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
 	}
 
 	v9fs_mux_num++;
-	down(&v9fs_mux_task_lock);
+	mutex_unlock(&v9fs_mux_task_lock);
 
 	return 0;
 }
@@ -235,7 +236,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
 	int i;
 	struct v9fs_mux_poll_task *vpt;
 
-	up(&v9fs_mux_task_lock);
+	mutex_lock(&v9fs_mux_task_lock);
 	vpt = m->poll_task;
 	list_del(&m->mux_list);
 	for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
@@ -252,7 +253,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
 		v9fs_mux_poll_task_num--;
 	}
 	v9fs_mux_num--;
-	down(&v9fs_mux_task_lock);
+	mutex_unlock(&v9fs_mux_task_lock);
 }
 
 /**
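
Aside (not part of the commit): the conversion pattern used throughout this series swaps a semaphore that was only ever used as a sleeping lock for the dedicated mutex type. A minimal sketch of the before/after shape, with a made-up lock name:

#include <linux/mutex.h>

/* Hypothetical lock name, for illustration only. */
static DEFINE_MUTEX(example_lock);		/* was: static DECLARE_MUTEX(example_lock); */

static void example_critical_section(void)
{
	mutex_lock(&example_lock);		/* was: down(&example_lock); */
	/* ... touch data protected by example_lock ... */
	mutex_unlock(&example_lock);		/* was: up(&example_lock); */
}

Note that in mux.c above the original code called up() where the lock is taken and down() where it is released; the mutex conversion also puts the acquire/release calls in the conventional orientation.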
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3ad8455f8577..651a9e14d9a9 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -614,6 +614,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 
 	sb = dir->i_sb;
 	v9ses = v9fs_inode2v9ses(dir);
+	dentry->d_op = &v9fs_dentry_operations;
 	dirfid = v9fs_fid_lookup(dentry->d_parent);
 
 	if (!dirfid) {
@@ -681,8 +682,6 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 		goto FreeFcall;
 
 	fid->qid = fcall->params.rstat.stat.qid;
-
-	dentry->d_op = &v9fs_dentry_operations;
 	v9fs_stat2inode(&fcall->params.rstat.stat, inode, inode->i_sb);
 
 	d_add(dentry, inode);
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index afebbfde6968..6af10885f9d6 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -19,11 +19,7 @@
  *
  *  adfs regular file handling primitives
  */
-#include <linux/errno.h>
 #include <linux/fs.h>
-#include <linux/fcntl.h>
-#include <linux/time.h>
-#include <linux/stat.h>
 #include <linux/buffer_head.h>	/* for file_fsync() */
 #include <linux/adfs_fs.h>
 
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 385bed09b0d8..f54c5b21f876 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -13,6 +13,7 @@
 /* Internal header file for autofs */
 
 #include <linux/auto_fs4.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 
 /* This is the range of ioctl() numbers we claim as ours */
@@ -102,7 +103,7 @@ struct autofs_sb_info {
 	int reghost_enabled;
 	int needs_reghost;
 	struct super_block *sb;
-	struct semaphore wq_sem;
+	struct mutex wq_mutex;
 	spinlock_t fs_lock;
 	struct autofs_wait_queue *queues; /* Wait queue pointer */
 };
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 2d3082854a29..1ad98d48e550 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -269,7 +269,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
 	sbi->sb = s;
 	sbi->version = 0;
 	sbi->sub_version = 0;
-	init_MUTEX(&sbi->wq_sem);
+	mutex_init(&sbi->wq_mutex);
 	spin_lock_init(&sbi->fs_lock);
 	sbi->queues = NULL;
 	s->s_blocksize = 1024;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 394ff36ef8f1..be78e9378c03 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -178,7 +178,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		return -ENOENT;
 	}
 
-	if (down_interruptible(&sbi->wq_sem)) {
+	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
 		kfree(name);
 		return -EINTR;
 	}
@@ -194,7 +194,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		/* Can't wait for an expire if there's no mount */
 		if (notify == NFY_NONE && !d_mountpoint(dentry)) {
 			kfree(name);
-			up(&sbi->wq_sem);
+			mutex_unlock(&sbi->wq_mutex);
 			return -ENOENT;
 		}
 
@@ -202,7 +202,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
 		if ( !wq ) {
 			kfree(name);
-			up(&sbi->wq_sem);
+			mutex_unlock(&sbi->wq_mutex);
 			return -ENOMEM;
 		}
 
@@ -218,10 +218,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		wq->status = -EINTR; /* Status return if interrupted */
 		atomic_set(&wq->wait_ctr, 2);
 		atomic_set(&wq->notified, 1);
-		up(&sbi->wq_sem);
+		mutex_unlock(&sbi->wq_mutex);
 	} else {
 		atomic_inc(&wq->wait_ctr);
-		up(&sbi->wq_sem);
+		mutex_unlock(&sbi->wq_mutex);
 		kfree(name);
 		DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
 			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
@@ -282,19 +282,19 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok
 {
 	struct autofs_wait_queue *wq, **wql;
 
-	down(&sbi->wq_sem);
+	mutex_lock(&sbi->wq_mutex);
 	for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) {
 		if ( wq->wait_queue_token == wait_queue_token )
 			break;
 	}
 
 	if ( !wq ) {
-		up(&sbi->wq_sem);
+		mutex_unlock(&sbi->wq_mutex);
 		return -EINVAL;
 	}
 
 	*wql = wq->next;	/* Unlink from chain */
-	up(&sbi->wq_sem);
+	mutex_unlock(&sbi->wq_mutex);
 	kfree(wq->name);
 	wq->name = NULL;	/* Do not wait on this queue */
 
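
Aside (not part of the commit): autofs4_wait() keeps its interruptible locking behaviour across the conversion; mutex_lock_interruptible() returns non-zero when a signal arrives before the lock is acquired. A minimal sketch of that pattern, with hypothetical names:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical mutex and helper, for illustration only. */
static DEFINE_MUTEX(example_wq_mutex);

static int example_queue_add(char *name)
{
	/* Back out with -EINTR if a signal interrupts the sleep. */
	if (mutex_lock_interruptible(&example_wq_mutex)) {
		kfree(name);
		return -EINTR;
	}

	/* ... manipulate the wait-queue list under the mutex ... */

	mutex_unlock(&example_wq_mutex);
	return 0;
}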
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 2d365cb8eec6..dd6048ce0532 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -561,7 +561,7 @@ befs_utf2nls(struct super_block *sb, const char *in,
  * @sb: Superblock
  * @src: Input string buffer in NLS format
  * @srclen: Length of input string in bytes
- * @dest: The output string in UTF8 format
+ * @dest: The output string in UTF-8 format
  * @destlen: Length of the output buffer
  *
  * Converts input string @src, which is in the format of the loaded NLS map,
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1243,11 +1243,11 @@ static int __init init_bio(void)
 		scale = 4;
 
 	/*
-	 * scale number of entries
+	 * Limit number of entries reserved -- mempools are only used when
+	 * the system is completely unable to allocate memory, so we only
+	 * need enough to make progress.
 	 */
-	bvec_pool_entries = megabytes * 2;
-	if (bvec_pool_entries > 256)
-		bvec_pool_entries = 256;
+	bvec_pool_entries = 1 + scale;
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
 	if (!fs_bio_set)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6e50346fb1ee..44d05e6e34db 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -265,8 +265,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 	    SLAB_CTOR_CONSTRUCTOR)
 	{
 		memset(bdev, 0, sizeof(*bdev));
-		sema_init(&bdev->bd_sem, 1);
-		sema_init(&bdev->bd_mount_sem, 1);
+		mutex_init(&bdev->bd_mutex);
+		mutex_init(&bdev->bd_mount_mutex);
 		INIT_LIST_HEAD(&bdev->bd_inodes);
 		INIT_LIST_HEAD(&bdev->bd_list);
 		inode_init_once(&ei->vfs_inode);
@@ -574,7 +574,7 @@ static int do_open(struct block_device *bdev, struct file *file)
 	}
 	owner = disk->fops->owner;
 
-	down(&bdev->bd_sem);
+	mutex_lock(&bdev->bd_mutex);
 	if (!bdev->bd_openers) {
 		bdev->bd_disk = disk;
 		bdev->bd_contains = bdev;
@@ -605,21 +605,21 @@ static int do_open(struct block_device *bdev, struct file *file)
 			if (ret)
 				goto out_first;
 			bdev->bd_contains = whole;
-			down(&whole->bd_sem);
+			mutex_lock(&whole->bd_mutex);
 			whole->bd_part_count++;
 			p = disk->part[part - 1];
 			bdev->bd_inode->i_data.backing_dev_info =
 			   whole->bd_inode->i_data.backing_dev_info;
 			if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
 				whole->bd_part_count--;
-				up(&whole->bd_sem);
+				mutex_unlock(&whole->bd_mutex);
 				ret = -ENXIO;
 				goto out_first;
 			}
 			kobject_get(&p->kobj);
 			bdev->bd_part = p;
 			bd_set_size(bdev, (loff_t) p->nr_sects << 9);
-			up(&whole->bd_sem);
+			mutex_unlock(&whole->bd_mutex);
 		}
 	} else {
 		put_disk(disk);
@@ -633,13 +633,13 @@ static int do_open(struct block_device *bdev, struct file *file)
 			if (bdev->bd_invalidated)
 				rescan_partitions(bdev->bd_disk, bdev);
 		} else {
-			down(&bdev->bd_contains->bd_sem);
+			mutex_lock(&bdev->bd_contains->bd_mutex);
 			bdev->bd_contains->bd_part_count++;
-			up(&bdev->bd_contains->bd_sem);
+			mutex_unlock(&bdev->bd_contains->bd_mutex);
 		}
 	}
 	bdev->bd_openers++;
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	unlock_kernel();
 	return 0;
 
@@ -652,7 +652,7 @@ out_first:
 	put_disk(disk);
 	module_put(owner);
 out:
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	unlock_kernel();
 	if (ret)
 		bdput(bdev);
@@ -714,7 +714,7 @@ int blkdev_put(struct block_device *bdev)
 	struct inode *bd_inode = bdev->bd_inode;
 	struct gendisk *disk = bdev->bd_disk;
 
-	down(&bdev->bd_sem);
+	mutex_lock(&bdev->bd_mutex);
 	lock_kernel();
 	if (!--bdev->bd_openers) {
 		sync_blockdev(bdev);
@@ -724,9 +724,9 @@ int blkdev_put(struct block_device *bdev)
 		if (disk->fops->release)
 			ret = disk->fops->release(bd_inode, NULL);
 	} else {
-		down(&bdev->bd_contains->bd_sem);
+		mutex_lock(&bdev->bd_contains->bd_mutex);
 		bdev->bd_contains->bd_part_count--;
-		up(&bdev->bd_contains->bd_sem);
+		mutex_unlock(&bdev->bd_contains->bd_mutex);
 	}
 	if (!bdev->bd_openers) {
 		struct module *owner = disk->fops->owner;
@@ -746,7 +746,7 @@ int blkdev_put(struct block_device *bdev)
 		bdev->bd_contains = NULL;
 	}
 	unlock_kernel();
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	bdput(bdev);
 	return ret;
 }
diff --git a/fs/buffer.c b/fs/buffer.c
index a9b399402007..0d6ca7bac6c8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -201,7 +201,7 @@ int fsync_bdev(struct block_device *bdev)
  * freeze_bdev -- lock a filesystem and force it into a consistent state
  * @bdev: blockdevice to lock
  *
- * This takes the block device bd_mount_sem to make sure no new mounts
+ * This takes the block device bd_mount_mutex to make sure no new mounts
  * happen on bdev until thaw_bdev() is called.
  * If a superblock is found on this device, we take the s_umount semaphore
  * on it to make sure nobody unmounts until the snapshot creation is done.
@@ -210,7 +210,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 {
 	struct super_block *sb;
 
-	down(&bdev->bd_mount_sem);
+	mutex_lock(&bdev->bd_mount_mutex);
 	sb = get_super(bdev);
 	if (sb && !(sb->s_flags & MS_RDONLY)) {
 		sb->s_frozen = SB_FREEZE_WRITE;
@@ -264,7 +264,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 		drop_super(sb);
 	}
 
-	up(&bdev->bd_mount_sem);
+	mutex_unlock(&bdev->bd_mount_mutex);
 }
 EXPORT_SYMBOL(thaw_bdev);
 
@@ -3051,68 +3051,6 @@ asmlinkage long sys_bdflush(int func, long data)
 }
 
 /*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist.
- */
-#ifdef CONFIG_MIGRATION
-int buffer_migrate_page(struct page *newpage, struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-	struct buffer_head *bh, *head;
-	int rc;
-
-	if (!mapping)
-		return -EAGAIN;
-
-	if (!page_has_buffers(page))
-		return migrate_page(newpage, page);
-
-	head = page_buffers(page);
-
-	rc = migrate_page_remove_references(newpage, page, 3);
-	if (rc)
-		return rc;
-
-	bh = head;
-	do {
-		get_bh(bh);
-		lock_buffer(bh);
-		bh = bh->b_this_page;
-
-	} while (bh != head);
-
-	ClearPagePrivate(page);
-	set_page_private(newpage, page_private(page));
-	set_page_private(page, 0);
-	put_page(page);
-	get_page(newpage);
-
-	bh = head;
-	do {
-		set_bh_page(bh, newpage, bh_offset(bh));
-		bh = bh->b_this_page;
-
-	} while (bh != head);
-
-	SetPagePrivate(newpage);
-
-	migrate_page_copy(newpage, page);
-
-	bh = head;
-	do {
-		unlock_buffer(bh);
-		put_bh(bh);
-		bh = bh->b_this_page;
-
-	} while (bh != head);
-
-	return 0;
-}
-EXPORT_SYMBOL(buffer_migrate_page);
-#endif
-
-/*
  * Buffer-head allocation
  */
 static kmem_cache_t *bh_cachep;
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index d335015473a5..cb68efba35db 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -160,7 +160,7 @@ improperly zeroed buffer in CIFS Unix extensions set times call.
 Version 1.25
 ------------
 Fix internationalization problem in cifs readdir with filenames that map to
-longer UTF8 strings than the string on the wire was in Unicode. Add workaround
+longer UTF-8 strings than the string on the wire was in Unicode. Add workaround
 for readdir to netapp servers. Fix search rewind (seek into readdir to return
 non-consecutive entries). Do not do readdir when server negotiates
 buffer size to small to fit filename. Add support for reading POSIX ACLs from
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index fed55e3c53df..632561dd9c50 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -138,9 +138,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -317,9 +317,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 	if(full_path == NULL)
 		rc = -ENOMEM;
 	else if (pTcon->ses->capabilities & CAP_UNIX) {
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index a7a47bb36bf3..ec4dfe9bf5ef 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -86,9 +86,9 @@ int cifs_dir_notify(struct file * file, unsigned long arg)
 	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(file->f_dentry);
-	up(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 
 	if(full_path == NULL) {
 		rc = -ENOMEM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 675bd2568297..165d67426381 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -203,9 +203,9 @@ int cifs_open(struct inode *inode, struct file *file)
 		}
 	}
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(file->f_dentry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 59359911f481..ff93a9f81d1c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -574,9 +574,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
 
 /* Unlink can be called from rename so we can not grab the sem here
    since we deadlock otherwise */
-/*	down(&direntry->d_sb->s_vfs_rename_sem);*/
+/*	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/
 	full_path = build_path_from_dentry(direntry);
-/*	up(&direntry->d_sb->s_vfs_rename_sem);*/
+/*	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -718,9 +718,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -803,9 +803,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -1137,9 +1137,9 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
 		rc = 0;
 	}
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 0f99aae33162..8d0da7c87c7b 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -48,10 +48,10 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
 /* No need to check for cross device links since server will do that
    BB note DFS case in future though (when we may have to check) */
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	fromName = build_path_from_dentry(old_file);
 	toName = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if((fromName == NULL) || (toName == NULL)) {
 		rc = -ENOMEM;
 		goto cifs_hl_exit;
@@ -103,9 +103,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
 
 	xid = GetXid();
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 
 	if (!full_path)
 		goto out_no_free;
@@ -164,9 +164,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 
 	if(full_path == NULL) {
 		FreeXid(xid);
@@ -232,9 +232,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
 
 /* BB would it be safe against deadlock to grab this sem
    even though rename itself grabs the sem and calls lookup? */
-/*	down(&inode->i_sb->s_vfs_rename_sem);*/
+/*	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/
 	full_path = build_path_from_dentry(direntry);
-/*	up(&inode->i_sb->s_vfs_rename_sem);*/
+/*	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/
 
 	if(full_path == NULL) {
 		FreeXid(xid);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 288cc048d37f..edb3b6eb34bc 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -404,9 +404,9 @@ static int initiate_cifs_search(const int xid, struct file *file)
 	if(pTcon == NULL)
 		return -EINVAL;
 
-	down(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(file->f_dentry);
-	up(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 
 	if(full_path == NULL) {
 		return -ENOMEM;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 777e3363c2a4..3938444d87b2 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -62,9 +62,9 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name)
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -116,9 +116,9 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name,
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -223,9 +223,9 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -341,9 +341,9 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bfb8a230bac9..14c5620b5cab 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -18,6 +18,7 @@
 #include <linux/mount.h>
 #include <linux/tty.h>
 #include <linux/devpts_fs.h>
+#include <linux/parser.h>
 
 #define DEVPTS_SUPER_MAGIC 0x1cd1
 
@@ -32,39 +33,60 @@ static struct {
 	umode_t mode;
 } config = {.mode = 0600};
 
+enum {
+	Opt_uid, Opt_gid, Opt_mode,
+	Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_mode, "mode=%o"},
+	{Opt_err, NULL}
+};
+
 static int devpts_remount(struct super_block *sb, int *flags, char *data)
 {
-	int setuid = 0;
-	int setgid = 0;
-	uid_t uid = 0;
-	gid_t gid = 0;
-	umode_t mode = 0600;
-	char *this_char;
-
-	this_char = NULL;
-	while ((this_char = strsep(&data, ",")) != NULL) {
-		int n;
-		char dummy;
-		if (!*this_char)
+	char *p;
+
+	config.setuid = 0;
+	config.setgid = 0;
+	config.uid = 0;
+	config.gid = 0;
+	config.mode = 0600;
+
+	while ((p = strsep(&data, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		int option;
+
+		if (!*p)
 			continue;
-		if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) {
-			setuid = 1;
-			uid = n;
-		} else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) {
-			setgid = 1;
-			gid = n;
-		} else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1)
-			mode = n & ~S_IFMT;
-		else {
-			printk("devpts: called with bogus options\n");
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_uid:
+			if (match_int(&args[0], &option))
+				return -EINVAL;
+			config.uid = option;
+			config.setuid = 1;
+			break;
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				return -EINVAL;
+			config.gid = option;
+			config.setgid = 1;
+			break;
+		case Opt_mode:
+			if (match_octal(&args[0], &option))
+				return -EINVAL;
+			config.mode = option & ~S_IFMT;
+			break;
+		default:
+			printk(KERN_ERR "devpts: called with bogus options\n");
 			return -EINVAL;
 		}
 	}
-	config.setuid = setuid;
-	config.setgid = setgid;
-	config.uid = uid;
-	config.gid = gid;
-	config.mode = mode;
 
 	return 0;
 }
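
Aside (not part of the commit): devpts_remount() above moves from hand-rolled sscanf() parsing to the generic mount-option parser declared in <linux/parser.h>. A minimal sketch of that pattern, with an invented option table:

#include <linux/kernel.h>
#include <linux/parser.h>
#include <linux/string.h>

/* Invented option table, for illustration only. */
enum { Opt_size, Opt_err };

static match_table_t example_tokens = {
	{Opt_size, "size=%u"},
	{Opt_err, NULL}
};

static int example_parse_options(char *data, unsigned int *size)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int option;

	while ((p = strsep(&data, ",")) != NULL) {
		if (!*p)
			continue;
		switch (match_token(p, example_tokens, args)) {
		case Opt_size:
			/* match_int() converts the %u capture to an int. */
			if (match_int(&args[0], &option))
				return -EINVAL;
			*size = option;
			break;
		default:
			return -EINVAL;		/* unrecognised option */
		}
	}
	return 0;
}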
diff --git a/fs/dquot.c b/fs/dquot.c
index 1966c890b48d..acf07e581f8c 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -103,12 +103,12 @@
  * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
  * for altering the flag i_mutex is also needed). If operation is holding
  * reference to dquot in other way (e.g. quotactl ops) it must be guarded by
- * dqonoff_sem.
+ * dqonoff_mutex.
  * This locking assures that:
  * a) update/access to dquot pointers in inode is serialized
  * b) everyone is guarded against invalidate_dquots()
  *
- * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced
+ * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
  * from inodes (dquot_alloc_space() and such don't check the dq_lock).
 * Currently dquot is locked only when it is being read to memory (or space for
 * it is being allocated) on the first dqget() and when it is being released on
@@ -118,9 +118,9 @@
 * spinlock to internal buffers before writing.
 *
 * Lock ordering (including related VFS locks) is the following:
- * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
- * > dquot->dq_lock > dqio_sem
- * i_mutex on quota files is special (it's below dqio_sem)
+ * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ * dqio_mutex
+ * i_mutex on quota files is special (it's below dqio_mutex)
 */
 
 static DEFINE_SPINLOCK(dq_list_lock);
@@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot)
 
 static void wait_on_dquot(struct dquot *dquot)
 {
-	down(&dquot->dq_lock);
-	up(&dquot->dq_lock);
+	mutex_lock(&dquot->dq_lock);
+	mutex_unlock(&dquot->dq_lock);
 }
 
 #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
@@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot)
 	int ret = 0, ret2 = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
-	down(&dquot->dq_lock);
-	down(&dqopt->dqio_sem);
+	mutex_lock(&dquot->dq_lock);
+	mutex_lock(&dqopt->dqio_mutex);
 	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
 		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
 	if (ret < 0)
@@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot)
 	}
 	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
-	up(&dqopt->dqio_sem);
-	up(&dquot->dq_lock);
+	mutex_unlock(&dqopt->dqio_mutex);
+	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
 
@@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot)
 	int ret = 0, ret2 = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
-	down(&dqopt->dqio_sem);
+	mutex_lock(&dqopt->dqio_mutex);
 	spin_lock(&dq_list_lock);
 	if (!clear_dquot_dirty(dquot)) {
 		spin_unlock(&dq_list_lock);
@@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot)
 		ret = ret2;
 	}
 out_sem:
-	up(&dqopt->dqio_sem);
+	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
 }
 
@@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot)
 	int ret = 0, ret2 = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
-	down(&dquot->dq_lock);
+	mutex_lock(&dquot->dq_lock);
 	/* Check whether we are not racing with some other dqget() */
 	if (atomic_read(&dquot->dq_count) > 1)
 		goto out_dqlock;
-	down(&dqopt->dqio_sem);
+	mutex_lock(&dqopt->dqio_mutex);
 	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
 		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
 		/* Write the info */
@@ -399,31 +399,57 @@ int dquot_release(struct dquot *dquot)
 		ret = ret2;
 	}
 	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
-	up(&dqopt->dqio_sem);
+	mutex_unlock(&dqopt->dqio_mutex);
 out_dqlock:
-	up(&dquot->dq_lock);
+	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
 
 /* Invalidate all dquots on the list. Note that this function is called after
  * quota is disabled and pointers from inodes removed so there cannot be new
- * quota users. Also because we hold dqonoff_sem there can be no quota users
- * for this sb+type at all. */
+ * quota users. There can still be some users of quotas due to inodes being
+ * just deleted or pruned by prune_icache() (those are not attached to any
+ * list). We have to wait for such users.
+ */
 static void invalidate_dquots(struct super_block *sb, int type)
 {
 	struct dquot *dquot, *tmp;
 
+restart:
 	spin_lock(&dq_list_lock);
 	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
 		if (dquot->dq_sb != sb)
 			continue;
 		if (dquot->dq_type != type)
 			continue;
-#ifdef __DQUOT_PARANOIA
-		if (atomic_read(&dquot->dq_count))
-			BUG();
-#endif
-		/* Quota now has no users and it has been written on last dqput() */
+		/* Wait for dquot users */
+		if (atomic_read(&dquot->dq_count)) {
+			DEFINE_WAIT(wait);
+
+			atomic_inc(&dquot->dq_count);
+			prepare_to_wait(&dquot->dq_wait_unused, &wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&dq_list_lock);
+			/* Once dqput() wakes us up, we know it's time to free
+			 * the dquot.
+			 * IMPORTANT: we rely on the fact that there is always
+			 * at most one process waiting for dquot to free.
+			 * Otherwise dq_count would be > 1 and we would never
+			 * wake up.
+			 */
+			if (atomic_read(&dquot->dq_count) > 1)
+				schedule();
+			finish_wait(&dquot->dq_wait_unused, &wait);
+			dqput(dquot);
+			/* At this moment dquot() need not exist (it could be
+			 * reclaimed by prune_dqcache(). Hence we must
+			 * restart. */
+			goto restart;
+		}
+		/*
+		 * Quota now has no users and it has been written on last
+		 * dqput()
+		 */
 		remove_dquot_hash(dquot);
 		remove_free_dquot(dquot);
 		remove_inuse(dquot);
@@ -439,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
 	struct quota_info *dqopt = sb_dqopt(sb);
 	int cnt;
 
-	down(&dqopt->dqonoff_sem);
+	mutex_lock(&dqopt->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (type != -1 && cnt != type)
 			continue;
@@ -474,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
 	spin_lock(&dq_list_lock);
 	dqstats.syncs++;
 	spin_unlock(&dq_list_lock);
-	up(&dqopt->dqonoff_sem);
+	mutex_unlock(&dqopt->dqonoff_mutex);
 
 	return 0;
 }
@@ -515,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
 /*
  * Put reference to dquot
  * NOTE: If you change this function please check whether dqput_blocks() works right...
- * MUST be called with either dqptr_sem or dqonoff_sem held
+ * MUST be called with either dqptr_sem or dqonoff_mutex held
  */
 static void dqput(struct dquot *dquot)
 {
@@ -540,6 +566,10 @@ we_slept:
 	if (atomic_read(&dquot->dq_count) > 1) {
 		/* We have more than one user... nothing to do */
 		atomic_dec(&dquot->dq_count);
+		/* Releasing dquot during quotaoff phase? */
+		if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) &&
+		    atomic_read(&dquot->dq_count) == 1)
+			wake_up(&dquot->dq_wait_unused);
 		spin_unlock(&dq_list_lock);
 		return;
 	}
@@ -576,11 +606,12 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 		return NODQUOT;
 
 	memset((caddr_t)dquot, 0, sizeof(struct dquot));
-	sema_init(&dquot->dq_lock, 1);
+	mutex_init(&dquot->dq_lock);
 	INIT_LIST_HEAD(&dquot->dq_free);
 	INIT_LIST_HEAD(&dquot->dq_inuse);
 	INIT_HLIST_NODE(&dquot->dq_hash);
 	INIT_LIST_HEAD(&dquot->dq_dirty);
+	init_waitqueue_head(&dquot->dq_wait_unused);
 	dquot->dq_sb = sb;
 	dquot->dq_type = type;
 	atomic_set(&dquot->dq_count, 1);
@@ -590,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 
 /*
  * Get reference to dquot
- * MUST be called with either dqptr_sem or dqonoff_sem held
+ * MUST be called with either dqptr_sem or dqonoff_mutex held
 */
 static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
 {
@@ -656,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type)
 	return 0;
 }
 
-/* This routine is guarded by dqonoff_sem semaphore */
+/* This routine is guarded by dqonoff_mutex mutex */
 static void add_dquot_ref(struct super_block *sb, int type)
 {
 	struct list_head *p;
@@ -732,13 +763,9 @@ static void drop_dquot_ref(struct super_block *sb, int type)
 {
 	LIST_HEAD(tofree_head);
 
-	/* We need to be guarded against prune_icache to reach all the
-	 * inodes - otherwise some can be on the local list of prune_icache */
-	down(&iprune_sem);
 	down_write(&sb_dqopt(sb)->dqptr_sem);
 	remove_dquot_ref(sb, type, &tofree_head);
 	up_write(&sb_dqopt(sb)->dqptr_sem);
-	up(&iprune_sem);
 	put_dquot_list(&tofree_head);
 }
 
@@ -938,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type)
 	unsigned int id = 0;
 	int cnt, ret = 0;
 
-	/* First test before acquiring semaphore - solves deadlocks when we
-	 * re-enter the quota code and are already holding the semaphore */
+	/* First test before acquiring mutex - solves deadlocks when we
+	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode))
 		return 0;
 	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1002,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
 	int cnt, ret = NO_QUOTA;
 	char warntype[MAXQUOTAS];
 
-	/* First test before acquiring semaphore - solves deadlocks when we
-	 * re-enter the quota code and are already holding the semaphore */
+	/* First test before acquiring mutex - solves deadlocks when we
+	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode)) {
 out_add:
 		inode_add_bytes(inode, number);
@@ -1051,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number)
 	int cnt, ret = NO_QUOTA;
 	char warntype[MAXQUOTAS];
 
-	/* First test before acquiring semaphore - solves deadlocks when we
-	 * re-enter the quota code and are already holding the semaphore */
+	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode))
 		return QUOTA_OK;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
@@ -1095,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number)
 {
 	unsigned int cnt;
 
-	/* First test before acquiring semaphore - solves deadlocks when we
-	 * re-enter the quota code and are already holding the semaphore */
+	/* First test before acquiring mutex - solves deadlocks when we
+	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode)) {
out_sub:
 		inode_sub_bytes(inode, number);
@@ -1131,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number)
 {
 	unsigned int cnt;
 
-	/* First test before acquiring semaphore - solves deadlocks when we
-	 * re-enter the quota code and are already holding the semaphore */
+	/* First test before acquiring mutex - solves deadlocks when we
+	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode))
 		return QUOTA_OK;
 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1171,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
 	    chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
 	char warntype[MAXQUOTAS];
 
-	/* First test before acquiring semaphore - solves deadlocks when we
-	 * re-enter the quota code and are already holding the semaphore */
+	/* First test before acquiring mutex - solves deadlocks when we
+	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode))
 		return QUOTA_OK;
 	/* Clear the arrays */
@@ -1266,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type)
 	int ret;
 	struct quota_info *dqopt = sb_dqopt(sb);
 
-	down(&dqopt->dqio_sem);
+	mutex_lock(&dqopt->dqio_mutex);
 	ret = dqopt->ops[type]->write_file_info(sb, type);
-	up(&dqopt->dqio_sem);
+	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
 }
 
@@ -1324,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type)
 	struct inode *toputinode[MAXQUOTAS];
 
 	/* We need to serialize quota_off() for device */
-	down(&dqopt->dqonoff_sem);
+	mutex_lock(&dqopt->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		toputinode[cnt] = NULL;
 		if (type != -1 && cnt != type)
@@ -1353,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type)
 		dqopt->info[cnt].dqi_bgrace = 0;
 		dqopt->ops[cnt] = NULL;
 	}
-	up(&dqopt->dqonoff_sem);
+	mutex_unlock(&dqopt->dqonoff_mutex);
 	/* Sync the superblock so that buffers with quota data are written to
 	 * disk (and so userspace sees correct data afterwards). */
 	if (sb->s_op->sync_fs)
@@ -1366,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type)
 	 * changes done by userspace on the next quotaon() */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		if (toputinode[cnt]) {
-			down(&dqopt->dqonoff_sem);
+			mutex_lock(&dqopt->dqonoff_mutex);
 			/* If quota was reenabled in the meantime, we have
 			 * nothing to do */
 			if (!sb_has_quota_enabled(sb, cnt)) {
@@ -1378,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type)
 				mark_inode_dirty(toputinode[cnt]);
 				iput(toputinode[cnt]);
 			}
-			up(&dqopt->dqonoff_sem);
+			mutex_unlock(&dqopt->dqonoff_mutex);
 		}
 	if (sb->s_bdev)
 		invalidate_bdev(sb->s_bdev, 0);
@@ -1419,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) | |||
1419 | /* And now flush the block cache so that kernel sees the changes */ | 1446 | /* And now flush the block cache so that kernel sees the changes */ |
1420 | invalidate_bdev(sb->s_bdev, 0); | 1447 | invalidate_bdev(sb->s_bdev, 0); |
1421 | mutex_lock(&inode->i_mutex); | 1448 | mutex_lock(&inode->i_mutex); |
1422 | down(&dqopt->dqonoff_sem); | 1449 | mutex_lock(&dqopt->dqonoff_mutex); |
1423 | if (sb_has_quota_enabled(sb, type)) { | 1450 | if (sb_has_quota_enabled(sb, type)) { |
1424 | error = -EBUSY; | 1451 | error = -EBUSY; |
1425 | goto out_lock; | 1452 | goto out_lock; |
@@ -1444,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) | |||
1444 | dqopt->ops[type] = fmt->qf_ops; | 1471 | dqopt->ops[type] = fmt->qf_ops; |
1445 | dqopt->info[type].dqi_format = fmt; | 1472 | dqopt->info[type].dqi_format = fmt; |
1446 | INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); | 1473 | INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); |
1447 | down(&dqopt->dqio_sem); | 1474 | mutex_lock(&dqopt->dqio_mutex); |
1448 | if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { | 1475 | if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { |
1449 | up(&dqopt->dqio_sem); | 1476 | mutex_unlock(&dqopt->dqio_mutex); |
1450 | goto out_file_init; | 1477 | goto out_file_init; |
1451 | } | 1478 | } |
1452 | up(&dqopt->dqio_sem); | 1479 | mutex_unlock(&dqopt->dqio_mutex); |
1453 | mutex_unlock(&inode->i_mutex); | 1480 | mutex_unlock(&inode->i_mutex); |
1454 | set_enable_flags(dqopt, type); | 1481 | set_enable_flags(dqopt, type); |
1455 | 1482 | ||
1456 | add_dquot_ref(sb, type); | 1483 | add_dquot_ref(sb, type); |
1457 | up(&dqopt->dqonoff_sem); | 1484 | mutex_unlock(&dqopt->dqonoff_mutex); |
1458 | 1485 | ||
1459 | return 0; | 1486 | return 0; |
1460 | 1487 | ||
@@ -1462,7 +1489,7 @@ out_file_init: | |||
1462 | dqopt->files[type] = NULL; | 1489 | dqopt->files[type] = NULL; |
1463 | iput(inode); | 1490 | iput(inode); |
1464 | out_lock: | 1491 | out_lock: |
1465 | up(&dqopt->dqonoff_sem); | 1492 | mutex_unlock(&dqopt->dqonoff_mutex); |
1466 | if (oldflags != -1) { | 1493 | if (oldflags != -1) { |
1467 | down_write(&dqopt->dqptr_sem); | 1494 | down_write(&dqopt->dqptr_sem); |
1468 | /* Set the flags back (in the case of accidental quotaon() | 1495 | /* Set the flags back (in the case of accidental quotaon() |
@@ -1550,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d | |||
1550 | { | 1577 | { |
1551 | struct dquot *dquot; | 1578 | struct dquot *dquot; |
1552 | 1579 | ||
1553 | down(&sb_dqopt(sb)->dqonoff_sem); | 1580 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1554 | if (!(dquot = dqget(sb, id, type))) { | 1581 | if (!(dquot = dqget(sb, id, type))) { |
1555 | up(&sb_dqopt(sb)->dqonoff_sem); | 1582 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1556 | return -ESRCH; | 1583 | return -ESRCH; |
1557 | } | 1584 | } |
1558 | do_get_dqblk(dquot, di); | 1585 | do_get_dqblk(dquot, di); |
1559 | dqput(dquot); | 1586 | dqput(dquot); |
1560 | up(&sb_dqopt(sb)->dqonoff_sem); | 1587 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1561 | return 0; | 1588 | return 0; |
1562 | } | 1589 | } |
1563 | 1590 | ||
@@ -1619,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d | |||
1619 | { | 1646 | { |
1620 | struct dquot *dquot; | 1647 | struct dquot *dquot; |
1621 | 1648 | ||
1622 | down(&sb_dqopt(sb)->dqonoff_sem); | 1649 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1623 | if (!(dquot = dqget(sb, id, type))) { | 1650 | if (!(dquot = dqget(sb, id, type))) { |
1624 | up(&sb_dqopt(sb)->dqonoff_sem); | 1651 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1625 | return -ESRCH; | 1652 | return -ESRCH; |
1626 | } | 1653 | } |
1627 | do_set_dqblk(dquot, di); | 1654 | do_set_dqblk(dquot, di); |
1628 | dqput(dquot); | 1655 | dqput(dquot); |
1629 | up(&sb_dqopt(sb)->dqonoff_sem); | 1656 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1630 | return 0; | 1657 | return 0; |
1631 | } | 1658 | } |
1632 | 1659 | ||
@@ -1635,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1635 | { | 1662 | { |
1636 | struct mem_dqinfo *mi; | 1663 | struct mem_dqinfo *mi; |
1637 | 1664 | ||
1638 | down(&sb_dqopt(sb)->dqonoff_sem); | 1665 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1639 | if (!sb_has_quota_enabled(sb, type)) { | 1666 | if (!sb_has_quota_enabled(sb, type)) { |
1640 | up(&sb_dqopt(sb)->dqonoff_sem); | 1667 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1641 | return -ESRCH; | 1668 | return -ESRCH; |
1642 | } | 1669 | } |
1643 | mi = sb_dqopt(sb)->info + type; | 1670 | mi = sb_dqopt(sb)->info + type; |
@@ -1647,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1647 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; | 1674 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; |
1648 | ii->dqi_valid = IIF_ALL; | 1675 | ii->dqi_valid = IIF_ALL; |
1649 | spin_unlock(&dq_data_lock); | 1676 | spin_unlock(&dq_data_lock); |
1650 | up(&sb_dqopt(sb)->dqonoff_sem); | 1677 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1651 | return 0; | 1678 | return 0; |
1652 | } | 1679 | } |
1653 | 1680 | ||
@@ -1656,9 +1683,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1656 | { | 1683 | { |
1657 | struct mem_dqinfo *mi; | 1684 | struct mem_dqinfo *mi; |
1658 | 1685 | ||
1659 | down(&sb_dqopt(sb)->dqonoff_sem); | 1686 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1660 | if (!sb_has_quota_enabled(sb, type)) { | 1687 | if (!sb_has_quota_enabled(sb, type)) { |
1661 | up(&sb_dqopt(sb)->dqonoff_sem); | 1688 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1662 | return -ESRCH; | 1689 | return -ESRCH; |
1663 | } | 1690 | } |
1664 | mi = sb_dqopt(sb)->info + type; | 1691 | mi = sb_dqopt(sb)->info + type; |
@@ -1673,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1673 | mark_info_dirty(sb, type); | 1700 | mark_info_dirty(sb, type); |
1674 | /* Force write to disk */ | 1701 | /* Force write to disk */ |
1675 | sb->dq_op->write_info(sb, type); | 1702 | sb->dq_op->write_info(sb, type); |
1676 | up(&sb_dqopt(sb)->dqonoff_sem); | 1703 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1677 | return 0; | 1704 | return 0; |
1678 | } | 1705 | } |
1679 | 1706 | ||
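The fs/dquot.c hunks above are a mechanical sem2mutex conversion: dqio_sem and dqonoff_sem become struct mutex fields and every down()/up() pair on them becomes mutex_lock()/mutex_unlock(), while dqptr_sem stays an rw_semaphore. A minimal sketch of the pattern, using invented names rather than the real quota_info fields:

#include <linux/mutex.h>

/* Hypothetical example of the sem2mutex pattern; "example_lock" and
 * example_write_info() are made-up names, only DEFINE_MUTEX(),
 * mutex_lock() and mutex_unlock() are real kernel API. */
static DEFINE_MUTEX(example_lock);	/* semaphore version used DECLARE_MUTEX() */

static int example_write_info(void)
{
	int ret = 0;

	mutex_lock(&example_lock);	/* semaphore version: down() */
	/* ... work that must be serialized ... */
	mutex_unlock(&example_lock);	/* semaphore version: up()   */
	return ret;
}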
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4284cd31eba6..1c2b16fda13a 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/eventpoll.h> | 34 | #include <linux/eventpoll.h> |
35 | #include <linux/mount.h> | 35 | #include <linux/mount.h> |
36 | #include <linux/bitops.h> | 36 | #include <linux/bitops.h> |
37 | #include <linux/mutex.h> | ||
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
38 | #include <asm/system.h> | 39 | #include <asm/system.h> |
39 | #include <asm/io.h> | 40 | #include <asm/io.h> |
@@ -46,7 +47,7 @@ | |||
46 | * LOCKING: | 47 | * LOCKING: |
47 | * There are three level of locking required by epoll : | 48 | * There are three level of locking required by epoll : |
48 | * | 49 | * |
49 | * 1) epsem (semaphore) | 50 | * 1) epmutex (mutex) |
50 | * 2) ep->sem (rw_semaphore) | 51 | * 2) ep->sem (rw_semaphore) |
51 | * 3) ep->lock (rw_lock) | 52 | * 3) ep->lock (rw_lock) |
52 | * | 53 | * |
@@ -67,9 +68,9 @@ | |||
67 | * if a file has been pushed inside an epoll set and it is then | 68 | * if a file has been pushed inside an epoll set and it is then |
68 | * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). | 69 | * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). |
69 | * It is possible to drop the "ep->sem" and to use the global | 70 | * It is possible to drop the "ep->sem" and to use the global |
70 | * semaphore "epsem" (together with "ep->lock") to have it working, | 71 | * semaphore "epmutex" (together with "ep->lock") to have it working, |
71 | * but having "ep->sem" will make the interface more scalable. | 72 | * but having "ep->sem" will make the interface more scalable. |
72 | * Events that require holding "epsem" are very rare, while for | 73 | * Events that require holding "epmutex" are very rare, while for |
73 | * normal operations the epoll private "ep->sem" will guarantee | 74 | * normal operations the epoll private "ep->sem" will guarantee |
74 | * a greater scalability. | 75 | * a greater scalability. |
75 | */ | 76 | */ |
@@ -274,7 +275,7 @@ static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type, | |||
274 | /* | 275 | /* |
275 | * This semaphore is used to serialize ep_free() and eventpoll_release_file(). | 276 | * This semaphore is used to serialize ep_free() and eventpoll_release_file(). |
276 | */ | 277 | */ |
277 | static struct semaphore epsem; | 278 | static struct mutex epmutex; |
278 | 279 | ||
279 | /* Safe wake up implementation */ | 280 | /* Safe wake up implementation */ |
280 | static struct poll_safewake psw; | 281 | static struct poll_safewake psw; |
@@ -451,15 +452,6 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) | |||
451 | } | 452 | } |
452 | 453 | ||
453 | 454 | ||
454 | /* Used to initialize the epoll bits inside the "struct file" */ | ||
455 | void eventpoll_init_file(struct file *file) | ||
456 | { | ||
457 | |||
458 | INIT_LIST_HEAD(&file->f_ep_links); | ||
459 | spin_lock_init(&file->f_ep_lock); | ||
460 | } | ||
461 | |||
462 | |||
463 | /* | 455 | /* |
464 | * This is called from eventpoll_release() to unlink files from the eventpoll | 456 | * This is called from eventpoll_release() to unlink files from the eventpoll |
465 | * interface. We need to have this facility to cleanup correctly files that are | 457 | * interface. We need to have this facility to cleanup correctly files that are |
@@ -477,10 +469,10 @@ void eventpoll_release_file(struct file *file) | |||
477 | * cleanup path, and this means that noone is using this file anymore. | 469 | * cleanup path, and this means that noone is using this file anymore. |
478 | * The only hit might come from ep_free() but by holding the semaphore | 470 | * The only hit might come from ep_free() but by holding the semaphore |
479 | * will correctly serialize the operation. We do need to acquire | 471 | * will correctly serialize the operation. We do need to acquire |
480 | * "ep->sem" after "epsem" because ep_remove() requires it when called | 472 | * "ep->sem" after "epmutex" because ep_remove() requires it when called |
481 | * from anywhere but ep_free(). | 473 | * from anywhere but ep_free(). |
482 | */ | 474 | */ |
483 | down(&epsem); | 475 | mutex_lock(&epmutex); |
484 | 476 | ||
485 | while (!list_empty(lsthead)) { | 477 | while (!list_empty(lsthead)) { |
486 | epi = list_entry(lsthead->next, struct epitem, fllink); | 478 | epi = list_entry(lsthead->next, struct epitem, fllink); |
@@ -492,7 +484,7 @@ void eventpoll_release_file(struct file *file) | |||
492 | up_write(&ep->sem); | 484 | up_write(&ep->sem); |
493 | } | 485 | } |
494 | 486 | ||
495 | up(&epsem); | 487 | mutex_unlock(&epmutex); |
496 | } | 488 | } |
497 | 489 | ||
498 | 490 | ||
@@ -819,9 +811,9 @@ static void ep_free(struct eventpoll *ep) | |||
819 | * We do not need to hold "ep->sem" here because the epoll file | 811 | * We do not need to hold "ep->sem" here because the epoll file |
820 | * is on the way to be removed and no one has references to it | 812 | * is on the way to be removed and no one has references to it |
821 | * anymore. The only hit might come from eventpoll_release_file() but | 813 | * anymore. The only hit might come from eventpoll_release_file() but |
822 | * holding "epsem" is sufficent here. | 814 | * holding "epmutex" is sufficent here. |
823 | */ | 815 | */ |
824 | down(&epsem); | 816 | mutex_lock(&epmutex); |
825 | 817 | ||
826 | /* | 818 | /* |
827 | * Walks through the whole tree by unregistering poll callbacks. | 819 | * Walks through the whole tree by unregistering poll callbacks. |
@@ -843,7 +835,7 @@ static void ep_free(struct eventpoll *ep) | |||
843 | ep_remove(ep, epi); | 835 | ep_remove(ep, epi); |
844 | } | 836 | } |
845 | 837 | ||
846 | up(&epsem); | 838 | mutex_unlock(&epmutex); |
847 | } | 839 | } |
848 | 840 | ||
849 | 841 | ||
@@ -1615,7 +1607,7 @@ static int __init eventpoll_init(void) | |||
1615 | { | 1607 | { |
1616 | int error; | 1608 | int error; |
1617 | 1609 | ||
1618 | init_MUTEX(&epsem); | 1610 | mutex_init(&epmutex); |
1619 | 1611 | ||
1620 | /* Initialize the structure used to perform safe poll wait head wake ups */ | 1612 | /* Initialize the structure used to perform safe poll wait head wake ups */ |
1621 | ep_poll_safewake_init(&psw); | 1613 | ep_poll_safewake_init(&psw); |
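Where the lock is not defined statically, the conversion replaces init_MUTEX() on a semaphore with mutex_init() on a struct mutex, as eventpoll_init() does for epmutex above. A small hedged sketch with invented names:

#include <linux/init.h>
#include <linux/mutex.h>

/* Hypothetical module-init style initialization; my_lock and
 * my_subsys_init() are made-up, mutex_init() is the real call that
 * replaces init_MUTEX() on a semaphore used as a binary lock. */
static struct mutex my_lock;

static int __init my_subsys_init(void)
{
	mutex_init(&my_lock);
	return 0;
}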
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index ad1432a2a62e..4ca824985321 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
@@ -36,22 +36,6 @@ | |||
36 | #include "acl.h" | 36 | #include "acl.h" |
37 | #include "xip.h" | 37 | #include "xip.h" |
38 | 38 | ||
39 | /* | ||
40 | * Couple of helper functions - make the code slightly cleaner. | ||
41 | */ | ||
42 | |||
43 | static inline void ext2_inc_count(struct inode *inode) | ||
44 | { | ||
45 | inode->i_nlink++; | ||
46 | mark_inode_dirty(inode); | ||
47 | } | ||
48 | |||
49 | static inline void ext2_dec_count(struct inode *inode) | ||
50 | { | ||
51 | inode->i_nlink--; | ||
52 | mark_inode_dirty(inode); | ||
53 | } | ||
54 | |||
55 | static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) | 39 | static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) |
56 | { | 40 | { |
57 | int err = ext2_add_link(dentry, inode); | 41 | int err = ext2_add_link(dentry, inode); |
@@ -59,7 +43,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) | |||
59 | d_instantiate(dentry, inode); | 43 | d_instantiate(dentry, inode); |
60 | return 0; | 44 | return 0; |
61 | } | 45 | } |
62 | ext2_dec_count(inode); | 46 | inode_dec_link_count(inode); |
63 | iput(inode); | 47 | iput(inode); |
64 | return err; | 48 | return err; |
65 | } | 49 | } |
@@ -201,7 +185,7 @@ out: | |||
201 | return err; | 185 | return err; |
202 | 186 | ||
203 | out_fail: | 187 | out_fail: |
204 | ext2_dec_count(inode); | 188 | inode_dec_link_count(inode); |
205 | iput (inode); | 189 | iput (inode); |
206 | goto out; | 190 | goto out; |
207 | } | 191 | } |
@@ -215,7 +199,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, | |||
215 | return -EMLINK; | 199 | return -EMLINK; |
216 | 200 | ||
217 | inode->i_ctime = CURRENT_TIME_SEC; | 201 | inode->i_ctime = CURRENT_TIME_SEC; |
218 | ext2_inc_count(inode); | 202 | inode_inc_link_count(inode); |
219 | atomic_inc(&inode->i_count); | 203 | atomic_inc(&inode->i_count); |
220 | 204 | ||
221 | return ext2_add_nondir(dentry, inode); | 205 | return ext2_add_nondir(dentry, inode); |
@@ -229,7 +213,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
229 | if (dir->i_nlink >= EXT2_LINK_MAX) | 213 | if (dir->i_nlink >= EXT2_LINK_MAX) |
230 | goto out; | 214 | goto out; |
231 | 215 | ||
232 | ext2_inc_count(dir); | 216 | inode_inc_link_count(dir); |
233 | 217 | ||
234 | inode = ext2_new_inode (dir, S_IFDIR | mode); | 218 | inode = ext2_new_inode (dir, S_IFDIR | mode); |
235 | err = PTR_ERR(inode); | 219 | err = PTR_ERR(inode); |
@@ -243,7 +227,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
243 | else | 227 | else |
244 | inode->i_mapping->a_ops = &ext2_aops; | 228 | inode->i_mapping->a_ops = &ext2_aops; |
245 | 229 | ||
246 | ext2_inc_count(inode); | 230 | inode_inc_link_count(inode); |
247 | 231 | ||
248 | err = ext2_make_empty(inode, dir); | 232 | err = ext2_make_empty(inode, dir); |
249 | if (err) | 233 | if (err) |
@@ -258,11 +242,11 @@ out: | |||
258 | return err; | 242 | return err; |
259 | 243 | ||
260 | out_fail: | 244 | out_fail: |
261 | ext2_dec_count(inode); | 245 | inode_dec_link_count(inode); |
262 | ext2_dec_count(inode); | 246 | inode_dec_link_count(inode); |
263 | iput(inode); | 247 | iput(inode); |
264 | out_dir: | 248 | out_dir: |
265 | ext2_dec_count(dir); | 249 | inode_dec_link_count(dir); |
266 | goto out; | 250 | goto out; |
267 | } | 251 | } |
268 | 252 | ||
@@ -282,7 +266,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry) | |||
282 | goto out; | 266 | goto out; |
283 | 267 | ||
284 | inode->i_ctime = dir->i_ctime; | 268 | inode->i_ctime = dir->i_ctime; |
285 | ext2_dec_count(inode); | 269 | inode_dec_link_count(inode); |
286 | err = 0; | 270 | err = 0; |
287 | out: | 271 | out: |
288 | return err; | 272 | return err; |
@@ -297,8 +281,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry) | |||
297 | err = ext2_unlink(dir, dentry); | 281 | err = ext2_unlink(dir, dentry); |
298 | if (!err) { | 282 | if (!err) { |
299 | inode->i_size = 0; | 283 | inode->i_size = 0; |
300 | ext2_dec_count(inode); | 284 | inode_dec_link_count(inode); |
301 | ext2_dec_count(dir); | 285 | inode_dec_link_count(dir); |
302 | } | 286 | } |
303 | } | 287 | } |
304 | return err; | 288 | return err; |
@@ -338,41 +322,41 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
338 | new_de = ext2_find_entry (new_dir, new_dentry, &new_page); | 322 | new_de = ext2_find_entry (new_dir, new_dentry, &new_page); |
339 | if (!new_de) | 323 | if (!new_de) |
340 | goto out_dir; | 324 | goto out_dir; |
341 | ext2_inc_count(old_inode); | 325 | inode_inc_link_count(old_inode); |
342 | ext2_set_link(new_dir, new_de, new_page, old_inode); | 326 | ext2_set_link(new_dir, new_de, new_page, old_inode); |
343 | new_inode->i_ctime = CURRENT_TIME_SEC; | 327 | new_inode->i_ctime = CURRENT_TIME_SEC; |
344 | if (dir_de) | 328 | if (dir_de) |
345 | new_inode->i_nlink--; | 329 | new_inode->i_nlink--; |
346 | ext2_dec_count(new_inode); | 330 | inode_dec_link_count(new_inode); |
347 | } else { | 331 | } else { |
348 | if (dir_de) { | 332 | if (dir_de) { |
349 | err = -EMLINK; | 333 | err = -EMLINK; |
350 | if (new_dir->i_nlink >= EXT2_LINK_MAX) | 334 | if (new_dir->i_nlink >= EXT2_LINK_MAX) |
351 | goto out_dir; | 335 | goto out_dir; |
352 | } | 336 | } |
353 | ext2_inc_count(old_inode); | 337 | inode_inc_link_count(old_inode); |
354 | err = ext2_add_link(new_dentry, old_inode); | 338 | err = ext2_add_link(new_dentry, old_inode); |
355 | if (err) { | 339 | if (err) { |
356 | ext2_dec_count(old_inode); | 340 | inode_dec_link_count(old_inode); |
357 | goto out_dir; | 341 | goto out_dir; |
358 | } | 342 | } |
359 | if (dir_de) | 343 | if (dir_de) |
360 | ext2_inc_count(new_dir); | 344 | inode_inc_link_count(new_dir); |
361 | } | 345 | } |
362 | 346 | ||
363 | /* | 347 | /* |
364 | * Like most other Unix systems, set the ctime for inodes on a | 348 | * Like most other Unix systems, set the ctime for inodes on a |
365 | * rename. | 349 | * rename. |
366 | * ext2_dec_count() will mark the inode dirty. | 350 | * inode_dec_link_count() will mark the inode dirty. |
367 | */ | 351 | */ |
368 | old_inode->i_ctime = CURRENT_TIME_SEC; | 352 | old_inode->i_ctime = CURRENT_TIME_SEC; |
369 | 353 | ||
370 | ext2_delete_entry (old_de, old_page); | 354 | ext2_delete_entry (old_de, old_page); |
371 | ext2_dec_count(old_inode); | 355 | inode_dec_link_count(old_inode); |
372 | 356 | ||
373 | if (dir_de) { | 357 | if (dir_de) { |
374 | ext2_set_link(old_inode, dir_de, dir_page, new_dir); | 358 | ext2_set_link(old_inode, dir_de, dir_page, new_dir); |
375 | ext2_dec_count(old_dir); | 359 | inode_dec_link_count(old_dir); |
376 | } | 360 | } |
377 | return 0; | 361 | return 0; |
378 | 362 | ||
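The ext2 hunks delete the filesystem-local ext2_inc_count()/ext2_dec_count() helpers and call the generic inode_inc_link_count()/inode_dec_link_count() instead. Judging only from the removed code, the generic helpers do the same two steps, adjust i_nlink and mark the inode dirty; an illustrative sketch (the real helpers live in <linux/fs.h>):

#include <linux/fs.h>

/* Illustrative re-creation of the removed ext2 helpers; the in-tree
 * inode_inc_link_count()/inode_dec_link_count() are assumed to behave
 * the same way -- check <linux/fs.h> for the authoritative versions. */
static inline void example_inc_link_count(struct inode *inode)
{
	inode->i_nlink++;
	mark_inode_dirty(inode);
}

static inline void example_dec_link_count(struct inode *inode)
{
	inode->i_nlink--;
	mark_inode_dirty(inode);
}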
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 832867aef3dc..773459164bb2 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c | |||
@@ -95,11 +95,10 @@ static int ext3_readdir(struct file * filp, | |||
95 | void * dirent, filldir_t filldir) | 95 | void * dirent, filldir_t filldir) |
96 | { | 96 | { |
97 | int error = 0; | 97 | int error = 0; |
98 | unsigned long offset, blk; | 98 | unsigned long offset; |
99 | int i, num, stored; | 99 | int i, stored; |
100 | struct buffer_head * bh, * tmp, * bha[16]; | 100 | struct ext3_dir_entry_2 *de; |
101 | struct ext3_dir_entry_2 * de; | 101 | struct super_block *sb; |
102 | struct super_block * sb; | ||
103 | int err; | 102 | int err; |
104 | struct inode *inode = filp->f_dentry->d_inode; | 103 | struct inode *inode = filp->f_dentry->d_inode; |
105 | int ret = 0; | 104 | int ret = 0; |
@@ -124,12 +123,29 @@ static int ext3_readdir(struct file * filp, | |||
124 | } | 123 | } |
125 | #endif | 124 | #endif |
126 | stored = 0; | 125 | stored = 0; |
127 | bh = NULL; | ||
128 | offset = filp->f_pos & (sb->s_blocksize - 1); | 126 | offset = filp->f_pos & (sb->s_blocksize - 1); |
129 | 127 | ||
130 | while (!error && !stored && filp->f_pos < inode->i_size) { | 128 | while (!error && !stored && filp->f_pos < inode->i_size) { |
131 | blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb); | 129 | unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); |
132 | bh = ext3_bread(NULL, inode, blk, 0, &err); | 130 | struct buffer_head map_bh; |
131 | struct buffer_head *bh = NULL; | ||
132 | |||
133 | map_bh.b_state = 0; | ||
134 | err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); | ||
135 | if (!err) { | ||
136 | page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, | ||
137 | &filp->f_ra, | ||
138 | filp, | ||
139 | map_bh.b_blocknr >> | ||
140 | (PAGE_CACHE_SHIFT - inode->i_blkbits), | ||
141 | 1); | ||
142 | bh = ext3_bread(NULL, inode, blk, 0, &err); | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * We ignore I/O errors on directories so users have a chance | ||
147 | * of recovering data when there's a bad sector | ||
148 | */ | ||
133 | if (!bh) { | 149 | if (!bh) { |
134 | ext3_error (sb, "ext3_readdir", | 150 | ext3_error (sb, "ext3_readdir", |
135 | "directory #%lu contains a hole at offset %lu", | 151 | "directory #%lu contains a hole at offset %lu", |
@@ -138,26 +154,6 @@ static int ext3_readdir(struct file * filp, | |||
138 | continue; | 154 | continue; |
139 | } | 155 | } |
140 | 156 | ||
141 | /* | ||
142 | * Do the readahead | ||
143 | */ | ||
144 | if (!offset) { | ||
145 | for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0; | ||
146 | i > 0; i--) { | ||
147 | tmp = ext3_getblk (NULL, inode, ++blk, 0, &err); | ||
148 | if (tmp && !buffer_uptodate(tmp) && | ||
149 | !buffer_locked(tmp)) | ||
150 | bha[num++] = tmp; | ||
151 | else | ||
152 | brelse (tmp); | ||
153 | } | ||
154 | if (num) { | ||
155 | ll_rw_block (READA, num, bha); | ||
156 | for (i = 0; i < num; i++) | ||
157 | brelse (bha[i]); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | revalidate: | 157 | revalidate: |
162 | /* If the dir block has changed since the last call to | 158 | /* If the dir block has changed since the last call to |
163 | * readdir(2), then we might be pointing to an invalid | 159 | * readdir(2), then we might be pointing to an invalid |
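The ext3_readdir() rework above drops the hand-rolled ll_rw_block(READA, ...) loop: each directory block is first mapped (ext3_get_block_handle() into a throwaway buffer_head with create == 0) so page_cache_readahead() can be driven on the block device's mapping before the block is read. The key idiom is mapping a logical block without issuing I/O; a hedged sketch with a stand-in get_block callback:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical helper: returns the on-disk block number backing logical
 * block "blk" of "inode", or 0 if unmapped or on error.  "my_get_block"
 * is a stand-in for a filesystem get_block routine like the one above. */
static sector_t example_map_block(struct inode *inode, sector_t blk,
				  int (*my_get_block)(struct inode *, sector_t,
						      struct buffer_head *, int))
{
	struct buffer_head map_bh;

	map_bh.b_state = 0;				/* unmapped; no I/O is started */
	if (my_get_block(inode, blk, &map_bh, 0))	/* create == 0 */
		return 0;
	return buffer_mapped(&map_bh) ? map_bh.b_blocknr : 0;
}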
diff --git a/fs/ext3/file.c b/fs/ext3/file.c index 98e78345ead9..59098ea56711 100644 --- a/fs/ext3/file.c +++ b/fs/ext3/file.c | |||
@@ -37,9 +37,9 @@ static int ext3_release_file (struct inode * inode, struct file * filp) | |||
37 | if ((filp->f_mode & FMODE_WRITE) && | 37 | if ((filp->f_mode & FMODE_WRITE) && |
38 | (atomic_read(&inode->i_writecount) == 1)) | 38 | (atomic_read(&inode->i_writecount) == 1)) |
39 | { | 39 | { |
40 | down(&EXT3_I(inode)->truncate_sem); | 40 | mutex_lock(&EXT3_I(inode)->truncate_mutex); |
41 | ext3_discard_reservation(inode); | 41 | ext3_discard_reservation(inode); |
42 | up(&EXT3_I(inode)->truncate_sem); | 42 | mutex_unlock(&EXT3_I(inode)->truncate_mutex); |
43 | } | 43 | } |
44 | if (is_dx(inode) && filp->private_data) | 44 | if (is_dx(inode) && filp->private_data) |
45 | ext3_htree_free_dir_info(filp->private_data); | 45 | ext3_htree_free_dir_info(filp->private_data); |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 0384e539b88f..2c361377e0a5 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -671,7 +671,7 @@ err_out: | |||
671 | * The BKL may not be held on entry here. Be sure to take it early. | 671 | * The BKL may not be held on entry here. Be sure to take it early. |
672 | */ | 672 | */ |
673 | 673 | ||
674 | static int | 674 | int |
675 | ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | 675 | ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, |
676 | struct buffer_head *bh_result, int create, int extend_disksize) | 676 | struct buffer_head *bh_result, int create, int extend_disksize) |
677 | { | 677 | { |
@@ -702,7 +702,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
702 | if (!create || err == -EIO) | 702 | if (!create || err == -EIO) |
703 | goto cleanup; | 703 | goto cleanup; |
704 | 704 | ||
705 | down(&ei->truncate_sem); | 705 | mutex_lock(&ei->truncate_mutex); |
706 | 706 | ||
707 | /* | 707 | /* |
708 | * If the indirect block is missing while we are reading | 708 | * If the indirect block is missing while we are reading |
@@ -723,7 +723,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
723 | } | 723 | } |
724 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); | 724 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); |
725 | if (!partial) { | 725 | if (!partial) { |
726 | up(&ei->truncate_sem); | 726 | mutex_unlock(&ei->truncate_mutex); |
727 | if (err) | 727 | if (err) |
728 | goto cleanup; | 728 | goto cleanup; |
729 | clear_buffer_new(bh_result); | 729 | clear_buffer_new(bh_result); |
@@ -759,13 +759,13 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
759 | err = ext3_splice_branch(handle, inode, iblock, chain, | 759 | err = ext3_splice_branch(handle, inode, iblock, chain, |
760 | partial, left); | 760 | partial, left); |
761 | /* | 761 | /* |
762 | * i_disksize growing is protected by truncate_sem. Don't forget to | 762 | * i_disksize growing is protected by truncate_mutex. Don't forget to |
763 | * protect it if you're about to implement concurrent | 763 | * protect it if you're about to implement concurrent |
764 | * ext3_get_block() -bzzz | 764 | * ext3_get_block() -bzzz |
765 | */ | 765 | */ |
766 | if (!err && extend_disksize && inode->i_size > ei->i_disksize) | 766 | if (!err && extend_disksize && inode->i_size > ei->i_disksize) |
767 | ei->i_disksize = inode->i_size; | 767 | ei->i_disksize = inode->i_size; |
768 | up(&ei->truncate_sem); | 768 | mutex_unlock(&ei->truncate_mutex); |
769 | if (err) | 769 | if (err) |
770 | goto cleanup; | 770 | goto cleanup; |
771 | 771 | ||
@@ -1227,7 +1227,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) | |||
1227 | * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... | 1227 | * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... |
1228 | * | 1228 | * |
1229 | * Same applies to ext3_get_block(). We will deadlock on various things like | 1229 | * Same applies to ext3_get_block(). We will deadlock on various things like |
1230 | * lock_journal and i_truncate_sem. | 1230 | * lock_journal and i_truncate_mutex. |
1231 | * | 1231 | * |
1232 | * Setting PF_MEMALLOC here doesn't work - too many internal memory | 1232 | * Setting PF_MEMALLOC here doesn't work - too many internal memory |
1233 | * allocations fail. | 1233 | * allocations fail. |
@@ -2161,7 +2161,7 @@ void ext3_truncate(struct inode * inode) | |||
2161 | * From here we block out all ext3_get_block() callers who want to | 2161 | * From here we block out all ext3_get_block() callers who want to |
2162 | * modify the block allocation tree. | 2162 | * modify the block allocation tree. |
2163 | */ | 2163 | */ |
2164 | down(&ei->truncate_sem); | 2164 | mutex_lock(&ei->truncate_mutex); |
2165 | 2165 | ||
2166 | if (n == 1) { /* direct blocks */ | 2166 | if (n == 1) { /* direct blocks */ |
2167 | ext3_free_data(handle, inode, NULL, i_data+offsets[0], | 2167 | ext3_free_data(handle, inode, NULL, i_data+offsets[0], |
@@ -2228,7 +2228,7 @@ do_indirects: | |||
2228 | 2228 | ||
2229 | ext3_discard_reservation(inode); | 2229 | ext3_discard_reservation(inode); |
2230 | 2230 | ||
2231 | up(&ei->truncate_sem); | 2231 | mutex_unlock(&ei->truncate_mutex); |
2232 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | 2232 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; |
2233 | ext3_mark_inode_dirty(handle, inode); | 2233 | ext3_mark_inode_dirty(handle, inode); |
2234 | 2234 | ||
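In ext3 the per-inode truncate_sem becomes truncate_mutex, but its role is unchanged: it serializes changes to the block allocation tree against truncate and guards i_disksize growth, as the updated comment in ext3_get_block_handle() says. A sketch of the critical-section shape with a hypothetical inode-info struct:

#include <linux/fs.h>
#include <linux/mutex.h>

/* Hypothetical mirror of the relevant EXT3_I(inode) fields after this
 * patch; example_extend_disksize() only illustrates the locking shape. */
struct example_inode_info {
	struct mutex	truncate_mutex;	/* allocation vs. truncate exclusion */
	loff_t		i_disksize;	/* on-disk size, grown under the mutex */
};

static void example_extend_disksize(struct example_inode_info *ei,
				    struct inode *inode)
{
	mutex_lock(&ei->truncate_mutex);
	/* ... splice newly allocated blocks into the tree here ... */
	if (inode->i_size > ei->i_disksize)
		ei->i_disksize = inode->i_size;
	mutex_unlock(&ei->truncate_mutex);
}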
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index 556cd5510078..aaf1da17b6d4 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c | |||
@@ -182,7 +182,7 @@ flags_err: | |||
182 | * need to allocate reservation structure for this inode | 182 | * need to allocate reservation structure for this inode |
183 | * before set the window size | 183 | * before set the window size |
184 | */ | 184 | */ |
185 | down(&ei->truncate_sem); | 185 | mutex_lock(&ei->truncate_mutex); |
186 | if (!ei->i_block_alloc_info) | 186 | if (!ei->i_block_alloc_info) |
187 | ext3_init_block_alloc_info(inode); | 187 | ext3_init_block_alloc_info(inode); |
188 | 188 | ||
@@ -190,7 +190,7 @@ flags_err: | |||
190 | struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; | 190 | struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; |
191 | rsv->rsv_goal_size = rsv_window_size; | 191 | rsv->rsv_goal_size = rsv_window_size; |
192 | } | 192 | } |
193 | up(&ei->truncate_sem); | 193 | mutex_unlock(&ei->truncate_mutex); |
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | case EXT3_IOC_GROUP_EXTEND: { | 196 | case EXT3_IOC_GROUP_EXTEND: { |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 56bf76586019..efe5b20d7a5a 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -472,7 +472,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | |||
472 | #ifdef CONFIG_EXT3_FS_XATTR | 472 | #ifdef CONFIG_EXT3_FS_XATTR |
473 | init_rwsem(&ei->xattr_sem); | 473 | init_rwsem(&ei->xattr_sem); |
474 | #endif | 474 | #endif |
475 | init_MUTEX(&ei->truncate_sem); | 475 | mutex_init(&ei->truncate_mutex); |
476 | inode_init_once(&ei->vfs_inode); | 476 | inode_init_once(&ei->vfs_inode); |
477 | } | 477 | } |
478 | } | 478 | } |
@@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf) | |||
2382 | * Process 1 Process 2 | 2382 | * Process 1 Process 2 |
2383 | * ext3_create() quota_sync() | 2383 | * ext3_create() quota_sync() |
2384 | * journal_start() write_dquot() | 2384 | * journal_start() write_dquot() |
2385 | * DQUOT_INIT() down(dqio_sem) | 2385 | * DQUOT_INIT() down(dqio_mutex) |
2386 | * down(dqio_sem) journal_start() | 2386 | * down(dqio_mutex) journal_start() |
2387 | * | 2387 | * |
2388 | */ | 2388 | */ |
2389 | 2389 | ||
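The comment touched in the ext3/super.c hunk describes a classic AB-BA inversion: one path starts a journal transaction and then wants dqio_mutex, the other takes dqio_mutex and then wants to start a transaction. Treating the journal start as if it were a lock (which is how the comment ranks it), the hazard looks like this illustrative sketch:

#include <linux/mutex.h>

/* Illustration only: lock_a stands in for "inside a journal transaction"
 * and lock_b for dqio_mutex.  If path_one() and path_two() run
 * concurrently, each can block on the lock the other already holds. */
static DEFINE_MUTEX(lock_a);
static DEFINE_MUTEX(lock_b);

static void path_one(void)		/* ext3_create() side of the comment */
{
	mutex_lock(&lock_a);
	mutex_lock(&lock_b);		/* may wait on path_two() */
	mutex_unlock(&lock_b);
	mutex_unlock(&lock_a);
}

static void path_two(void)		/* quota_sync() side of the comment */
{
	mutex_lock(&lock_b);
	mutex_lock(&lock_a);		/* may wait on path_one(): AB-BA deadlock */
	mutex_unlock(&lock_a);
	mutex_unlock(&lock_b);
}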
diff --git a/fs/fat/dir.c b/fs/fat/dir.c index db0de5c621c7..4095bc149eb1 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c | |||
@@ -114,7 +114,7 @@ static inline int fat_get_entry(struct inode *dir, loff_t *pos, | |||
114 | } | 114 | } |
115 | 115 | ||
116 | /* | 116 | /* |
117 | * Convert Unicode 16 to UTF8, translated Unicode, or ASCII. | 117 | * Convert Unicode 16 to UTF-8, translated Unicode, or ASCII. |
118 | * If uni_xlate is enabled and we can't get a 1:1 conversion, use a | 118 | * If uni_xlate is enabled and we can't get a 1:1 conversion, use a |
119 | * colon as an escape character since it is normally invalid on the vfat | 119 | * colon as an escape character since it is normally invalid on the vfat |
120 | * filesystem. The following four characters are the hexadecimal digits | 120 | * filesystem. The following four characters are the hexadecimal digits |
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index a1a9e0451217..ab171ea8e869 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c | |||
@@ -267,19 +267,19 @@ static struct fatent_operations fat32_ops = { | |||
267 | 267 | ||
268 | static inline void lock_fat(struct msdos_sb_info *sbi) | 268 | static inline void lock_fat(struct msdos_sb_info *sbi) |
269 | { | 269 | { |
270 | down(&sbi->fat_lock); | 270 | mutex_lock(&sbi->fat_lock); |
271 | } | 271 | } |
272 | 272 | ||
273 | static inline void unlock_fat(struct msdos_sb_info *sbi) | 273 | static inline void unlock_fat(struct msdos_sb_info *sbi) |
274 | { | 274 | { |
275 | up(&sbi->fat_lock); | 275 | mutex_unlock(&sbi->fat_lock); |
276 | } | 276 | } |
277 | 277 | ||
278 | void fat_ent_access_init(struct super_block *sb) | 278 | void fat_ent_access_init(struct super_block *sb) |
279 | { | 279 | { |
280 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | 280 | struct msdos_sb_info *sbi = MSDOS_SB(sb); |
281 | 281 | ||
282 | init_MUTEX(&sbi->fat_lock); | 282 | mutex_init(&sbi->fat_lock); |
283 | 283 | ||
284 | switch (sbi->fat_bits) { | 284 | switch (sbi->fat_bits) { |
285 | case 32: | 285 | case 32: |
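Converting fat_lock to a struct mutex leaves the existing lock_fat()/unlock_fat() wrappers in place, so callers do not change. A brief hedged sketch of how such wrappers are typically used around FAT table access (the function body here is a placeholder, not code from the patch):

/* Hypothetical caller of the wrappers shown above; only lock_fat(),
 * unlock_fat() and struct msdos_sb_info come from the hunk, the rest
 * is a placeholder to show the call pattern. */
static void example_update_fat(struct msdos_sb_info *sbi)
{
	lock_fat(sbi);
	/* ... read or rewrite FAT entries for this filesystem ... */
	unlock_fat(sbi);
}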
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index e7f4aa7fc686..e78d7b4842cc 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -1101,7 +1101,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug, | |||
1101 | return -EINVAL; | 1101 | return -EINVAL; |
1102 | } | 1102 | } |
1103 | } | 1103 | } |
1104 | /* UTF8 doesn't provide FAT semantics */ | 1104 | /* UTF-8 doesn't provide FAT semantics */ |
1105 | if (!strcmp(opts->iocharset, "utf8")) { | 1105 | if (!strcmp(opts->iocharset, "utf8")) { |
1106 | printk(KERN_ERR "FAT: utf8 is not a recommended IO charset" | 1106 | printk(KERN_ERR "FAT: utf8 is not a recommended IO charset" |
1107 | " for FAT filesystems, filesystem will be case sensitive!\n"); | 1107 | " for FAT filesystems, filesystem will be case sensitive!\n"); |
diff --git a/fs/fcntl.c b/fs/fcntl.c index dc4a7007f4e7..03c789560fb8 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -73,8 +73,8 @@ repeat: | |||
73 | * orig_start..fdt->next_fd | 73 | * orig_start..fdt->next_fd |
74 | */ | 74 | */ |
75 | start = orig_start; | 75 | start = orig_start; |
76 | if (start < fdt->next_fd) | 76 | if (start < files->next_fd) |
77 | start = fdt->next_fd; | 77 | start = files->next_fd; |
78 | 78 | ||
79 | newfd = start; | 79 | newfd = start; |
80 | if (start < fdt->max_fdset) { | 80 | if (start < fdt->max_fdset) { |
@@ -102,9 +102,8 @@ repeat: | |||
102 | * we reacquire the fdtable pointer and use it while holding | 102 | * we reacquire the fdtable pointer and use it while holding |
103 | * the lock, no one can free it during that time. | 103 | * the lock, no one can free it during that time. |
104 | */ | 104 | */ |
105 | fdt = files_fdtable(files); | 105 | if (start <= files->next_fd) |
106 | if (start <= fdt->next_fd) | 106 | files->next_fd = newfd + 1; |
107 | fdt->next_fd = newfd + 1; | ||
108 | 107 | ||
109 | error = newfd; | 108 | error = newfd; |
110 | 109 | ||
@@ -125,7 +125,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu) | |||
125 | kmem_cache_free(files_cachep, fdt->free_files); | 125 | kmem_cache_free(files_cachep, fdt->free_files); |
126 | return; | 126 | return; |
127 | } | 127 | } |
128 | if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) { | 128 | if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE && |
129 | fdt->max_fds <= NR_OPEN_DEFAULT) { | ||
129 | /* | 130 | /* |
130 | * The fdtable was embedded | 131 | * The fdtable was embedded |
131 | */ | 132 | */ |
@@ -155,8 +156,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu) | |||
155 | 156 | ||
156 | void free_fdtable(struct fdtable *fdt) | 157 | void free_fdtable(struct fdtable *fdt) |
157 | { | 158 | { |
158 | if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE || | 159 | if (fdt->free_files || |
159 | fdt->max_fds > NR_OPEN_DEFAULT) | 160 | fdt->max_fdset > EMBEDDED_FD_SET_SIZE || |
161 | fdt->max_fds > NR_OPEN_DEFAULT) | ||
160 | call_rcu(&fdt->rcu, free_fdtable_rcu); | 162 | call_rcu(&fdt->rcu, free_fdtable_rcu); |
161 | } | 163 | } |
162 | 164 | ||
@@ -199,7 +201,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt) | |||
199 | (nfdt->max_fds - fdt->max_fds) * | 201 | (nfdt->max_fds - fdt->max_fds) * |
200 | sizeof(struct file *)); | 202 | sizeof(struct file *)); |
201 | } | 203 | } |
202 | nfdt->next_fd = fdt->next_fd; | ||
203 | } | 204 | } |
204 | 205 | ||
205 | /* | 206 | /* |
@@ -220,11 +221,9 @@ fd_set * alloc_fdset(int num) | |||
220 | 221 | ||
221 | void free_fdset(fd_set *array, int num) | 222 | void free_fdset(fd_set *array, int num) |
222 | { | 223 | { |
223 | int size = num / 8; | 224 | if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */ |
224 | |||
225 | if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */ | ||
226 | return; | 225 | return; |
227 | else if (size <= PAGE_SIZE) | 226 | else if (num <= 8 * PAGE_SIZE) |
228 | kfree(array); | 227 | kfree(array); |
229 | else | 228 | else |
230 | vfree(array); | 229 | vfree(array); |
@@ -237,22 +236,17 @@ static struct fdtable *alloc_fdtable(int nr) | |||
237 | fd_set *new_openset = NULL, *new_execset = NULL; | 236 | fd_set *new_openset = NULL, *new_execset = NULL; |
238 | struct file **new_fds; | 237 | struct file **new_fds; |
239 | 238 | ||
240 | fdt = kmalloc(sizeof(*fdt), GFP_KERNEL); | 239 | fdt = kzalloc(sizeof(*fdt), GFP_KERNEL); |
241 | if (!fdt) | 240 | if (!fdt) |
242 | goto out; | 241 | goto out; |
243 | memset(fdt, 0, sizeof(*fdt)); | ||
244 | 242 | ||
245 | nfds = __FD_SETSIZE; | 243 | nfds = 8 * L1_CACHE_BYTES; |
246 | /* Expand to the max in easy steps */ | 244 | /* Expand to the max in easy steps */ |
247 | do { | 245 | while (nfds <= nr) { |
248 | if (nfds < (PAGE_SIZE * 8)) | 246 | nfds = nfds * 2; |
249 | nfds = PAGE_SIZE * 8; | 247 | if (nfds > NR_OPEN) |
250 | else { | 248 | nfds = NR_OPEN; |
251 | nfds = nfds * 2; | 249 | } |
252 | if (nfds > NR_OPEN) | ||
253 | nfds = NR_OPEN; | ||
254 | } | ||
255 | } while (nfds <= nr); | ||
256 | 250 | ||
257 | new_openset = alloc_fdset(nfds); | 251 | new_openset = alloc_fdset(nfds); |
258 | new_execset = alloc_fdset(nfds); | 252 | new_execset = alloc_fdset(nfds); |
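The alloc_fdtable() changes switch to kzalloc() and replace the old do/while sizing logic with a plain doubling loop: start at 8 * L1_CACHE_BYTES descriptors, double until the requested fd fits, and clamp at NR_OPEN. A sketch of just that sizing calculation, with illustrative stand-in constants:

/* Sketch of the new sizing loop above.  CACHE_BYTES and MAX_OPEN stand
 * in for L1_CACHE_BYTES and NR_OPEN; callers are assumed to guarantee
 * nr < MAX_OPEN, otherwise the clamp would make this loop spin. */
#define CACHE_BYTES	64
#define MAX_OPEN	(1024 * 1024)

static int example_fdtable_slots(int nr)
{
	int nfds = 8 * CACHE_BYTES;	/* smallest non-embedded table */

	while (nfds <= nr) {
		nfds *= 2;
		if (nfds > MAX_OPEN)
			nfds = MAX_OPEN;
	}
	return nfds;
}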
diff --git a/fs/file_table.c b/fs/file_table.c index 44fabeaa9415..bcea1998b4de 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -88,6 +88,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp, | |||
88 | */ | 88 | */ |
89 | struct file *get_empty_filp(void) | 89 | struct file *get_empty_filp(void) |
90 | { | 90 | { |
91 | struct task_struct *tsk; | ||
91 | static int old_max; | 92 | static int old_max; |
92 | struct file * f; | 93 | struct file * f; |
93 | 94 | ||
@@ -112,13 +113,14 @@ struct file *get_empty_filp(void) | |||
112 | if (security_file_alloc(f)) | 113 | if (security_file_alloc(f)) |
113 | goto fail_sec; | 114 | goto fail_sec; |
114 | 115 | ||
115 | eventpoll_init_file(f); | 116 | tsk = current; |
117 | INIT_LIST_HEAD(&f->f_u.fu_list); | ||
116 | atomic_set(&f->f_count, 1); | 118 | atomic_set(&f->f_count, 1); |
117 | f->f_uid = current->fsuid; | ||
118 | f->f_gid = current->fsgid; | ||
119 | rwlock_init(&f->f_owner.lock); | 119 | rwlock_init(&f->f_owner.lock); |
120 | f->f_uid = tsk->fsuid; | ||
121 | f->f_gid = tsk->fsgid; | ||
122 | eventpoll_init_file(f); | ||
120 | /* f->f_version: 0 */ | 123 | /* f->f_version: 0 */ |
121 | INIT_LIST_HEAD(&f->f_u.fu_list); | ||
122 | return f; | 124 | return f; |
123 | 125 | ||
124 | over: | 126 | over: |
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 6628c3b352cb..4c6473ab3b34 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h | |||
@@ -9,6 +9,7 @@ | |||
9 | //#define DBG | 9 | //#define DBG |
10 | //#define DEBUG_LOCKS | 10 | //#define DEBUG_LOCKS |
11 | 11 | ||
12 | #include <linux/mutex.h> | ||
12 | #include <linux/pagemap.h> | 13 | #include <linux/pagemap.h> |
13 | #include <linux/buffer_head.h> | 14 | #include <linux/buffer_head.h> |
14 | #include <linux/hpfs_fs.h> | 15 | #include <linux/hpfs_fs.h> |
@@ -57,8 +58,8 @@ struct hpfs_inode_info { | |||
57 | unsigned i_ea_uid : 1; /* file's uid is stored in ea */ | 58 | unsigned i_ea_uid : 1; /* file's uid is stored in ea */ |
58 | unsigned i_ea_gid : 1; /* file's gid is stored in ea */ | 59 | unsigned i_ea_gid : 1; /* file's gid is stored in ea */ |
59 | unsigned i_dirty : 1; | 60 | unsigned i_dirty : 1; |
60 | struct semaphore i_sem; | 61 | struct mutex i_mutex; |
61 | struct semaphore i_parent; | 62 | struct mutex i_parent_mutex; |
62 | loff_t **i_rddir_off; | 63 | loff_t **i_rddir_off; |
63 | struct inode vfs_inode; | 64 | struct inode vfs_inode; |
64 | }; | 65 | }; |
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index e3d17e9ea6c1..56f2c338c4d9 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c | |||
@@ -186,9 +186,9 @@ void hpfs_write_inode(struct inode *i) | |||
186 | kfree(hpfs_inode->i_rddir_off); | 186 | kfree(hpfs_inode->i_rddir_off); |
187 | hpfs_inode->i_rddir_off = NULL; | 187 | hpfs_inode->i_rddir_off = NULL; |
188 | } | 188 | } |
189 | down(&hpfs_inode->i_parent); | 189 | mutex_lock(&hpfs_inode->i_parent_mutex); |
190 | if (!i->i_nlink) { | 190 | if (!i->i_nlink) { |
191 | up(&hpfs_inode->i_parent); | 191 | mutex_unlock(&hpfs_inode->i_parent_mutex); |
192 | return; | 192 | return; |
193 | } | 193 | } |
194 | parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); | 194 | parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); |
@@ -199,14 +199,14 @@ void hpfs_write_inode(struct inode *i) | |||
199 | hpfs_read_inode(parent); | 199 | hpfs_read_inode(parent); |
200 | unlock_new_inode(parent); | 200 | unlock_new_inode(parent); |
201 | } | 201 | } |
202 | down(&hpfs_inode->i_sem); | 202 | mutex_lock(&hpfs_inode->i_mutex); |
203 | hpfs_write_inode_nolock(i); | 203 | hpfs_write_inode_nolock(i); |
204 | up(&hpfs_inode->i_sem); | 204 | mutex_unlock(&hpfs_inode->i_mutex); |
205 | iput(parent); | 205 | iput(parent); |
206 | } else { | 206 | } else { |
207 | mark_inode_dirty(i); | 207 | mark_inode_dirty(i); |
208 | } | 208 | } |
209 | up(&hpfs_inode->i_parent); | 209 | mutex_unlock(&hpfs_inode->i_parent_mutex); |
210 | } | 210 | } |
211 | 211 | ||
212 | void hpfs_write_inode_nolock(struct inode *i) | 212 | void hpfs_write_inode_nolock(struct inode *i) |
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 8ff8fc433fc1..a03abb12c610 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c | |||
@@ -60,7 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
60 | if (dee.read_only) | 60 | if (dee.read_only) |
61 | result->i_mode &= ~0222; | 61 | result->i_mode &= ~0222; |
62 | 62 | ||
63 | down(&hpfs_i(dir)->i_sem); | 63 | mutex_lock(&hpfs_i(dir)->i_mutex); |
64 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); | 64 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); |
65 | if (r == 1) | 65 | if (r == 1) |
66 | goto bail3; | 66 | goto bail3; |
@@ -101,11 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
101 | hpfs_write_inode_nolock(result); | 101 | hpfs_write_inode_nolock(result); |
102 | } | 102 | } |
103 | d_instantiate(dentry, result); | 103 | d_instantiate(dentry, result); |
104 | up(&hpfs_i(dir)->i_sem); | 104 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
105 | unlock_kernel(); | 105 | unlock_kernel(); |
106 | return 0; | 106 | return 0; |
107 | bail3: | 107 | bail3: |
108 | up(&hpfs_i(dir)->i_sem); | 108 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
109 | iput(result); | 109 | iput(result); |
110 | bail2: | 110 | bail2: |
111 | hpfs_brelse4(&qbh0); | 111 | hpfs_brelse4(&qbh0); |
@@ -168,7 +168,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc | |||
168 | result->i_data.a_ops = &hpfs_aops; | 168 | result->i_data.a_ops = &hpfs_aops; |
169 | hpfs_i(result)->mmu_private = 0; | 169 | hpfs_i(result)->mmu_private = 0; |
170 | 170 | ||
171 | down(&hpfs_i(dir)->i_sem); | 171 | mutex_lock(&hpfs_i(dir)->i_mutex); |
172 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); | 172 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); |
173 | if (r == 1) | 173 | if (r == 1) |
174 | goto bail2; | 174 | goto bail2; |
@@ -193,12 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc | |||
193 | hpfs_write_inode_nolock(result); | 193 | hpfs_write_inode_nolock(result); |
194 | } | 194 | } |
195 | d_instantiate(dentry, result); | 195 | d_instantiate(dentry, result); |
196 | up(&hpfs_i(dir)->i_sem); | 196 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
197 | unlock_kernel(); | 197 | unlock_kernel(); |
198 | return 0; | 198 | return 0; |
199 | 199 | ||
200 | bail2: | 200 | bail2: |
201 | up(&hpfs_i(dir)->i_sem); | 201 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
202 | iput(result); | 202 | iput(result); |
203 | bail1: | 203 | bail1: |
204 | brelse(bh); | 204 | brelse(bh); |
@@ -254,7 +254,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t | |||
254 | result->i_blocks = 1; | 254 | result->i_blocks = 1; |
255 | init_special_inode(result, mode, rdev); | 255 | init_special_inode(result, mode, rdev); |
256 | 256 | ||
257 | down(&hpfs_i(dir)->i_sem); | 257 | mutex_lock(&hpfs_i(dir)->i_mutex); |
258 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); | 258 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); |
259 | if (r == 1) | 259 | if (r == 1) |
260 | goto bail2; | 260 | goto bail2; |
@@ -271,12 +271,12 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t | |||
271 | 271 | ||
272 | hpfs_write_inode_nolock(result); | 272 | hpfs_write_inode_nolock(result); |
273 | d_instantiate(dentry, result); | 273 | d_instantiate(dentry, result); |
274 | up(&hpfs_i(dir)->i_sem); | 274 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
275 | brelse(bh); | 275 | brelse(bh); |
276 | unlock_kernel(); | 276 | unlock_kernel(); |
277 | return 0; | 277 | return 0; |
278 | bail2: | 278 | bail2: |
279 | up(&hpfs_i(dir)->i_sem); | 279 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
280 | iput(result); | 280 | iput(result); |
281 | bail1: | 281 | bail1: |
282 | brelse(bh); | 282 | brelse(bh); |
@@ -333,7 +333,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy | |||
333 | result->i_op = &page_symlink_inode_operations; | 333 | result->i_op = &page_symlink_inode_operations; |
334 | result->i_data.a_ops = &hpfs_symlink_aops; | 334 | result->i_data.a_ops = &hpfs_symlink_aops; |
335 | 335 | ||
336 | down(&hpfs_i(dir)->i_sem); | 336 | mutex_lock(&hpfs_i(dir)->i_mutex); |
337 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); | 337 | r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); |
338 | if (r == 1) | 338 | if (r == 1) |
339 | goto bail2; | 339 | goto bail2; |
@@ -352,11 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy | |||
352 | 352 | ||
353 | hpfs_write_inode_nolock(result); | 353 | hpfs_write_inode_nolock(result); |
354 | d_instantiate(dentry, result); | 354 | d_instantiate(dentry, result); |
355 | up(&hpfs_i(dir)->i_sem); | 355 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
356 | unlock_kernel(); | 356 | unlock_kernel(); |
357 | return 0; | 357 | return 0; |
358 | bail2: | 358 | bail2: |
359 | up(&hpfs_i(dir)->i_sem); | 359 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
360 | iput(result); | 360 | iput(result); |
361 | bail1: | 361 | bail1: |
362 | brelse(bh); | 362 | brelse(bh); |
@@ -382,8 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) | |||
382 | lock_kernel(); | 382 | lock_kernel(); |
383 | hpfs_adjust_length((char *)name, &len); | 383 | hpfs_adjust_length((char *)name, &len); |
384 | again: | 384 | again: |
385 | down(&hpfs_i(inode)->i_parent); | 385 | mutex_lock(&hpfs_i(inode)->i_parent_mutex); |
386 | down(&hpfs_i(dir)->i_sem); | 386 | mutex_lock(&hpfs_i(dir)->i_mutex); |
387 | err = -ENOENT; | 387 | err = -ENOENT; |
388 | de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); | 388 | de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); |
389 | if (!de) | 389 | if (!de) |
@@ -410,8 +410,8 @@ again: | |||
410 | if (rep++) | 410 | if (rep++) |
411 | break; | 411 | break; |
412 | 412 | ||
413 | up(&hpfs_i(dir)->i_sem); | 413 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
414 | up(&hpfs_i(inode)->i_parent); | 414 | mutex_unlock(&hpfs_i(inode)->i_parent_mutex); |
415 | d_drop(dentry); | 415 | d_drop(dentry); |
416 | spin_lock(&dentry->d_lock); | 416 | spin_lock(&dentry->d_lock); |
417 | if (atomic_read(&dentry->d_count) > 1 || | 417 | if (atomic_read(&dentry->d_count) > 1 || |
@@ -442,8 +442,8 @@ again: | |||
442 | out1: | 442 | out1: |
443 | hpfs_brelse4(&qbh); | 443 | hpfs_brelse4(&qbh); |
444 | out: | 444 | out: |
445 | up(&hpfs_i(dir)->i_sem); | 445 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
446 | up(&hpfs_i(inode)->i_parent); | 446 | mutex_unlock(&hpfs_i(inode)->i_parent_mutex); |
447 | unlock_kernel(); | 447 | unlock_kernel(); |
448 | return err; | 448 | return err; |
449 | } | 449 | } |
@@ -463,8 +463,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
463 | 463 | ||
464 | hpfs_adjust_length((char *)name, &len); | 464 | hpfs_adjust_length((char *)name, &len); |
465 | lock_kernel(); | 465 | lock_kernel(); |
466 | down(&hpfs_i(inode)->i_parent); | 466 | mutex_lock(&hpfs_i(inode)->i_parent_mutex); |
467 | down(&hpfs_i(dir)->i_sem); | 467 | mutex_lock(&hpfs_i(dir)->i_mutex); |
468 | err = -ENOENT; | 468 | err = -ENOENT; |
469 | de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); | 469 | de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); |
470 | if (!de) | 470 | if (!de) |
@@ -502,8 +502,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
502 | out1: | 502 | out1: |
503 | hpfs_brelse4(&qbh); | 503 | hpfs_brelse4(&qbh); |
504 | out: | 504 | out: |
505 | up(&hpfs_i(dir)->i_sem); | 505 | mutex_unlock(&hpfs_i(dir)->i_mutex); |
506 | up(&hpfs_i(inode)->i_parent); | 506 | mutex_unlock(&hpfs_i(inode)->i_parent_mutex); |
507 | unlock_kernel(); | 507 | unlock_kernel(); |
508 | return err; | 508 | return err; |
509 | } | 509 | } |
@@ -565,12 +565,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
565 | 565 | ||
566 | lock_kernel(); | 566 | lock_kernel(); |
567 | /* order doesn't matter, due to VFS exclusion */ | 567 | /* order doesn't matter, due to VFS exclusion */ |
568 | down(&hpfs_i(i)->i_parent); | 568 | mutex_lock(&hpfs_i(i)->i_parent_mutex); |
569 | if (new_inode) | 569 | if (new_inode) |
570 | down(&hpfs_i(new_inode)->i_parent); | 570 | mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); |
571 | down(&hpfs_i(old_dir)->i_sem); | 571 | mutex_lock(&hpfs_i(old_dir)->i_mutex); |
572 | if (new_dir != old_dir) | 572 | if (new_dir != old_dir) |
573 | down(&hpfs_i(new_dir)->i_sem); | 573 | mutex_lock(&hpfs_i(new_dir)->i_mutex); |
574 | 574 | ||
575 | /* Erm? Moving over the empty non-busy directory is perfectly legal */ | 575 | /* Erm? Moving over the empty non-busy directory is perfectly legal */ |
576 | if (new_inode && S_ISDIR(new_inode->i_mode)) { | 576 | if (new_inode && S_ISDIR(new_inode->i_mode)) { |
@@ -650,11 +650,11 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
650 | hpfs_decide_conv(i, (char *)new_name, new_len); | 650 | hpfs_decide_conv(i, (char *)new_name, new_len); |
651 | end1: | 651 | end1: |
652 | if (old_dir != new_dir) | 652 | if (old_dir != new_dir) |
653 | up(&hpfs_i(new_dir)->i_sem); | 653 | mutex_unlock(&hpfs_i(new_dir)->i_mutex); |
654 | up(&hpfs_i(old_dir)->i_sem); | 654 | mutex_unlock(&hpfs_i(old_dir)->i_mutex); |
655 | up(&hpfs_i(i)->i_parent); | 655 | mutex_unlock(&hpfs_i(i)->i_parent_mutex); |
656 | if (new_inode) | 656 | if (new_inode) |
657 | up(&hpfs_i(new_inode)->i_parent); | 657 | mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); |
658 | unlock_kernel(); | 658 | unlock_kernel(); |
659 | return err; | 659 | return err; |
660 | } | 660 | } |
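The hpfs conversion keeps the original nesting: directory operations take the victim inode's i_parent_mutex first, then the directory's i_mutex (and, in rename, the second directory's), releasing in reverse order. A sketch of that discipline with a hypothetical inode-info struct:

#include <linux/mutex.h>

/* Hypothetical mirror of hpfs_inode_info's two locks; example_unlink()
 * only demonstrates the acquisition/release order used in the hunks above. */
struct example_hpfs_info {
	struct mutex i_mutex;		/* serializes dirent changes in a dir */
	struct mutex i_parent_mutex;	/* serializes parent-pointer updates */
};

static void example_unlink(struct example_hpfs_info *dir,
			   struct example_hpfs_info *victim)
{
	mutex_lock(&victim->i_parent_mutex);	/* always taken first */
	mutex_lock(&dir->i_mutex);
	/* ... remove the dirent and update the victim's link state ... */
	mutex_unlock(&dir->i_mutex);		/* release in reverse order */
	mutex_unlock(&victim->i_parent_mutex);
}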
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 63e88d7e2c3b..9488a794076e 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c | |||
@@ -181,8 +181,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | |||
181 | 181 | ||
182 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | 182 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == |
183 | SLAB_CTOR_CONSTRUCTOR) { | 183 | SLAB_CTOR_CONSTRUCTOR) { |
184 | init_MUTEX(&ei->i_sem); | 184 | mutex_init(&ei->i_mutex); |
185 | init_MUTEX(&ei->i_parent); | 185 | mutex_init(&ei->i_parent_mutex); |
186 | inode_init_once(&ei->vfs_inode); | 186 | inode_init_once(&ei->vfs_inode); |
187 | } | 187 | } |
188 | } | 188 | } |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index b35195289945..25fa8bba8cb5 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -56,48 +56,10 @@ static void huge_pagevec_release(struct pagevec *pvec) | |||
56 | pagevec_reinit(pvec); | 56 | pagevec_reinit(pvec); |
57 | } | 57 | } |
58 | 58 | ||
59 | /* | ||
60 | * huge_pages_needed tries to determine the number of new huge pages that | ||
61 | * will be required to fully populate this VMA. This will be equal to | ||
62 | * the size of the VMA in huge pages minus the number of huge pages | ||
63 | * (covered by this VMA) that are found in the page cache. | ||
64 | * | ||
65 | * Result is in bytes to be compatible with is_hugepage_mem_enough() | ||
66 | */ | ||
67 | static unsigned long | ||
68 | huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma) | ||
69 | { | ||
70 | int i; | ||
71 | struct pagevec pvec; | ||
72 | unsigned long start = vma->vm_start; | ||
73 | unsigned long end = vma->vm_end; | ||
74 | unsigned long hugepages = (end - start) >> HPAGE_SHIFT; | ||
75 | pgoff_t next = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT); | ||
76 | pgoff_t endpg = next + hugepages; | ||
77 | |||
78 | pagevec_init(&pvec, 0); | ||
79 | while (next < endpg) { | ||
80 | if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) | ||
81 | break; | ||
82 | for (i = 0; i < pagevec_count(&pvec); i++) { | ||
83 | struct page *page = pvec.pages[i]; | ||
84 | if (page->index > next) | ||
85 | next = page->index; | ||
86 | if (page->index >= endpg) | ||
87 | break; | ||
88 | next++; | ||
89 | hugepages--; | ||
90 | } | ||
91 | huge_pagevec_release(&pvec); | ||
92 | } | ||
93 | return hugepages << HPAGE_SHIFT; | ||
94 | } | ||
95 | |||
96 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | 59 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) |
97 | { | 60 | { |
98 | struct inode *inode = file->f_dentry->d_inode; | 61 | struct inode *inode = file->f_dentry->d_inode; |
99 | struct address_space *mapping = inode->i_mapping; | 62 | struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); |
100 | unsigned long bytes; | ||
101 | loff_t len, vma_len; | 63 | loff_t len, vma_len; |
102 | int ret; | 64 | int ret; |
103 | 65 | ||
@@ -113,10 +75,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
113 | if (vma->vm_end - vma->vm_start < HPAGE_SIZE) | 75 | if (vma->vm_end - vma->vm_start < HPAGE_SIZE) |
114 | return -EINVAL; | 76 | return -EINVAL; |
115 | 77 | ||
116 | bytes = huge_pages_needed(mapping, vma); | ||
117 | if (!is_hugepage_mem_enough(bytes)) | ||
118 | return -ENOMEM; | ||
119 | |||
120 | vma_len = (loff_t)(vma->vm_end - vma->vm_start); | 78 | vma_len = (loff_t)(vma->vm_end - vma->vm_start); |
121 | 79 | ||
122 | mutex_lock(&inode->i_mutex); | 80 | mutex_lock(&inode->i_mutex); |
@@ -129,6 +87,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
129 | if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size) | 87 | if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size) |
130 | goto out; | 88 | goto out; |
131 | 89 | ||
90 | if (vma->vm_flags & VM_MAYSHARE) | ||
91 | if (hugetlb_extend_reservation(info, len >> HPAGE_SHIFT) != 0) | ||
92 | goto out; | ||
93 | |||
132 | ret = 0; | 94 | ret = 0; |
133 | hugetlb_prefault_arch_hook(vma->vm_mm); | 95 | hugetlb_prefault_arch_hook(vma->vm_mm); |
134 | if (inode->i_size < len) | 96 | if (inode->i_size < len) |
@@ -227,13 +189,18 @@ static void truncate_huge_page(struct page *page) | |||
227 | put_page(page); | 189 | put_page(page); |
228 | } | 190 | } |
229 | 191 | ||
230 | static void truncate_hugepages(struct address_space *mapping, loff_t lstart) | 192 | static void truncate_hugepages(struct inode *inode, loff_t lstart) |
231 | { | 193 | { |
194 | struct address_space *mapping = &inode->i_data; | ||
232 | const pgoff_t start = lstart >> HPAGE_SHIFT; | 195 | const pgoff_t start = lstart >> HPAGE_SHIFT; |
233 | struct pagevec pvec; | 196 | struct pagevec pvec; |
234 | pgoff_t next; | 197 | pgoff_t next; |
235 | int i; | 198 | int i; |
236 | 199 | ||
200 | hugetlb_truncate_reservation(HUGETLBFS_I(inode), | ||
201 | lstart >> HPAGE_SHIFT); | ||
202 | if (!mapping->nrpages) | ||
203 | return; | ||
237 | pagevec_init(&pvec, 0); | 204 | pagevec_init(&pvec, 0); |
238 | next = start; | 205 | next = start; |
239 | while (1) { | 206 | while (1) { |
@@ -262,8 +229,7 @@ static void truncate_hugepages(struct address_space *mapping, loff_t lstart) | |||
262 | 229 | ||
263 | static void hugetlbfs_delete_inode(struct inode *inode) | 230 | static void hugetlbfs_delete_inode(struct inode *inode) |
264 | { | 231 | { |
265 | if (inode->i_data.nrpages) | 232 | truncate_hugepages(inode, 0); |
266 | truncate_hugepages(&inode->i_data, 0); | ||
267 | clear_inode(inode); | 233 | clear_inode(inode); |
268 | } | 234 | } |
269 | 235 | ||
@@ -296,8 +262,7 @@ static void hugetlbfs_forget_inode(struct inode *inode) | |||
296 | inode->i_state |= I_FREEING; | 262 | inode->i_state |= I_FREEING; |
297 | inodes_stat.nr_inodes--; | 263 | inodes_stat.nr_inodes--; |
298 | spin_unlock(&inode_lock); | 264 | spin_unlock(&inode_lock); |
299 | if (inode->i_data.nrpages) | 265 | truncate_hugepages(inode, 0); |
300 | truncate_hugepages(&inode->i_data, 0); | ||
301 | clear_inode(inode); | 266 | clear_inode(inode); |
302 | destroy_inode(inode); | 267 | destroy_inode(inode); |
303 | } | 268 | } |
@@ -356,7 +321,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) | |||
356 | if (!prio_tree_empty(&mapping->i_mmap)) | 321 | if (!prio_tree_empty(&mapping->i_mmap)) |
357 | hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); | 322 | hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); |
358 | spin_unlock(&mapping->i_mmap_lock); | 323 | spin_unlock(&mapping->i_mmap_lock); |
359 | truncate_hugepages(mapping, offset); | 324 | truncate_hugepages(inode, offset); |
360 | return 0; | 325 | return 0; |
361 | } | 326 | } |
362 | 327 | ||
@@ -573,6 +538,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) | |||
573 | hugetlbfs_inc_free_inodes(sbinfo); | 538 | hugetlbfs_inc_free_inodes(sbinfo); |
574 | return NULL; | 539 | return NULL; |
575 | } | 540 | } |
541 | p->prereserved_hpages = 0; | ||
576 | return &p->vfs_inode; | 542 | return &p->vfs_inode; |
577 | } | 543 | } |
578 | 544 | ||
@@ -771,21 +737,6 @@ static struct file_system_type hugetlbfs_fs_type = { | |||
771 | 737 | ||
772 | static struct vfsmount *hugetlbfs_vfsmount; | 738 | static struct vfsmount *hugetlbfs_vfsmount; |
773 | 739 | ||
774 | /* | ||
775 | * Return the next identifier for a shm file | ||
776 | */ | ||
777 | static unsigned long hugetlbfs_counter(void) | ||
778 | { | ||
779 | static DEFINE_SPINLOCK(lock); | ||
780 | static unsigned long counter; | ||
781 | unsigned long ret; | ||
782 | |||
783 | spin_lock(&lock); | ||
784 | ret = ++counter; | ||
785 | spin_unlock(&lock); | ||
786 | return ret; | ||
787 | } | ||
788 | |||
789 | static int can_do_hugetlb_shm(void) | 740 | static int can_do_hugetlb_shm(void) |
790 | { | 741 | { |
791 | return likely(capable(CAP_IPC_LOCK) || | 742 | return likely(capable(CAP_IPC_LOCK) || |
@@ -801,18 +752,16 @@ struct file *hugetlb_zero_setup(size_t size) | |||
801 | struct dentry *dentry, *root; | 752 | struct dentry *dentry, *root; |
802 | struct qstr quick_string; | 753 | struct qstr quick_string; |
803 | char buf[16]; | 754 | char buf[16]; |
755 | static atomic_t counter; | ||
804 | 756 | ||
805 | if (!can_do_hugetlb_shm()) | 757 | if (!can_do_hugetlb_shm()) |
806 | return ERR_PTR(-EPERM); | 758 | return ERR_PTR(-EPERM); |
807 | 759 | ||
808 | if (!is_hugepage_mem_enough(size)) | ||
809 | return ERR_PTR(-ENOMEM); | ||
810 | |||
811 | if (!user_shm_lock(size, current->user)) | 760 | if (!user_shm_lock(size, current->user)) |
812 | return ERR_PTR(-ENOMEM); | 761 | return ERR_PTR(-ENOMEM); |
813 | 762 | ||
814 | root = hugetlbfs_vfsmount->mnt_root; | 763 | root = hugetlbfs_vfsmount->mnt_root; |
815 | snprintf(buf, 16, "%lu", hugetlbfs_counter()); | 764 | snprintf(buf, 16, "%u", atomic_inc_return(&counter)); |
816 | quick_string.name = buf; | 765 | quick_string.name = buf; |
817 | quick_string.len = strlen(quick_string.name); | 766 | quick_string.len = strlen(quick_string.name); |
818 | quick_string.hash = 0; | 767 | quick_string.hash = 0; |
@@ -831,6 +780,11 @@ struct file *hugetlb_zero_setup(size_t size) | |||
831 | if (!inode) | 780 | if (!inode) |
832 | goto out_file; | 781 | goto out_file; |
833 | 782 | ||
783 | error = -ENOMEM; | ||
784 | if (hugetlb_extend_reservation(HUGETLBFS_I(inode), | ||
785 | size >> HPAGE_SHIFT) != 0) | ||
786 | goto out_inode; | ||
787 | |||
834 | d_instantiate(dentry, inode); | 788 | d_instantiate(dentry, inode); |
835 | inode->i_size = size; | 789 | inode->i_size = size; |
836 | inode->i_nlink = 0; | 790 | inode->i_nlink = 0; |
@@ -841,6 +795,8 @@ struct file *hugetlb_zero_setup(size_t size) | |||
841 | file->f_mode = FMODE_WRITE | FMODE_READ; | 795 | file->f_mode = FMODE_WRITE | FMODE_READ; |
842 | return file; | 796 | return file; |
843 | 797 | ||
798 | out_inode: | ||
799 | iput(inode); | ||
844 | out_file: | 800 | out_file: |
845 | put_filp(file); | 801 | put_filp(file); |
846 | out_dentry: | 802 | out_dentry: |
diff --git a/fs/inode.c b/fs/inode.c index d0be6159eb7f..25967b67903d 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable; | |||
84 | DEFINE_SPINLOCK(inode_lock); | 84 | DEFINE_SPINLOCK(inode_lock); |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * iprune_sem provides exclusion between the kswapd or try_to_free_pages | 87 | * iprune_mutex provides exclusion between the kswapd or try_to_free_pages |
88 | * icache shrinking path, and the umount path. Without this exclusion, | 88 | * icache shrinking path, and the umount path. Without this exclusion, |
89 | * by the time prune_icache calls iput for the inode whose pages it has | 89 | * by the time prune_icache calls iput for the inode whose pages it has |
90 | * been invalidating, or by the time it calls clear_inode & destroy_inode | 90 | * been invalidating, or by the time it calls clear_inode & destroy_inode |
91 | * from its final dispose_list, the struct super_block they refer to | 91 | * from its final dispose_list, the struct super_block they refer to |
92 | * (for inode->i_sb->s_op) may already have been freed and reused. | 92 | * (for inode->i_sb->s_op) may already have been freed and reused. |
93 | */ | 93 | */ |
94 | DECLARE_MUTEX(iprune_sem); | 94 | DEFINE_MUTEX(iprune_mutex); |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Statistics gathering.. | 97 | * Statistics gathering.. |
@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode) | |||
206 | i_size_ordered_init(inode); | 206 | i_size_ordered_init(inode); |
207 | #ifdef CONFIG_INOTIFY | 207 | #ifdef CONFIG_INOTIFY |
208 | INIT_LIST_HEAD(&inode->inotify_watches); | 208 | INIT_LIST_HEAD(&inode->inotify_watches); |
209 | sema_init(&inode->inotify_sem, 1); | 209 | mutex_init(&inode->inotify_mutex); |
210 | #endif | 210 | #endif |
211 | } | 211 | } |
212 | 212 | ||
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) | |||
319 | /* | 319 | /* |
320 | * We can reschedule here without worrying about the list's | 320 | * We can reschedule here without worrying about the list's |
321 | * consistency because the per-sb list of inodes must not | 321 | * consistency because the per-sb list of inodes must not |
322 | * change during umount anymore, and because iprune_sem keeps | 322 | * change during umount anymore, and because iprune_mutex keeps |
323 | * shrink_icache_memory() away. | 323 | * shrink_icache_memory() away. |
324 | */ | 324 | */ |
325 | cond_resched_lock(&inode_lock); | 325 | cond_resched_lock(&inode_lock); |
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb) | |||
355 | int busy; | 355 | int busy; |
356 | LIST_HEAD(throw_away); | 356 | LIST_HEAD(throw_away); |
357 | 357 | ||
358 | down(&iprune_sem); | 358 | mutex_lock(&iprune_mutex); |
359 | spin_lock(&inode_lock); | 359 | spin_lock(&inode_lock); |
360 | inotify_unmount_inodes(&sb->s_inodes); | 360 | inotify_unmount_inodes(&sb->s_inodes); |
361 | busy = invalidate_list(&sb->s_inodes, &throw_away); | 361 | busy = invalidate_list(&sb->s_inodes, &throw_away); |
362 | spin_unlock(&inode_lock); | 362 | spin_unlock(&inode_lock); |
363 | 363 | ||
364 | dispose_list(&throw_away); | 364 | dispose_list(&throw_away); |
365 | up(&iprune_sem); | 365 | mutex_unlock(&iprune_mutex); |
366 | 366 | ||
367 | return busy; | 367 | return busy; |
368 | } | 368 | } |
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev) | |||
377 | if (sb) { | 377 | if (sb) { |
378 | /* | 378 | /* |
379 | * no need to lock the super, get_super holds the | 379 | * no need to lock the super, get_super holds the |
380 | * read semaphore so the filesystem cannot go away | 380 | * read mutex so the filesystem cannot go away |
381 | * under us (->put_super runs with the write lock | 381 | * under us (->put_super runs with the write lock |
382 | * hold). | 382 | * hold). |
383 | */ | 383 | */ |
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan) | |||
423 | int nr_scanned; | 423 | int nr_scanned; |
424 | unsigned long reap = 0; | 424 | unsigned long reap = 0; |
425 | 425 | ||
426 | down(&iprune_sem); | 426 | mutex_lock(&iprune_mutex); |
427 | spin_lock(&inode_lock); | 427 | spin_lock(&inode_lock); |
428 | for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { | 428 | for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { |
429 | struct inode *inode; | 429 | struct inode *inode; |
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan) | |||
459 | spin_unlock(&inode_lock); | 459 | spin_unlock(&inode_lock); |
460 | 460 | ||
461 | dispose_list(&freeable); | 461 | dispose_list(&freeable); |
462 | up(&iprune_sem); | 462 | mutex_unlock(&iprune_mutex); |
463 | 463 | ||
464 | if (current_is_kswapd()) | 464 | if (current_is_kswapd()) |
465 | mod_page_state(kswapd_inodesteal, reap); | 465 | mod_page_state(kswapd_inodesteal, reap); |
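The iprune_sem to iprune_mutex change keeps the exclusion described in the comment above: the icache shrinker and the umount path each hold the lock across their list walk and dispose_list() work, so neither can observe a super_block the other is tearing down. A rough sketch of that shape, with simplified, illustrative function names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(prune_mutex);       /* stands in for iprune_mutex */

    static void shrink_side(void)
    {
            mutex_lock(&prune_mutex);
            /* pick victims off the unused list and dispose of them */
            mutex_unlock(&prune_mutex);
    }

    static void umount_side(void)
    {
            mutex_lock(&prune_mutex);
            /* invalidate_list() + dispose_list(); the shrinker is held off */
            mutex_unlock(&prune_mutex);
    }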
diff --git a/fs/inotify.c b/fs/inotify.c index 3041503bde02..0ee39ef591c6 100644 --- a/fs/inotify.c +++ b/fs/inotify.c | |||
@@ -54,10 +54,10 @@ int inotify_max_queued_events; | |||
54 | * Lock ordering: | 54 | * Lock ordering: |
55 | * | 55 | * |
56 | * dentry->d_lock (used to keep d_move() away from dentry->d_parent) | 56 | * dentry->d_lock (used to keep d_move() away from dentry->d_parent) |
57 | * iprune_sem (synchronize shrink_icache_memory()) | 57 | * iprune_mutex (synchronize shrink_icache_memory()) |
58 | * inode_lock (protects the super_block->s_inodes list) | 58 | * inode_lock (protects the super_block->s_inodes list) |
59 | * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) | 59 | * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) |
60 | * inotify_dev->sem (protects inotify_device and watches->d_list) | 60 | * inotify_dev->mutex (protects inotify_device and watches->d_list) |
61 | */ | 61 | */ |
62 | 62 | ||
63 | /* | 63 | /* |
@@ -79,12 +79,12 @@ int inotify_max_queued_events; | |||
79 | /* | 79 | /* |
80 | * struct inotify_device - represents an inotify instance | 80 | * struct inotify_device - represents an inotify instance |
81 | * | 81 | * |
82 | * This structure is protected by the semaphore 'sem'. | 82 | * This structure is protected by the mutex 'mutex'. |
83 | */ | 83 | */ |
84 | struct inotify_device { | 84 | struct inotify_device { |
85 | wait_queue_head_t wq; /* wait queue for i/o */ | 85 | wait_queue_head_t wq; /* wait queue for i/o */ |
86 | struct idr idr; /* idr mapping wd -> watch */ | 86 | struct idr idr; /* idr mapping wd -> watch */ |
87 | struct semaphore sem; /* protects this bad boy */ | 87 | struct mutex mutex; /* protects this bad boy */ |
88 | struct list_head events; /* list of queued events */ | 88 | struct list_head events; /* list of queued events */ |
89 | struct list_head watches; /* list of watches */ | 89 | struct list_head watches; /* list of watches */ |
90 | atomic_t count; /* reference count */ | 90 | atomic_t count; /* reference count */ |
@@ -101,7 +101,7 @@ struct inotify_device { | |||
101 | * device. In read(), this list is walked and all events that can fit in the | 101 | * device. In read(), this list is walked and all events that can fit in the |
102 | * buffer are returned. | 102 | * buffer are returned. |
103 | * | 103 | * |
104 | * Protected by dev->sem of the device in which we are queued. | 104 | * Protected by dev->mutex of the device in which we are queued. |
105 | */ | 105 | */ |
106 | struct inotify_kernel_event { | 106 | struct inotify_kernel_event { |
107 | struct inotify_event event; /* the user-space event */ | 107 | struct inotify_event event; /* the user-space event */ |
@@ -112,8 +112,8 @@ struct inotify_kernel_event { | |||
112 | /* | 112 | /* |
113 | * struct inotify_watch - represents a watch request on a specific inode | 113 | * struct inotify_watch - represents a watch request on a specific inode |
114 | * | 114 | * |
115 | * d_list is protected by dev->sem of the associated watch->dev. | 115 | * d_list is protected by dev->mutex of the associated watch->dev. |
116 | * i_list and mask are protected by inode->inotify_sem of the associated inode. | 116 | * i_list and mask are protected by inode->inotify_mutex of the associated inode. |
117 | * dev, inode, and wd are never written to once the watch is created. | 117 | * dev, inode, and wd are never written to once the watch is created. |
118 | */ | 118 | */ |
119 | struct inotify_watch { | 119 | struct inotify_watch { |
@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, | |||
261 | /* | 261 | /* |
262 | * inotify_dev_get_event - return the next event in the given dev's queue | 262 | * inotify_dev_get_event - return the next event in the given dev's queue |
263 | * | 263 | * |
264 | * Caller must hold dev->sem. | 264 | * Caller must hold dev->mutex. |
265 | */ | 265 | */ |
266 | static inline struct inotify_kernel_event * | 266 | static inline struct inotify_kernel_event * |
267 | inotify_dev_get_event(struct inotify_device *dev) | 267 | inotify_dev_get_event(struct inotify_device *dev) |
@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev) | |||
272 | /* | 272 | /* |
273 | * inotify_dev_queue_event - add a new event to the given device | 273 | * inotify_dev_queue_event - add a new event to the given device |
274 | * | 274 | * |
275 | * Caller must hold dev->sem. Can sleep (calls kernel_event()). | 275 | * Caller must hold dev->mutex. Can sleep (calls kernel_event()). |
276 | */ | 276 | */ |
277 | static void inotify_dev_queue_event(struct inotify_device *dev, | 277 | static void inotify_dev_queue_event(struct inotify_device *dev, |
278 | struct inotify_watch *watch, u32 mask, | 278 | struct inotify_watch *watch, u32 mask, |
@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev, | |||
315 | /* | 315 | /* |
316 | * remove_kevent - cleans up and ultimately frees the given kevent | 316 | * remove_kevent - cleans up and ultimately frees the given kevent |
317 | * | 317 | * |
318 | * Caller must hold dev->sem. | 318 | * Caller must hold dev->mutex. |
319 | */ | 319 | */ |
320 | static void remove_kevent(struct inotify_device *dev, | 320 | static void remove_kevent(struct inotify_device *dev, |
321 | struct inotify_kernel_event *kevent) | 321 | struct inotify_kernel_event *kevent) |
@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev, | |||
332 | /* | 332 | /* |
333 | * inotify_dev_event_dequeue - destroy an event on the given device | 333 | * inotify_dev_event_dequeue - destroy an event on the given device |
334 | * | 334 | * |
335 | * Caller must hold dev->sem. | 335 | * Caller must hold dev->mutex. |
336 | */ | 336 | */ |
337 | static void inotify_dev_event_dequeue(struct inotify_device *dev) | 337 | static void inotify_dev_event_dequeue(struct inotify_device *dev) |
338 | { | 338 | { |
@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) | |||
346 | /* | 346 | /* |
347 | * inotify_dev_get_wd - returns the next WD for use by the given dev | 347 | * inotify_dev_get_wd - returns the next WD for use by the given dev |
348 | * | 348 | * |
349 | * Callers must hold dev->sem. This function can sleep. | 349 | * Callers must hold dev->mutex. This function can sleep. |
350 | */ | 350 | */ |
351 | static int inotify_dev_get_wd(struct inotify_device *dev, | 351 | static int inotify_dev_get_wd(struct inotify_device *dev, |
352 | struct inotify_watch *watch) | 352 | struct inotify_watch *watch) |
@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd, | |||
383 | /* | 383 | /* |
384 | * create_watch - creates a watch on the given device. | 384 | * create_watch - creates a watch on the given device. |
385 | * | 385 | * |
386 | * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. | 386 | * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep. |
387 | * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. | 387 | * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. |
388 | */ | 388 | */ |
389 | static struct inotify_watch *create_watch(struct inotify_device *dev, | 389 | static struct inotify_watch *create_watch(struct inotify_device *dev, |
@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev, | |||
434 | /* | 434 | /* |
435 | * inotify_find_dev - find the watch associated with the given inode and dev | 435 | * inotify_find_dev - find the watch associated with the given inode and dev |
436 | * | 436 | * |
437 | * Callers must hold inode->inotify_sem. | 437 | * Callers must hold inode->inotify_mutex. |
438 | */ | 438 | */ |
439 | static struct inotify_watch *inode_find_dev(struct inode *inode, | 439 | static struct inotify_watch *inode_find_dev(struct inode *inode, |
440 | struct inotify_device *dev) | 440 | struct inotify_device *dev) |
@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch, | |||
469 | * the IN_IGNORED event to the given device signifying that the inode is no | 469 | * the IN_IGNORED event to the given device signifying that the inode is no |
470 | * longer watched. | 470 | * longer watched. |
471 | * | 471 | * |
472 | * Callers must hold both inode->inotify_sem and dev->sem. We drop a | 472 | * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a |
473 | * reference to the inode before returning. | 473 | * reference to the inode before returning. |
474 | * | 474 | * |
475 | * The inode is not iput() so as to remain atomic. If the inode needs to be | 475 | * The inode is not iput() so as to remain atomic. If the inode needs to be |
@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, | |||
507 | if (!inotify_inode_watched(inode)) | 507 | if (!inotify_inode_watched(inode)) |
508 | return; | 508 | return; |
509 | 509 | ||
510 | down(&inode->inotify_sem); | 510 | mutex_lock(&inode->inotify_mutex); |
511 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { | 511 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { |
512 | u32 watch_mask = watch->mask; | 512 | u32 watch_mask = watch->mask; |
513 | if (watch_mask & mask) { | 513 | if (watch_mask & mask) { |
514 | struct inotify_device *dev = watch->dev; | 514 | struct inotify_device *dev = watch->dev; |
515 | get_inotify_watch(watch); | 515 | get_inotify_watch(watch); |
516 | down(&dev->sem); | 516 | mutex_lock(&dev->mutex); |
517 | inotify_dev_queue_event(dev, watch, mask, cookie, name); | 517 | inotify_dev_queue_event(dev, watch, mask, cookie, name); |
518 | if (watch_mask & IN_ONESHOT) | 518 | if (watch_mask & IN_ONESHOT) |
519 | remove_watch_no_event(watch, dev); | 519 | remove_watch_no_event(watch, dev); |
520 | up(&dev->sem); | 520 | mutex_unlock(&dev->mutex); |
521 | put_inotify_watch(watch); | 521 | put_inotify_watch(watch); |
522 | } | 522 | } |
523 | } | 523 | } |
524 | up(&inode->inotify_sem); | 524 | mutex_unlock(&inode->inotify_mutex); |
525 | } | 525 | } |
526 | EXPORT_SYMBOL_GPL(inotify_inode_queue_event); | 526 | EXPORT_SYMBOL_GPL(inotify_inode_queue_event); |
527 | 527 | ||
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie); | |||
569 | * @list: list of inodes being unmounted (sb->s_inodes) | 569 | * @list: list of inodes being unmounted (sb->s_inodes) |
570 | * | 570 | * |
571 | * Called with inode_lock held, protecting the unmounting super block's list | 571 | * Called with inode_lock held, protecting the unmounting super block's list |
572 | * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. | 572 | * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. |
573 | * We temporarily drop inode_lock, however, and CAN block. | 573 | * We temporarily drop inode_lock, however, and CAN block. |
574 | */ | 574 | */ |
575 | void inotify_unmount_inodes(struct list_head *list) | 575 | void inotify_unmount_inodes(struct list_head *list) |
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list) | |||
618 | * We can safely drop inode_lock here because we hold | 618 | * We can safely drop inode_lock here because we hold |
619 | * references on both inode and next_i. Also no new inodes | 619 | * references on both inode and next_i. Also no new inodes |
620 | * will be added since the umount has begun. Finally, | 620 | * will be added since the umount has begun. Finally, |
621 | * iprune_sem keeps shrink_icache_memory() away. | 621 | * iprune_mutex keeps shrink_icache_memory() away. |
622 | */ | 622 | */ |
623 | spin_unlock(&inode_lock); | 623 | spin_unlock(&inode_lock); |
624 | 624 | ||
@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list) | |||
626 | iput(need_iput_tmp); | 626 | iput(need_iput_tmp); |
627 | 627 | ||
628 | /* for each watch, send IN_UNMOUNT and then remove it */ | 628 | /* for each watch, send IN_UNMOUNT and then remove it */ |
629 | down(&inode->inotify_sem); | 629 | mutex_lock(&inode->inotify_mutex); |
630 | watches = &inode->inotify_watches; | 630 | watches = &inode->inotify_watches; |
631 | list_for_each_entry_safe(watch, next_w, watches, i_list) { | 631 | list_for_each_entry_safe(watch, next_w, watches, i_list) { |
632 | struct inotify_device *dev = watch->dev; | 632 | struct inotify_device *dev = watch->dev; |
633 | down(&dev->sem); | 633 | mutex_lock(&dev->mutex); |
634 | inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); | 634 | inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); |
635 | remove_watch(watch, dev); | 635 | remove_watch(watch, dev); |
636 | up(&dev->sem); | 636 | mutex_unlock(&dev->mutex); |
637 | } | 637 | } |
638 | up(&inode->inotify_sem); | 638 | mutex_unlock(&inode->inotify_mutex); |
639 | iput(inode); | 639 | iput(inode); |
640 | 640 | ||
641 | spin_lock(&inode_lock); | 641 | spin_lock(&inode_lock); |
@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode) | |||
651 | { | 651 | { |
652 | struct inotify_watch *watch, *next; | 652 | struct inotify_watch *watch, *next; |
653 | 653 | ||
654 | down(&inode->inotify_sem); | 654 | mutex_lock(&inode->inotify_mutex); |
655 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { | 655 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { |
656 | struct inotify_device *dev = watch->dev; | 656 | struct inotify_device *dev = watch->dev; |
657 | down(&dev->sem); | 657 | mutex_lock(&dev->mutex); |
658 | remove_watch(watch, dev); | 658 | remove_watch(watch, dev); |
659 | up(&dev->sem); | 659 | mutex_unlock(&dev->mutex); |
660 | } | 660 | } |
661 | up(&inode->inotify_sem); | 661 | mutex_unlock(&inode->inotify_mutex); |
662 | } | 662 | } |
663 | EXPORT_SYMBOL_GPL(inotify_inode_is_dead); | 663 | EXPORT_SYMBOL_GPL(inotify_inode_is_dead); |
664 | 664 | ||
@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) | |||
670 | int ret = 0; | 670 | int ret = 0; |
671 | 671 | ||
672 | poll_wait(file, &dev->wq, wait); | 672 | poll_wait(file, &dev->wq, wait); |
673 | down(&dev->sem); | 673 | mutex_lock(&dev->mutex); |
674 | if (!list_empty(&dev->events)) | 674 | if (!list_empty(&dev->events)) |
675 | ret = POLLIN | POLLRDNORM; | 675 | ret = POLLIN | POLLRDNORM; |
676 | up(&dev->sem); | 676 | mutex_unlock(&dev->mutex); |
677 | 677 | ||
678 | return ret; | 678 | return ret; |
679 | } | 679 | } |
@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
695 | 695 | ||
696 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); | 696 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); |
697 | 697 | ||
698 | down(&dev->sem); | 698 | mutex_lock(&dev->mutex); |
699 | events = !list_empty(&dev->events); | 699 | events = !list_empty(&dev->events); |
700 | up(&dev->sem); | 700 | mutex_unlock(&dev->mutex); |
701 | if (events) { | 701 | if (events) { |
702 | ret = 0; | 702 | ret = 0; |
703 | break; | 703 | break; |
@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
720 | if (ret) | 720 | if (ret) |
721 | return ret; | 721 | return ret; |
722 | 722 | ||
723 | down(&dev->sem); | 723 | mutex_lock(&dev->mutex); |
724 | while (1) { | 724 | while (1) { |
725 | struct inotify_kernel_event *kevent; | 725 | struct inotify_kernel_event *kevent; |
726 | 726 | ||
@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
750 | 750 | ||
751 | remove_kevent(dev, kevent); | 751 | remove_kevent(dev, kevent); |
752 | } | 752 | } |
753 | up(&dev->sem); | 753 | mutex_unlock(&dev->mutex); |
754 | 754 | ||
755 | return ret; | 755 | return ret; |
756 | } | 756 | } |
@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file) | |||
763 | * Destroy all of the watches on this device. Unfortunately, not very | 763 | * Destroy all of the watches on this device. Unfortunately, not very |
764 | * pretty. We cannot do a simple iteration over the list, because we | 764 | * pretty. We cannot do a simple iteration over the list, because we |
765 | * do not know the inode until we iterate to the watch. But we need to | 765 | * do not know the inode until we iterate to the watch. But we need to |
766 | * hold inode->inotify_sem before dev->sem. The following works. | 766 | * hold inode->inotify_mutex before dev->mutex. The following works. |
767 | */ | 767 | */ |
768 | while (1) { | 768 | while (1) { |
769 | struct inotify_watch *watch; | 769 | struct inotify_watch *watch; |
770 | struct list_head *watches; | 770 | struct list_head *watches; |
771 | struct inode *inode; | 771 | struct inode *inode; |
772 | 772 | ||
773 | down(&dev->sem); | 773 | mutex_lock(&dev->mutex); |
774 | watches = &dev->watches; | 774 | watches = &dev->watches; |
775 | if (list_empty(watches)) { | 775 | if (list_empty(watches)) { |
776 | up(&dev->sem); | 776 | mutex_unlock(&dev->mutex); |
777 | break; | 777 | break; |
778 | } | 778 | } |
779 | watch = list_entry(watches->next, struct inotify_watch, d_list); | 779 | watch = list_entry(watches->next, struct inotify_watch, d_list); |
780 | get_inotify_watch(watch); | 780 | get_inotify_watch(watch); |
781 | up(&dev->sem); | 781 | mutex_unlock(&dev->mutex); |
782 | 782 | ||
783 | inode = watch->inode; | 783 | inode = watch->inode; |
784 | down(&inode->inotify_sem); | 784 | mutex_lock(&inode->inotify_mutex); |
785 | down(&dev->sem); | 785 | mutex_lock(&dev->mutex); |
786 | remove_watch_no_event(watch, dev); | 786 | remove_watch_no_event(watch, dev); |
787 | up(&dev->sem); | 787 | mutex_unlock(&dev->mutex); |
788 | up(&inode->inotify_sem); | 788 | mutex_unlock(&inode->inotify_mutex); |
789 | put_inotify_watch(watch); | 789 | put_inotify_watch(watch); |
790 | } | 790 | } |
791 | 791 | ||
792 | /* destroy all of the events on this device */ | 792 | /* destroy all of the events on this device */ |
793 | down(&dev->sem); | 793 | mutex_lock(&dev->mutex); |
794 | while (!list_empty(&dev->events)) | 794 | while (!list_empty(&dev->events)) |
795 | inotify_dev_event_dequeue(dev); | 795 | inotify_dev_event_dequeue(dev); |
796 | up(&dev->sem); | 796 | mutex_unlock(&dev->mutex); |
797 | 797 | ||
798 | /* free this device: the put matching the get in inotify_init() */ | 798 | /* free this device: the put matching the get in inotify_init() */ |
799 | put_inotify_dev(dev); | 799 | put_inotify_dev(dev); |
@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) | |||
811 | struct inotify_watch *watch; | 811 | struct inotify_watch *watch; |
812 | struct inode *inode; | 812 | struct inode *inode; |
813 | 813 | ||
814 | down(&dev->sem); | 814 | mutex_lock(&dev->mutex); |
815 | watch = idr_find(&dev->idr, wd); | 815 | watch = idr_find(&dev->idr, wd); |
816 | if (unlikely(!watch)) { | 816 | if (unlikely(!watch)) { |
817 | up(&dev->sem); | 817 | mutex_unlock(&dev->mutex); |
818 | return -EINVAL; | 818 | return -EINVAL; |
819 | } | 819 | } |
820 | get_inotify_watch(watch); | 820 | get_inotify_watch(watch); |
821 | inode = watch->inode; | 821 | inode = watch->inode; |
822 | up(&dev->sem); | 822 | mutex_unlock(&dev->mutex); |
823 | 823 | ||
824 | down(&inode->inotify_sem); | 824 | mutex_lock(&inode->inotify_mutex); |
825 | down(&dev->sem); | 825 | mutex_lock(&dev->mutex); |
826 | 826 | ||
827 | /* make sure that we did not race */ | 827 | /* make sure that we did not race */ |
828 | watch = idr_find(&dev->idr, wd); | 828 | watch = idr_find(&dev->idr, wd); |
829 | if (likely(watch)) | 829 | if (likely(watch)) |
830 | remove_watch(watch, dev); | 830 | remove_watch(watch, dev); |
831 | 831 | ||
832 | up(&dev->sem); | 832 | mutex_unlock(&dev->mutex); |
833 | up(&inode->inotify_sem); | 833 | mutex_unlock(&inode->inotify_mutex); |
834 | put_inotify_watch(watch); | 834 | put_inotify_watch(watch); |
835 | 835 | ||
836 | return 0; | 836 | return 0; |
@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void) | |||
905 | INIT_LIST_HEAD(&dev->events); | 905 | INIT_LIST_HEAD(&dev->events); |
906 | INIT_LIST_HEAD(&dev->watches); | 906 | INIT_LIST_HEAD(&dev->watches); |
907 | init_waitqueue_head(&dev->wq); | 907 | init_waitqueue_head(&dev->wq); |
908 | sema_init(&dev->sem, 1); | 908 | mutex_init(&dev->mutex); |
909 | dev->event_count = 0; | 909 | dev->event_count = 0; |
910 | dev->queue_size = 0; | 910 | dev->queue_size = 0; |
911 | dev->max_events = inotify_max_queued_events; | 911 | dev->max_events = inotify_max_queued_events; |
@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) | |||
960 | inode = nd.dentry->d_inode; | 960 | inode = nd.dentry->d_inode; |
961 | dev = filp->private_data; | 961 | dev = filp->private_data; |
962 | 962 | ||
963 | down(&inode->inotify_sem); | 963 | mutex_lock(&inode->inotify_mutex); |
964 | down(&dev->sem); | 964 | mutex_lock(&dev->mutex); |
965 | 965 | ||
966 | if (mask & IN_MASK_ADD) | 966 | if (mask & IN_MASK_ADD) |
967 | mask_add = 1; | 967 | mask_add = 1; |
@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) | |||
998 | list_add(&watch->i_list, &inode->inotify_watches); | 998 | list_add(&watch->i_list, &inode->inotify_watches); |
999 | ret = watch->wd; | 999 | ret = watch->wd; |
1000 | out: | 1000 | out: |
1001 | up(&dev->sem); | 1001 | mutex_unlock(&dev->mutex); |
1002 | up(&inode->inotify_sem); | 1002 | mutex_unlock(&inode->inotify_mutex); |
1003 | path_release(&nd); | 1003 | path_release(&nd); |
1004 | fput_and_out: | 1004 | fput_and_out: |
1005 | fput_light(filp, fput_needed); | 1005 | fput_light(filp, fput_needed); |
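The inotify changes rename both locks but preserve the documented ordering: inode->inotify_mutex is always taken before dev->mutex, which is why inotify_release() and inotify_ignore() drop dev->mutex, pin the watch, and then re-acquire the two locks in that order. A condensed sketch of the ordering, using minimal stand-in structs rather than the real struct inode and struct inotify_device:

    #include <linux/mutex.h>

    struct ino_like { struct mutex inotify_mutex; };  /* stand-in for struct inode */
    struct dev_like { struct mutex mutex; };          /* stand-in for inotify_device */

    static void remove_watch_locked(struct ino_like *inode, struct dev_like *dev)
    {
            /* documented order: inode->inotify_mutex first, then dev->mutex */
            mutex_lock(&inode->inotify_mutex);
            mutex_lock(&dev->mutex);
            /* ... unhook the watch from both the inode and device lists ... */
            mutex_unlock(&dev->mutex);
            mutex_unlock(&inode->inotify_mutex);
    }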
diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c index 2931de7f1a6a..81a90e170ac3 100644 --- a/fs/isofs/joliet.c +++ b/fs/isofs/joliet.c | |||
@@ -11,7 +11,7 @@ | |||
11 | #include "isofs.h" | 11 | #include "isofs.h" |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * Convert Unicode 16 to UTF8 or ASCII. | 14 | * Convert Unicode 16 to UTF-8 or ASCII. |
15 | */ | 15 | */ |
16 | static int | 16 | static int |
17 | uni16_to_x8(unsigned char *ascii, u16 *uni, int len, struct nls_table *nls) | 17 | uni16_to_x8(unsigned char *ascii, u16 *uni, int len, struct nls_table *nls) |
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 543ed543d1e5..3f5102b069db 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c | |||
@@ -85,7 +85,7 @@ void __log_wait_for_space(journal_t *journal) | |||
85 | if (journal->j_flags & JFS_ABORT) | 85 | if (journal->j_flags & JFS_ABORT) |
86 | return; | 86 | return; |
87 | spin_unlock(&journal->j_state_lock); | 87 | spin_unlock(&journal->j_state_lock); |
88 | down(&journal->j_checkpoint_sem); | 88 | mutex_lock(&journal->j_checkpoint_mutex); |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * Test again, another process may have checkpointed while we | 91 | * Test again, another process may have checkpointed while we |
@@ -98,7 +98,7 @@ void __log_wait_for_space(journal_t *journal) | |||
98 | log_do_checkpoint(journal); | 98 | log_do_checkpoint(journal); |
99 | spin_lock(&journal->j_state_lock); | 99 | spin_lock(&journal->j_state_lock); |
100 | } | 100 | } |
101 | up(&journal->j_checkpoint_sem); | 101 | mutex_unlock(&journal->j_checkpoint_mutex); |
102 | } | 102 | } |
103 | } | 103 | } |
104 | 104 | ||
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index e4b516ac4989..95a628d8cac8 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -659,8 +659,8 @@ static journal_t * journal_init_common (void) | |||
659 | init_waitqueue_head(&journal->j_wait_checkpoint); | 659 | init_waitqueue_head(&journal->j_wait_checkpoint); |
660 | init_waitqueue_head(&journal->j_wait_commit); | 660 | init_waitqueue_head(&journal->j_wait_commit); |
661 | init_waitqueue_head(&journal->j_wait_updates); | 661 | init_waitqueue_head(&journal->j_wait_updates); |
662 | init_MUTEX(&journal->j_barrier); | 662 | mutex_init(&journal->j_barrier); |
663 | init_MUTEX(&journal->j_checkpoint_sem); | 663 | mutex_init(&journal->j_checkpoint_mutex); |
664 | spin_lock_init(&journal->j_revoke_lock); | 664 | spin_lock_init(&journal->j_revoke_lock); |
665 | spin_lock_init(&journal->j_list_lock); | 665 | spin_lock_init(&journal->j_list_lock); |
666 | spin_lock_init(&journal->j_state_lock); | 666 | spin_lock_init(&journal->j_state_lock); |
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index ca917973c2c0..5fc40888f4cf 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
@@ -455,7 +455,7 @@ void journal_lock_updates(journal_t *journal) | |||
455 | * to make sure that we serialise special journal-locked operations | 455 | * to make sure that we serialise special journal-locked operations |
456 | * too. | 456 | * too. |
457 | */ | 457 | */ |
458 | down(&journal->j_barrier); | 458 | mutex_lock(&journal->j_barrier); |
459 | } | 459 | } |
460 | 460 | ||
461 | /** | 461 | /** |
@@ -470,7 +470,7 @@ void journal_unlock_updates (journal_t *journal) | |||
470 | { | 470 | { |
471 | J_ASSERT(journal->j_barrier_count != 0); | 471 | J_ASSERT(journal->j_barrier_count != 0); |
472 | 472 | ||
473 | up(&journal->j_barrier); | 473 | mutex_unlock(&journal->j_barrier); |
474 | spin_lock(&journal->j_state_lock); | 474 | spin_lock(&journal->j_state_lock); |
475 | --journal->j_barrier_count; | 475 | --journal->j_barrier_count; |
476 | spin_unlock(&journal->j_state_lock); | 476 | spin_unlock(&journal->j_state_lock); |
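journal_lock_updates() and journal_unlock_updates() are a strict lock/unlock pair held across journal-locked operations, so j_barrier is a natural candidate for a mutex, as is j_checkpoint_mutex in __log_wait_for_space(). A hedged sketch of a caller bracketing such an operation; the jbd entry points are real, but the surrounding function is invented for illustration.

    #include <linux/jbd.h>

    /* illustrative caller: freeze updates, do something special, thaw */
    static void do_journal_locked_op(journal_t *journal)
    {
            journal_lock_updates(journal);    /* ends in mutex_lock(&journal->j_barrier) */
            /* ... work that must not race with running transactions ... */
            journal_unlock_updates(journal);  /* mutex_unlock(&journal->j_barrier) */
    }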
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c index fc3855a1aef3..890d7ff7456d 100644 --- a/fs/jffs/inode-v23.c +++ b/fs/jffs/inode-v23.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #include <linux/quotaops.h> | 42 | #include <linux/quotaops.h> |
43 | #include <linux/highmem.h> | 43 | #include <linux/highmem.h> |
44 | #include <linux/vfs.h> | 44 | #include <linux/vfs.h> |
45 | #include <asm/semaphore.h> | 45 | #include <linux/mutex.h> |
46 | #include <asm/byteorder.h> | 46 | #include <asm/byteorder.h> |
47 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
48 | 48 | ||
@@ -203,7 +203,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) | |||
203 | fmc = c->fmc; | 203 | fmc = c->fmc; |
204 | 204 | ||
205 | D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); | 205 | D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); |
206 | down(&fmc->biglock); | 206 | mutex_lock(&fmc->biglock); |
207 | 207 | ||
208 | f = jffs_find_file(c, inode->i_ino); | 208 | f = jffs_find_file(c, inode->i_ino); |
209 | 209 | ||
@@ -211,7 +211,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) | |||
211 | printk("jffs_setattr(): Invalid inode number: %lu\n", | 211 | printk("jffs_setattr(): Invalid inode number: %lu\n", |
212 | inode->i_ino); | 212 | inode->i_ino); |
213 | D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); | 213 | D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); |
214 | up(&fmc->biglock); | 214 | mutex_unlock(&fmc->biglock); |
215 | res = -EINVAL; | 215 | res = -EINVAL; |
216 | goto out; | 216 | goto out; |
217 | }); | 217 | }); |
@@ -232,7 +232,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) | |||
232 | if (!(new_node = jffs_alloc_node())) { | 232 | if (!(new_node = jffs_alloc_node())) { |
233 | D(printk("jffs_setattr(): Allocation failed!\n")); | 233 | D(printk("jffs_setattr(): Allocation failed!\n")); |
234 | D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); | 234 | D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); |
235 | up(&fmc->biglock); | 235 | mutex_unlock(&fmc->biglock); |
236 | res = -ENOMEM; | 236 | res = -ENOMEM; |
237 | goto out; | 237 | goto out; |
238 | } | 238 | } |
@@ -319,7 +319,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) | |||
319 | D(printk("jffs_notify_change(): The write failed!\n")); | 319 | D(printk("jffs_notify_change(): The write failed!\n")); |
320 | jffs_free_node(new_node); | 320 | jffs_free_node(new_node); |
321 | D3(printk (KERN_NOTICE "n_c(): up biglock\n")); | 321 | D3(printk (KERN_NOTICE "n_c(): up biglock\n")); |
322 | up(&c->fmc->biglock); | 322 | mutex_unlock(&c->fmc->biglock); |
323 | goto out; | 323 | goto out; |
324 | } | 324 | } |
325 | 325 | ||
@@ -327,7 +327,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) | |||
327 | 327 | ||
328 | mark_inode_dirty(inode); | 328 | mark_inode_dirty(inode); |
329 | D3(printk (KERN_NOTICE "n_c(): up biglock\n")); | 329 | D3(printk (KERN_NOTICE "n_c(): up biglock\n")); |
330 | up(&c->fmc->biglock); | 330 | mutex_unlock(&c->fmc->biglock); |
331 | out: | 331 | out: |
332 | unlock_kernel(); | 332 | unlock_kernel(); |
333 | return res; | 333 | return res; |
@@ -461,7 +461,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
461 | goto jffs_rename_end; | 461 | goto jffs_rename_end; |
462 | } | 462 | } |
463 | D3(printk (KERN_NOTICE "rename(): down biglock\n")); | 463 | D3(printk (KERN_NOTICE "rename(): down biglock\n")); |
464 | down(&c->fmc->biglock); | 464 | mutex_lock(&c->fmc->biglock); |
465 | /* Create a node and initialize as much as needed. */ | 465 | /* Create a node and initialize as much as needed. */ |
466 | result = -ENOMEM; | 466 | result = -ENOMEM; |
467 | if (!(node = jffs_alloc_node())) { | 467 | if (!(node = jffs_alloc_node())) { |
@@ -555,7 +555,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
555 | 555 | ||
556 | jffs_rename_end: | 556 | jffs_rename_end: |
557 | D3(printk (KERN_NOTICE "rename(): up biglock\n")); | 557 | D3(printk (KERN_NOTICE "rename(): up biglock\n")); |
558 | up(&c->fmc->biglock); | 558 | mutex_unlock(&c->fmc->biglock); |
559 | unlock_kernel(); | 559 | unlock_kernel(); |
560 | return result; | 560 | return result; |
561 | } /* jffs_rename() */ | 561 | } /* jffs_rename() */ |
@@ -574,14 +574,14 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
574 | int ddino; | 574 | int ddino; |
575 | lock_kernel(); | 575 | lock_kernel(); |
576 | D3(printk (KERN_NOTICE "readdir(): down biglock\n")); | 576 | D3(printk (KERN_NOTICE "readdir(): down biglock\n")); |
577 | down(&c->fmc->biglock); | 577 | mutex_lock(&c->fmc->biglock); |
578 | 578 | ||
579 | D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); | 579 | D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); |
580 | if (filp->f_pos == 0) { | 580 | if (filp->f_pos == 0) { |
581 | D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); | 581 | D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); |
582 | if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { | 582 | if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { |
583 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); | 583 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); |
584 | up(&c->fmc->biglock); | 584 | mutex_unlock(&c->fmc->biglock); |
585 | unlock_kernel(); | 585 | unlock_kernel(); |
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
@@ -598,7 +598,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
598 | D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); | 598 | D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); |
599 | if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { | 599 | if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { |
600 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); | 600 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); |
601 | up(&c->fmc->biglock); | 601 | mutex_unlock(&c->fmc->biglock); |
602 | unlock_kernel(); | 602 | unlock_kernel(); |
603 | return 0; | 603 | return 0; |
604 | } | 604 | } |
@@ -617,7 +617,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
617 | if (filldir(dirent, f->name, f->nsize, | 617 | if (filldir(dirent, f->name, f->nsize, |
618 | filp->f_pos , f->ino, DT_UNKNOWN) < 0) { | 618 | filp->f_pos , f->ino, DT_UNKNOWN) < 0) { |
619 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); | 619 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); |
620 | up(&c->fmc->biglock); | 620 | mutex_unlock(&c->fmc->biglock); |
621 | unlock_kernel(); | 621 | unlock_kernel(); |
622 | return 0; | 622 | return 0; |
623 | } | 623 | } |
@@ -627,7 +627,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
627 | } while(f && f->deleted); | 627 | } while(f && f->deleted); |
628 | } | 628 | } |
629 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); | 629 | D3(printk (KERN_NOTICE "readdir(): up biglock\n")); |
630 | up(&c->fmc->biglock); | 630 | mutex_unlock(&c->fmc->biglock); |
631 | unlock_kernel(); | 631 | unlock_kernel(); |
632 | return filp->f_pos; | 632 | return filp->f_pos; |
633 | } /* jffs_readdir() */ | 633 | } /* jffs_readdir() */ |
@@ -660,7 +660,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
660 | }); | 660 | }); |
661 | 661 | ||
662 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); | 662 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); |
663 | down(&c->fmc->biglock); | 663 | mutex_lock(&c->fmc->biglock); |
664 | 664 | ||
665 | r = -ENAMETOOLONG; | 665 | r = -ENAMETOOLONG; |
666 | if (len > JFFS_MAX_NAME_LEN) { | 666 | if (len > JFFS_MAX_NAME_LEN) { |
@@ -683,31 +683,31 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
683 | 683 | ||
684 | if ((len == 1) && (name[0] == '.')) { | 684 | if ((len == 1) && (name[0] == '.')) { |
685 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); | 685 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); |
686 | up(&c->fmc->biglock); | 686 | mutex_unlock(&c->fmc->biglock); |
687 | if (!(inode = iget(dir->i_sb, d->ino))) { | 687 | if (!(inode = iget(dir->i_sb, d->ino))) { |
688 | D(printk("jffs_lookup(): . iget() ==> NULL\n")); | 688 | D(printk("jffs_lookup(): . iget() ==> NULL\n")); |
689 | goto jffs_lookup_end_no_biglock; | 689 | goto jffs_lookup_end_no_biglock; |
690 | } | 690 | } |
691 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); | 691 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); |
692 | down(&c->fmc->biglock); | 692 | mutex_lock(&c->fmc->biglock); |
693 | } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { | 693 | } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { |
694 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); | 694 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); |
695 | up(&c->fmc->biglock); | 695 | mutex_unlock(&c->fmc->biglock); |
696 | if (!(inode = iget(dir->i_sb, d->pino))) { | 696 | if (!(inode = iget(dir->i_sb, d->pino))) { |
697 | D(printk("jffs_lookup(): .. iget() ==> NULL\n")); | 697 | D(printk("jffs_lookup(): .. iget() ==> NULL\n")); |
698 | goto jffs_lookup_end_no_biglock; | 698 | goto jffs_lookup_end_no_biglock; |
699 | } | 699 | } |
700 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); | 700 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); |
701 | down(&c->fmc->biglock); | 701 | mutex_lock(&c->fmc->biglock); |
702 | } else if ((f = jffs_find_child(d, name, len))) { | 702 | } else if ((f = jffs_find_child(d, name, len))) { |
703 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); | 703 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); |
704 | up(&c->fmc->biglock); | 704 | mutex_unlock(&c->fmc->biglock); |
705 | if (!(inode = iget(dir->i_sb, f->ino))) { | 705 | if (!(inode = iget(dir->i_sb, f->ino))) { |
706 | D(printk("jffs_lookup(): iget() ==> NULL\n")); | 706 | D(printk("jffs_lookup(): iget() ==> NULL\n")); |
707 | goto jffs_lookup_end_no_biglock; | 707 | goto jffs_lookup_end_no_biglock; |
708 | } | 708 | } |
709 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); | 709 | D3(printk (KERN_NOTICE "lookup(): down biglock\n")); |
710 | down(&c->fmc->biglock); | 710 | mutex_lock(&c->fmc->biglock); |
711 | } else { | 711 | } else { |
712 | D3(printk("jffs_lookup(): Couldn't find the file. " | 712 | D3(printk("jffs_lookup(): Couldn't find the file. " |
713 | "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", | 713 | "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", |
@@ -717,13 +717,13 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
717 | 717 | ||
718 | d_add(dentry, inode); | 718 | d_add(dentry, inode); |
719 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); | 719 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); |
720 | up(&c->fmc->biglock); | 720 | mutex_unlock(&c->fmc->biglock); |
721 | unlock_kernel(); | 721 | unlock_kernel(); |
722 | return NULL; | 722 | return NULL; |
723 | 723 | ||
724 | jffs_lookup_end: | 724 | jffs_lookup_end: |
725 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); | 725 | D3(printk (KERN_NOTICE "lookup(): up biglock\n")); |
726 | up(&c->fmc->biglock); | 726 | mutex_unlock(&c->fmc->biglock); |
727 | 727 | ||
728 | jffs_lookup_end_no_biglock: | 728 | jffs_lookup_end_no_biglock: |
729 | unlock_kernel(); | 729 | unlock_kernel(); |
@@ -753,7 +753,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) | |||
753 | ClearPageError(page); | 753 | ClearPageError(page); |
754 | 754 | ||
755 | D3(printk (KERN_NOTICE "readpage(): down biglock\n")); | 755 | D3(printk (KERN_NOTICE "readpage(): down biglock\n")); |
756 | down(&c->fmc->biglock); | 756 | mutex_lock(&c->fmc->biglock); |
757 | 757 | ||
758 | read_len = 0; | 758 | read_len = 0; |
759 | result = 0; | 759 | result = 0; |
@@ -782,7 +782,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) | |||
782 | kunmap(page); | 782 | kunmap(page); |
783 | 783 | ||
784 | D3(printk (KERN_NOTICE "readpage(): up biglock\n")); | 784 | D3(printk (KERN_NOTICE "readpage(): up biglock\n")); |
785 | up(&c->fmc->biglock); | 785 | mutex_unlock(&c->fmc->biglock); |
786 | 786 | ||
787 | if (result) { | 787 | if (result) { |
788 | SetPageError(page); | 788 | SetPageError(page); |
@@ -839,7 +839,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
839 | 839 | ||
840 | c = dir_f->c; | 840 | c = dir_f->c; |
841 | D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); | 841 | D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); |
842 | down(&c->fmc->biglock); | 842 | mutex_lock(&c->fmc->biglock); |
843 | 843 | ||
844 | dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) | 844 | dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) |
845 | & ~current->fs->umask); | 845 | & ~current->fs->umask); |
@@ -906,7 +906,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
906 | result = 0; | 906 | result = 0; |
907 | jffs_mkdir_end: | 907 | jffs_mkdir_end: |
908 | D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); | 908 | D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); |
909 | up(&c->fmc->biglock); | 909 | mutex_unlock(&c->fmc->biglock); |
910 | unlock_kernel(); | 910 | unlock_kernel(); |
911 | return result; | 911 | return result; |
912 | } /* jffs_mkdir() */ | 912 | } /* jffs_mkdir() */ |
@@ -921,10 +921,10 @@ jffs_rmdir(struct inode *dir, struct dentry *dentry) | |||
921 | D3(printk("***jffs_rmdir()\n")); | 921 | D3(printk("***jffs_rmdir()\n")); |
922 | D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); | 922 | D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); |
923 | lock_kernel(); | 923 | lock_kernel(); |
924 | down(&c->fmc->biglock); | 924 | mutex_lock(&c->fmc->biglock); |
925 | ret = jffs_remove(dir, dentry, S_IFDIR); | 925 | ret = jffs_remove(dir, dentry, S_IFDIR); |
926 | D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); | 926 | D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); |
927 | up(&c->fmc->biglock); | 927 | mutex_unlock(&c->fmc->biglock); |
928 | unlock_kernel(); | 928 | unlock_kernel(); |
929 | return ret; | 929 | return ret; |
930 | } | 930 | } |
@@ -940,10 +940,10 @@ jffs_unlink(struct inode *dir, struct dentry *dentry) | |||
940 | lock_kernel(); | 940 | lock_kernel(); |
941 | D3(printk("***jffs_unlink()\n")); | 941 | D3(printk("***jffs_unlink()\n")); |
942 | D3(printk (KERN_NOTICE "unlink(): down biglock\n")); | 942 | D3(printk (KERN_NOTICE "unlink(): down biglock\n")); |
943 | down(&c->fmc->biglock); | 943 | mutex_lock(&c->fmc->biglock); |
944 | ret = jffs_remove(dir, dentry, 0); | 944 | ret = jffs_remove(dir, dentry, 0); |
945 | D3(printk (KERN_NOTICE "unlink(): up biglock\n")); | 945 | D3(printk (KERN_NOTICE "unlink(): up biglock\n")); |
946 | up(&c->fmc->biglock); | 946 | mutex_unlock(&c->fmc->biglock); |
947 | unlock_kernel(); | 947 | unlock_kernel(); |
948 | return ret; | 948 | return ret; |
949 | } | 949 | } |
@@ -1086,7 +1086,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) | |||
1086 | c = dir_f->c; | 1086 | c = dir_f->c; |
1087 | 1087 | ||
1088 | D3(printk (KERN_NOTICE "mknod(): down biglock\n")); | 1088 | D3(printk (KERN_NOTICE "mknod(): down biglock\n")); |
1089 | down(&c->fmc->biglock); | 1089 | mutex_lock(&c->fmc->biglock); |
1090 | 1090 | ||
1091 | /* Create and initialize a new node. */ | 1091 | /* Create and initialize a new node. */ |
1092 | if (!(node = jffs_alloc_node())) { | 1092 | if (!(node = jffs_alloc_node())) { |
@@ -1152,7 +1152,7 @@ jffs_mknod_err: | |||
1152 | 1152 | ||
1153 | jffs_mknod_end: | 1153 | jffs_mknod_end: |
1154 | D3(printk (KERN_NOTICE "mknod(): up biglock\n")); | 1154 | D3(printk (KERN_NOTICE "mknod(): up biglock\n")); |
1155 | up(&c->fmc->biglock); | 1155 | mutex_unlock(&c->fmc->biglock); |
1156 | unlock_kernel(); | 1156 | unlock_kernel(); |
1157 | return result; | 1157 | return result; |
1158 | } /* jffs_mknod() */ | 1158 | } /* jffs_mknod() */ |
@@ -1203,7 +1203,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |||
1203 | return -ENOMEM; | 1203 | return -ENOMEM; |
1204 | } | 1204 | } |
1205 | D3(printk (KERN_NOTICE "symlink(): down biglock\n")); | 1205 | D3(printk (KERN_NOTICE "symlink(): down biglock\n")); |
1206 | down(&c->fmc->biglock); | 1206 | mutex_lock(&c->fmc->biglock); |
1207 | 1207 | ||
1208 | node->data_offset = 0; | 1208 | node->data_offset = 0; |
1209 | node->removed_size = 0; | 1209 | node->removed_size = 0; |
@@ -1253,7 +1253,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |||
1253 | d_instantiate(dentry, inode); | 1253 | d_instantiate(dentry, inode); |
1254 | jffs_symlink_end: | 1254 | jffs_symlink_end: |
1255 | D3(printk (KERN_NOTICE "symlink(): up biglock\n")); | 1255 | D3(printk (KERN_NOTICE "symlink(): up biglock\n")); |
1256 | up(&c->fmc->biglock); | 1256 | mutex_unlock(&c->fmc->biglock); |
1257 | unlock_kernel(); | 1257 | unlock_kernel(); |
1258 | return err; | 1258 | return err; |
1259 | } /* jffs_symlink() */ | 1259 | } /* jffs_symlink() */ |
@@ -1306,7 +1306,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, | |||
1306 | return -ENOMEM; | 1306 | return -ENOMEM; |
1307 | } | 1307 | } |
1308 | D3(printk (KERN_NOTICE "create(): down biglock\n")); | 1308 | D3(printk (KERN_NOTICE "create(): down biglock\n")); |
1309 | down(&c->fmc->biglock); | 1309 | mutex_lock(&c->fmc->biglock); |
1310 | 1310 | ||
1311 | node->data_offset = 0; | 1311 | node->data_offset = 0; |
1312 | node->removed_size = 0; | 1312 | node->removed_size = 0; |
@@ -1359,7 +1359,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, | |||
1359 | d_instantiate(dentry, inode); | 1359 | d_instantiate(dentry, inode); |
1360 | jffs_create_end: | 1360 | jffs_create_end: |
1361 | D3(printk (KERN_NOTICE "create(): up biglock\n")); | 1361 | D3(printk (KERN_NOTICE "create(): up biglock\n")); |
1362 | up(&c->fmc->biglock); | 1362 | mutex_unlock(&c->fmc->biglock); |
1363 | unlock_kernel(); | 1363 | unlock_kernel(); |
1364 | return err; | 1364 | return err; |
1365 | } /* jffs_create() */ | 1365 | } /* jffs_create() */ |
@@ -1423,7 +1423,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, | |||
1423 | thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); | 1423 | thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); |
1424 | 1424 | ||
1425 | D3(printk (KERN_NOTICE "file_write(): down biglock\n")); | 1425 | D3(printk (KERN_NOTICE "file_write(): down biglock\n")); |
1426 | down(&c->fmc->biglock); | 1426 | mutex_lock(&c->fmc->biglock); |
1427 | 1427 | ||
1428 | /* Urgh. POSIX says we can do short writes if we feel like it. | 1428 | /* Urgh. POSIX says we can do short writes if we feel like it. |
1429 | * In practice, we can't. Nothing will cope. So we loop until | 1429 | * In practice, we can't. Nothing will cope. So we loop until |
@@ -1511,7 +1511,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, | |||
1511 | } | 1511 | } |
1512 | out: | 1512 | out: |
1513 | D3(printk (KERN_NOTICE "file_write(): up biglock\n")); | 1513 | D3(printk (KERN_NOTICE "file_write(): up biglock\n")); |
1514 | up(&c->fmc->biglock); | 1514 | mutex_unlock(&c->fmc->biglock); |
1515 | 1515 | ||
1516 | /* Fix things in the real inode. */ | 1516 | /* Fix things in the real inode. */ |
1517 | if (pos > inode->i_size) { | 1517 | if (pos > inode->i_size) { |
@@ -1567,7 +1567,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, | |||
1567 | return -EIO; | 1567 | return -EIO; |
1568 | } | 1568 | } |
1569 | D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); | 1569 | D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); |
1570 | down(&c->fmc->biglock); | 1570 | mutex_lock(&c->fmc->biglock); |
1571 | 1571 | ||
1572 | switch (cmd) { | 1572 | switch (cmd) { |
1573 | case JFFS_PRINT_HASH: | 1573 | case JFFS_PRINT_HASH: |
@@ -1609,7 +1609,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, | |||
1609 | ret = -ENOTTY; | 1609 | ret = -ENOTTY; |
1610 | } | 1610 | } |
1611 | D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); | 1611 | D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); |
1612 | up(&c->fmc->biglock); | 1612 | mutex_unlock(&c->fmc->biglock); |
1613 | return ret; | 1613 | return ret; |
1614 | } /* jffs_ioctl() */ | 1614 | } /* jffs_ioctl() */ |
1615 | 1615 | ||
@@ -1685,12 +1685,12 @@ jffs_read_inode(struct inode *inode) | |||
1685 | } | 1685 | } |
1686 | c = (struct jffs_control *)inode->i_sb->s_fs_info; | 1686 | c = (struct jffs_control *)inode->i_sb->s_fs_info; |
1687 | D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); | 1687 | D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); |
1688 | down(&c->fmc->biglock); | 1688 | mutex_lock(&c->fmc->biglock); |
1689 | if (!(f = jffs_find_file(c, inode->i_ino))) { | 1689 | if (!(f = jffs_find_file(c, inode->i_ino))) { |
1690 | D(printk("jffs_read_inode(): No such inode (%lu).\n", | 1690 | D(printk("jffs_read_inode(): No such inode (%lu).\n", |
1691 | inode->i_ino)); | 1691 | inode->i_ino)); |
1692 | D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); | 1692 | D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); |
1693 | up(&c->fmc->biglock); | 1693 | mutex_unlock(&c->fmc->biglock); |
1694 | return; | 1694 | return; |
1695 | } | 1695 | } |
1696 | inode->u.generic_ip = (void *)f; | 1696 | inode->u.generic_ip = (void *)f; |
@@ -1732,7 +1732,7 @@ jffs_read_inode(struct inode *inode) | |||
1732 | } | 1732 | } |
1733 | 1733 | ||
1734 | D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); | 1734 | D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); |
1735 | up(&c->fmc->biglock); | 1735 | mutex_unlock(&c->fmc->biglock); |
1736 | } | 1736 | } |
1737 | 1737 | ||
1738 | 1738 | ||
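Note: the jffs hunks above all follow one pattern: a struct semaphore that was only ever taken and released in matched down()/up() pairs becomes a struct mutex. A minimal sketch of the call-site half of that conversion, with a hypothetical helper name (not kernel code):

    #include <linux/mutex.h>

    /* Illustrative only: 'biglock' stands in for any mutex converted
     * from a semaphore used purely for mutual exclusion. */
    static void demo_locked_op(struct mutex *biglock)
    {
            mutex_lock(biglock);        /* was: down(biglock) */
            /* ... critical section, one task at a time ... */
            mutex_unlock(biglock);      /* was: up(biglock) */
    }

Unlike a counting semaphore, a mutex is expected to be unlocked by the task that locked it, which is exactly how these call sites already behaved.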
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index ce7b54b0b2b7..0ef207dfaf6f 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #include <linux/fs.h> | 62 | #include <linux/fs.h> |
63 | #include <linux/stat.h> | 63 | #include <linux/stat.h> |
64 | #include <linux/pagemap.h> | 64 | #include <linux/pagemap.h> |
65 | #include <asm/semaphore.h> | 65 | #include <linux/mutex.h> |
66 | #include <asm/byteorder.h> | 66 | #include <asm/byteorder.h> |
67 | #include <linux/smp_lock.h> | 67 | #include <linux/smp_lock.h> |
68 | #include <linux/time.h> | 68 | #include <linux/time.h> |
@@ -3416,7 +3416,7 @@ jffs_garbage_collect_thread(void *ptr) | |||
3416 | D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); | 3416 | D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); |
3417 | 3417 | ||
3418 | D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); | 3418 | D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); |
3419 | down(&fmc->biglock); | 3419 | mutex_lock(&fmc->biglock); |
3420 | 3420 | ||
3421 | D1(printk("***jffs_garbage_collect_thread(): round #%u, " | 3421 | D1(printk("***jffs_garbage_collect_thread(): round #%u, " |
3422 | "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); | 3422 | "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); |
@@ -3447,6 +3447,6 @@ jffs_garbage_collect_thread(void *ptr) | |||
3447 | 3447 | ||
3448 | gc_end: | 3448 | gc_end: |
3449 | D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); | 3449 | D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); |
3450 | up(&fmc->biglock); | 3450 | mutex_unlock(&fmc->biglock); |
3451 | } /* for (;;) */ | 3451 | } /* for (;;) */ |
3452 | } /* jffs_garbage_collect_thread() */ | 3452 | } /* jffs_garbage_collect_thread() */ |
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c index 6da13b309bd1..7d8ca1aeace2 100644 --- a/fs/jffs/jffs_fm.c +++ b/fs/jffs/jffs_fm.c | |||
@@ -139,7 +139,7 @@ jffs_build_begin(struct jffs_control *c, int unit) | |||
139 | fmc->tail = NULL; | 139 | fmc->tail = NULL; |
140 | fmc->head_extra = NULL; | 140 | fmc->head_extra = NULL; |
141 | fmc->tail_extra = NULL; | 141 | fmc->tail_extra = NULL; |
142 | init_MUTEX(&fmc->biglock); | 142 | mutex_init(&fmc->biglock); |
143 | return fmc; | 143 | return fmc; |
144 | } | 144 | } |
145 | 145 | ||
diff --git a/fs/jffs/jffs_fm.h b/fs/jffs/jffs_fm.h index f64151e74122..c794d923df2a 100644 --- a/fs/jffs/jffs_fm.h +++ b/fs/jffs/jffs_fm.h | |||
@@ -20,10 +20,11 @@ | |||
20 | #ifndef __LINUX_JFFS_FM_H__ | 20 | #ifndef __LINUX_JFFS_FM_H__ |
21 | #define __LINUX_JFFS_FM_H__ | 21 | #define __LINUX_JFFS_FM_H__ |
22 | 22 | ||
23 | #include <linux/config.h> | ||
23 | #include <linux/types.h> | 24 | #include <linux/types.h> |
24 | #include <linux/jffs.h> | 25 | #include <linux/jffs.h> |
25 | #include <linux/mtd/mtd.h> | 26 | #include <linux/mtd/mtd.h> |
26 | #include <linux/config.h> | 27 | #include <linux/mutex.h> |
27 | 28 | ||
28 | /* The alignment between two nodes in the flash memory. */ | 29 | /* The alignment between two nodes in the flash memory. */ |
29 | #define JFFS_ALIGN_SIZE 4 | 30 | #define JFFS_ALIGN_SIZE 4 |
@@ -97,7 +98,7 @@ struct jffs_fmcontrol | |||
97 | struct jffs_fm *tail; | 98 | struct jffs_fm *tail; |
98 | struct jffs_fm *head_extra; | 99 | struct jffs_fm *head_extra; |
99 | struct jffs_fm *tail_extra; | 100 | struct jffs_fm *tail_extra; |
100 | struct semaphore biglock; | 101 | struct mutex biglock; |
101 | }; | 102 | }; |
102 | 103 | ||
103 | /* Notice the two members head_extra and tail_extra in the jffs_control | 104 | /* Notice the two members head_extra and tail_extra in the jffs_control |
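Note: for an embedded lock such as fmc->biglock, the declaration, the header include, and the one-time initialization change together. A hedged sketch of that side of the conversion, with hypothetical names:

    #include <linux/mutex.h>            /* replaces <asm/semaphore.h> */
    #include <linux/slab.h>

    struct demo_fmcontrol {
            /* ... other members ... */
            struct mutex biglock;       /* was: struct semaphore biglock */
    };

    static struct demo_fmcontrol *demo_build_begin(void)
    {
            struct demo_fmcontrol *fmc = kmalloc(sizeof(*fmc), GFP_KERNEL);

            if (!fmc)
                    return NULL;
            mutex_init(&fmc->biglock);  /* was: init_MUTEX(&fmc->biglock) */
            return fmc;
    }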
diff --git a/fs/libfs.c b/fs/libfs.c index 71fd08fa4103..4fdeaceb892c 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <linux/pagemap.h> | 7 | #include <linux/pagemap.h> |
8 | #include <linux/mount.h> | 8 | #include <linux/mount.h> |
9 | #include <linux/vfs.h> | 9 | #include <linux/vfs.h> |
10 | #include <linux/mutex.h> | ||
11 | |||
10 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
11 | 13 | ||
12 | int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, | 14 | int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, |
@@ -530,7 +532,7 @@ struct simple_attr { | |||
530 | char set_buf[24]; | 532 | char set_buf[24]; |
531 | void *data; | 533 | void *data; |
532 | const char *fmt; /* format for read operation */ | 534 | const char *fmt; /* format for read operation */ |
533 | struct semaphore sem; /* protects access to these buffers */ | 535 | struct mutex mutex; /* protects access to these buffers */ |
534 | }; | 536 | }; |
535 | 537 | ||
536 | /* simple_attr_open is called by an actual attribute open file operation | 538 | /* simple_attr_open is called by an actual attribute open file operation |
@@ -549,7 +551,7 @@ int simple_attr_open(struct inode *inode, struct file *file, | |||
549 | attr->set = set; | 551 | attr->set = set; |
550 | attr->data = inode->u.generic_ip; | 552 | attr->data = inode->u.generic_ip; |
551 | attr->fmt = fmt; | 553 | attr->fmt = fmt; |
552 | init_MUTEX(&attr->sem); | 554 | mutex_init(&attr->mutex); |
553 | 555 | ||
554 | file->private_data = attr; | 556 | file->private_data = attr; |
555 | 557 | ||
@@ -575,7 +577,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, | |||
575 | if (!attr->get) | 577 | if (!attr->get) |
576 | return -EACCES; | 578 | return -EACCES; |
577 | 579 | ||
578 | down(&attr->sem); | 580 | mutex_lock(&attr->mutex); |
579 | if (*ppos) /* continued read */ | 581 | if (*ppos) /* continued read */ |
580 | size = strlen(attr->get_buf); | 582 | size = strlen(attr->get_buf); |
581 | else /* first read */ | 583 | else /* first read */ |
@@ -584,7 +586,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, | |||
584 | (unsigned long long)attr->get(attr->data)); | 586 | (unsigned long long)attr->get(attr->data)); |
585 | 587 | ||
586 | ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); | 588 | ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); |
587 | up(&attr->sem); | 589 | mutex_unlock(&attr->mutex); |
588 | return ret; | 590 | return ret; |
589 | } | 591 | } |
590 | 592 | ||
@@ -602,7 +604,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, | |||
602 | if (!attr->set) | 604 | if (!attr->set) |
603 | return -EACCES; | 605 | return -EACCES; |
604 | 606 | ||
605 | down(&attr->sem); | 607 | mutex_lock(&attr->mutex); |
606 | ret = -EFAULT; | 608 | ret = -EFAULT; |
607 | size = min(sizeof(attr->set_buf) - 1, len); | 609 | size = min(sizeof(attr->set_buf) - 1, len); |
608 | if (copy_from_user(attr->set_buf, buf, size)) | 610 | if (copy_from_user(attr->set_buf, buf, size)) |
@@ -613,7 +615,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, | |||
613 | val = simple_strtol(attr->set_buf, NULL, 0); | 615 | val = simple_strtol(attr->set_buf, NULL, 0); |
614 | attr->set(attr->data, val); | 616 | attr->set(attr->data, val); |
615 | out: | 617 | out: |
616 | up(&attr->sem); | 618 | mutex_unlock(&attr->mutex); |
617 | return ret; | 619 | return ret; |
618 | } | 620 | } |
619 | 621 | ||
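Note: in the libfs hunks the renamed lock only serializes access to the two scratch buffers inside struct simple_attr. A hedged sketch of that idiom with illustrative names (not the libfs API itself):

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Illustrative only: a scratch buffer shared by readers and writers,
     * serialized by a mutex in the spirit of struct simple_attr above. */
    struct demo_attr {
            char buf[24];
            struct mutex lock;          /* protects buf */
    };

    static size_t demo_format(struct demo_attr *a, char *out, size_t len, u64 val)
    {
            size_t n;

            mutex_lock(&a->lock);
            n = scnprintf(a->buf, sizeof(a->buf), "%llu\n",
                          (unsigned long long)val);
            n = min(n, len);
            memcpy(out, a->buf, n);
            mutex_unlock(&a->lock);
            return n;
    }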
diff --git a/fs/minix/namei.c b/fs/minix/namei.c index b25bca5bdb57..5b6a4540a05b 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c | |||
@@ -6,18 +6,6 @@ | |||
6 | 6 | ||
7 | #include "minix.h" | 7 | #include "minix.h" |
8 | 8 | ||
9 | static inline void inc_count(struct inode *inode) | ||
10 | { | ||
11 | inode->i_nlink++; | ||
12 | mark_inode_dirty(inode); | ||
13 | } | ||
14 | |||
15 | static inline void dec_count(struct inode *inode) | ||
16 | { | ||
17 | inode->i_nlink--; | ||
18 | mark_inode_dirty(inode); | ||
19 | } | ||
20 | |||
21 | static int add_nondir(struct dentry *dentry, struct inode *inode) | 9 | static int add_nondir(struct dentry *dentry, struct inode *inode) |
22 | { | 10 | { |
23 | int err = minix_add_link(dentry, inode); | 11 | int err = minix_add_link(dentry, inode); |
@@ -25,7 +13,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) | |||
25 | d_instantiate(dentry, inode); | 13 | d_instantiate(dentry, inode); |
26 | return 0; | 14 | return 0; |
27 | } | 15 | } |
28 | dec_count(inode); | 16 | inode_dec_link_count(inode); |
29 | iput(inode); | 17 | iput(inode); |
30 | return err; | 18 | return err; |
31 | } | 19 | } |
@@ -125,7 +113,7 @@ out: | |||
125 | return err; | 113 | return err; |
126 | 114 | ||
127 | out_fail: | 115 | out_fail: |
128 | dec_count(inode); | 116 | inode_dec_link_count(inode); |
129 | iput(inode); | 117 | iput(inode); |
130 | goto out; | 118 | goto out; |
131 | } | 119 | } |
@@ -139,7 +127,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, | |||
139 | return -EMLINK; | 127 | return -EMLINK; |
140 | 128 | ||
141 | inode->i_ctime = CURRENT_TIME_SEC; | 129 | inode->i_ctime = CURRENT_TIME_SEC; |
142 | inc_count(inode); | 130 | inode_inc_link_count(inode); |
143 | atomic_inc(&inode->i_count); | 131 | atomic_inc(&inode->i_count); |
144 | return add_nondir(dentry, inode); | 132 | return add_nondir(dentry, inode); |
145 | } | 133 | } |
@@ -152,7 +140,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) | |||
152 | if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) | 140 | if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) |
153 | goto out; | 141 | goto out; |
154 | 142 | ||
155 | inc_count(dir); | 143 | inode_inc_link_count(dir); |
156 | 144 | ||
157 | inode = minix_new_inode(dir, &err); | 145 | inode = minix_new_inode(dir, &err); |
158 | if (!inode) | 146 | if (!inode) |
@@ -163,7 +151,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) | |||
163 | inode->i_mode |= S_ISGID; | 151 | inode->i_mode |= S_ISGID; |
164 | minix_set_inode(inode, 0); | 152 | minix_set_inode(inode, 0); |
165 | 153 | ||
166 | inc_count(inode); | 154 | inode_inc_link_count(inode); |
167 | 155 | ||
168 | err = minix_make_empty(inode, dir); | 156 | err = minix_make_empty(inode, dir); |
169 | if (err) | 157 | if (err) |
@@ -178,11 +166,11 @@ out: | |||
178 | return err; | 166 | return err; |
179 | 167 | ||
180 | out_fail: | 168 | out_fail: |
181 | dec_count(inode); | 169 | inode_dec_link_count(inode); |
182 | dec_count(inode); | 170 | inode_dec_link_count(inode); |
183 | iput(inode); | 171 | iput(inode); |
184 | out_dir: | 172 | out_dir: |
185 | dec_count(dir); | 173 | inode_dec_link_count(dir); |
186 | goto out; | 174 | goto out; |
187 | } | 175 | } |
188 | 176 | ||
@@ -202,7 +190,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry) | |||
202 | goto end_unlink; | 190 | goto end_unlink; |
203 | 191 | ||
204 | inode->i_ctime = dir->i_ctime; | 192 | inode->i_ctime = dir->i_ctime; |
205 | dec_count(inode); | 193 | inode_dec_link_count(inode); |
206 | end_unlink: | 194 | end_unlink: |
207 | return err; | 195 | return err; |
208 | } | 196 | } |
@@ -215,8 +203,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) | |||
215 | if (minix_empty_dir(inode)) { | 203 | if (minix_empty_dir(inode)) { |
216 | err = minix_unlink(dir, dentry); | 204 | err = minix_unlink(dir, dentry); |
217 | if (!err) { | 205 | if (!err) { |
218 | dec_count(dir); | 206 | inode_dec_link_count(dir); |
219 | dec_count(inode); | 207 | inode_dec_link_count(inode); |
220 | } | 208 | } |
221 | } | 209 | } |
222 | return err; | 210 | return err; |
@@ -257,34 +245,34 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, | |||
257 | new_de = minix_find_entry(new_dentry, &new_page); | 245 | new_de = minix_find_entry(new_dentry, &new_page); |
258 | if (!new_de) | 246 | if (!new_de) |
259 | goto out_dir; | 247 | goto out_dir; |
260 | inc_count(old_inode); | 248 | inode_inc_link_count(old_inode); |
261 | minix_set_link(new_de, new_page, old_inode); | 249 | minix_set_link(new_de, new_page, old_inode); |
262 | new_inode->i_ctime = CURRENT_TIME_SEC; | 250 | new_inode->i_ctime = CURRENT_TIME_SEC; |
263 | if (dir_de) | 251 | if (dir_de) |
264 | new_inode->i_nlink--; | 252 | new_inode->i_nlink--; |
265 | dec_count(new_inode); | 253 | inode_dec_link_count(new_inode); |
266 | } else { | 254 | } else { |
267 | if (dir_de) { | 255 | if (dir_de) { |
268 | err = -EMLINK; | 256 | err = -EMLINK; |
269 | if (new_dir->i_nlink >= info->s_link_max) | 257 | if (new_dir->i_nlink >= info->s_link_max) |
270 | goto out_dir; | 258 | goto out_dir; |
271 | } | 259 | } |
272 | inc_count(old_inode); | 260 | inode_inc_link_count(old_inode); |
273 | err = minix_add_link(new_dentry, old_inode); | 261 | err = minix_add_link(new_dentry, old_inode); |
274 | if (err) { | 262 | if (err) { |
275 | dec_count(old_inode); | 263 | inode_dec_link_count(old_inode); |
276 | goto out_dir; | 264 | goto out_dir; |
277 | } | 265 | } |
278 | if (dir_de) | 266 | if (dir_de) |
279 | inc_count(new_dir); | 267 | inode_inc_link_count(new_dir); |
280 | } | 268 | } |
281 | 269 | ||
282 | minix_delete_entry(old_de, old_page); | 270 | minix_delete_entry(old_de, old_page); |
283 | dec_count(old_inode); | 271 | inode_dec_link_count(old_inode); |
284 | 272 | ||
285 | if (dir_de) { | 273 | if (dir_de) { |
286 | minix_set_link(dir_de, dir_page, new_dir); | 274 | minix_set_link(dir_de, dir_page, new_dir); |
287 | dec_count(old_dir); | 275 | inode_dec_link_count(old_dir); |
288 | } | 276 | } |
289 | return 0; | 277 | return 0; |
290 | 278 | ||
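Note: the minix changes drop the file-local inc_count()/dec_count() wrappers in favour of the generic VFS helpers. Judging from the removed code, inode_inc_link_count()/inode_dec_link_count() are expected to behave like this sketch (the real definitions live in the VFS headers):

    #include <linux/fs.h>

    /* Sketch of the expected semantics, mirroring the removed local helpers. */
    static inline void sketch_inode_inc_link_count(struct inode *inode)
    {
            inode->i_nlink++;
            mark_inode_dirty(inode);
    }

    static inline void sketch_inode_dec_link_count(struct inode *inode)
    {
            inode->i_nlink--;
            mark_inode_dirty(inode);
    }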
diff --git a/fs/namei.c b/fs/namei.c index 8dc2b038d5d9..c72b940797fc 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -104,7 +104,7 @@ | |||
104 | */ | 104 | */ |
105 | /* | 105 | /* |
106 | * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) | 106 | * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) |
107 | * implemented. Let's see if raised priority of ->s_vfs_rename_sem gives | 107 | * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives |
108 | * any extra contention... | 108 | * any extra contention... |
109 | */ | 109 | */ |
110 | 110 | ||
@@ -1422,7 +1422,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) | |||
1422 | return NULL; | 1422 | return NULL; |
1423 | } | 1423 | } |
1424 | 1424 | ||
1425 | down(&p1->d_inode->i_sb->s_vfs_rename_sem); | 1425 | mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); |
1426 | 1426 | ||
1427 | for (p = p1; p->d_parent != p; p = p->d_parent) { | 1427 | for (p = p1; p->d_parent != p; p = p->d_parent) { |
1428 | if (p->d_parent == p2) { | 1428 | if (p->d_parent == p2) { |
@@ -1450,7 +1450,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2) | |||
1450 | mutex_unlock(&p1->d_inode->i_mutex); | 1450 | mutex_unlock(&p1->d_inode->i_mutex); |
1451 | if (p1 != p2) { | 1451 | if (p1 != p2) { |
1452 | mutex_unlock(&p2->d_inode->i_mutex); | 1452 | mutex_unlock(&p2->d_inode->i_mutex); |
1453 | up(&p1->d_inode->i_sb->s_vfs_rename_sem); | 1453 | mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); |
1454 | } | 1454 | } |
1455 | } | 1455 | } |
1456 | 1456 | ||
@@ -2277,17 +2277,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname) | |||
2277 | * a) we can get into loop creation. Check is done in is_subdir(). | 2277 | * a) we can get into loop creation. Check is done in is_subdir(). |
2278 | * b) race potential - two innocent renames can create a loop together. | 2278 | * b) race potential - two innocent renames can create a loop together. |
2279 | * That's where 4.4 screws up. Current fix: serialization on | 2279 | * That's where 4.4 screws up. Current fix: serialization on |
2280 | * sb->s_vfs_rename_sem. We might be more accurate, but that's another | 2280 | * sb->s_vfs_rename_mutex. We might be more accurate, but that's another |
2281 | * story. | 2281 | * story. |
2282 | * c) we have to lock _three_ objects - parents and victim (if it exists). | 2282 | * c) we have to lock _three_ objects - parents and victim (if it exists). |
2283 | * And that - after we got ->i_mutex on parents (until then we don't know | 2283 | * And that - after we got ->i_mutex on parents (until then we don't know |
2284 | * whether the target exists). Solution: try to be smart with locking | 2284 | * whether the target exists). Solution: try to be smart with locking |
2285 | * order for inodes. We rely on the fact that tree topology may change | 2285 | * order for inodes. We rely on the fact that tree topology may change |
2286 | * only under ->s_vfs_rename_sem _and_ that parent of the object we | 2286 | * only under ->s_vfs_rename_mutex _and_ that parent of the object we |
2287 | * move will be locked. Thus we can rank directories by the tree | 2287 | * move will be locked. Thus we can rank directories by the tree |
2288 | * (ancestors first) and rank all non-directories after them. | 2288 | * (ancestors first) and rank all non-directories after them. |
2289 | * That works since everybody except rename does "lock parent, lookup, | 2289 | * That works since everybody except rename does "lock parent, lookup, |
2290 | * lock child" and rename is under ->s_vfs_rename_sem. | 2290 | * lock child" and rename is under ->s_vfs_rename_mutex. |
2291 | * HOWEVER, it relies on the assumption that any object with ->lookup() | 2291 | * HOWEVER, it relies on the assumption that any object with ->lookup() |
2292 | * has no more than 1 dentry. If "hybrid" objects will ever appear, | 2292 | * has no more than 1 dentry. If "hybrid" objects will ever appear, |
2293 | * we'd better make sure that there's no link(2) for them. | 2293 | * we'd better make sure that there's no link(2) for them. |
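Note: the rename comments now refer to s_vfs_rename_mutex. A simplified, hedged sketch of the locking order they describe (per-superblock rename mutex first, then the parent directories); the real lock_rename() also handles the ancestor/descendant cases:

    #include <linux/fs.h>
    #include <linux/mutex.h>

    /* Simplified illustration only, hypothetical helper name. */
    static void demo_lock_two_dirs(struct dentry *p1, struct dentry *p2)
    {
            if (p1 == p2) {
                    mutex_lock(&p1->d_inode->i_mutex);
                    return;
            }
            mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
            mutex_lock(&p1->d_inode->i_mutex);
            mutex_lock(&p2->d_inode->i_mutex);
    }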
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 973b444d6914..ebdad8f6398f 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c | |||
@@ -46,7 +46,7 @@ int ncp_make_open(struct inode *inode, int right) | |||
46 | NCP_FINFO(inode)->volNumber, | 46 | NCP_FINFO(inode)->volNumber, |
47 | NCP_FINFO(inode)->dirEntNum); | 47 | NCP_FINFO(inode)->dirEntNum); |
48 | error = -EACCES; | 48 | error = -EACCES; |
49 | down(&NCP_FINFO(inode)->open_sem); | 49 | mutex_lock(&NCP_FINFO(inode)->open_mutex); |
50 | if (!atomic_read(&NCP_FINFO(inode)->opened)) { | 50 | if (!atomic_read(&NCP_FINFO(inode)->opened)) { |
51 | struct ncp_entry_info finfo; | 51 | struct ncp_entry_info finfo; |
52 | int result; | 52 | int result; |
@@ -93,7 +93,7 @@ int ncp_make_open(struct inode *inode, int right) | |||
93 | } | 93 | } |
94 | 94 | ||
95 | out_unlock: | 95 | out_unlock: |
96 | up(&NCP_FINFO(inode)->open_sem); | 96 | mutex_unlock(&NCP_FINFO(inode)->open_mutex); |
97 | out: | 97 | out: |
98 | return error; | 98 | return error; |
99 | } | 99 | } |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index d277a58bd128..0b521d3d97ce 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -63,7 +63,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | |||
63 | 63 | ||
64 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | 64 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == |
65 | SLAB_CTOR_CONSTRUCTOR) { | 65 | SLAB_CTOR_CONSTRUCTOR) { |
66 | init_MUTEX(&ei->open_sem); | 66 | mutex_init(&ei->open_mutex); |
67 | inode_init_once(&ei->vfs_inode); | 67 | inode_init_once(&ei->vfs_inode); |
68 | } | 68 | } |
69 | } | 69 | } |
@@ -520,7 +520,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
520 | } | 520 | } |
521 | 521 | ||
522 | /* server->lock = 0; */ | 522 | /* server->lock = 0; */ |
523 | init_MUTEX(&server->sem); | 523 | mutex_init(&server->mutex); |
524 | server->packet = NULL; | 524 | server->packet = NULL; |
525 | /* server->buffer_size = 0; */ | 525 | /* server->buffer_size = 0; */ |
526 | /* server->conn_status = 0; */ | 526 | /* server->conn_status = 0; */ |
@@ -557,7 +557,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
557 | server->dentry_ttl = 0; /* no caching */ | 557 | server->dentry_ttl = 0; /* no caching */ |
558 | 558 | ||
559 | INIT_LIST_HEAD(&server->tx.requests); | 559 | INIT_LIST_HEAD(&server->tx.requests); |
560 | init_MUTEX(&server->rcv.creq_sem); | 560 | mutex_init(&server->rcv.creq_mutex); |
561 | server->tx.creq = NULL; | 561 | server->tx.creq = NULL; |
562 | server->rcv.creq = NULL; | 562 | server->rcv.creq = NULL; |
563 | server->data_ready = sock->sk->sk_data_ready; | 563 | server->data_ready = sock->sk->sk_data_ready; |
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index c755e1848a42..d9ebf6439f59 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c | |||
@@ -291,7 +291,7 @@ ncp_make_closed(struct inode *inode) | |||
291 | int err; | 291 | int err; |
292 | 292 | ||
293 | err = 0; | 293 | err = 0; |
294 | down(&NCP_FINFO(inode)->open_sem); | 294 | mutex_lock(&NCP_FINFO(inode)->open_mutex); |
295 | if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { | 295 | if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { |
296 | atomic_set(&NCP_FINFO(inode)->opened, 0); | 296 | atomic_set(&NCP_FINFO(inode)->opened, 0); |
297 | err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); | 297 | err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); |
@@ -301,7 +301,7 @@ ncp_make_closed(struct inode *inode) | |||
301 | NCP_FINFO(inode)->volNumber, | 301 | NCP_FINFO(inode)->volNumber, |
302 | NCP_FINFO(inode)->dirEntNum, err); | 302 | NCP_FINFO(inode)->dirEntNum, err); |
303 | } | 303 | } |
304 | up(&NCP_FINFO(inode)->open_sem); | 304 | mutex_unlock(&NCP_FINFO(inode)->open_mutex); |
305 | return err; | 305 | return err; |
306 | } | 306 | } |
307 | 307 | ||
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 6593a5ca88ba..8783eb7ec641 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c | |||
@@ -171,9 +171,9 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req | |||
171 | 171 | ||
172 | static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) | 172 | static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) |
173 | { | 173 | { |
174 | down(&server->rcv.creq_sem); | 174 | mutex_lock(&server->rcv.creq_mutex); |
175 | __ncp_abort_request(server, req, err); | 175 | __ncp_abort_request(server, req, err); |
176 | up(&server->rcv.creq_sem); | 176 | mutex_unlock(&server->rcv.creq_mutex); |
177 | } | 177 | } |
178 | 178 | ||
179 | static inline void __ncptcp_abort(struct ncp_server *server) | 179 | static inline void __ncptcp_abort(struct ncp_server *server) |
@@ -303,20 +303,20 @@ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_req | |||
303 | 303 | ||
304 | static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) | 304 | static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) |
305 | { | 305 | { |
306 | down(&server->rcv.creq_sem); | 306 | mutex_lock(&server->rcv.creq_mutex); |
307 | if (!ncp_conn_valid(server)) { | 307 | if (!ncp_conn_valid(server)) { |
308 | up(&server->rcv.creq_sem); | 308 | mutex_unlock(&server->rcv.creq_mutex); |
309 | printk(KERN_ERR "ncpfs: tcp: Server died\n"); | 309 | printk(KERN_ERR "ncpfs: tcp: Server died\n"); |
310 | return -EIO; | 310 | return -EIO; |
311 | } | 311 | } |
312 | if (server->tx.creq || server->rcv.creq) { | 312 | if (server->tx.creq || server->rcv.creq) { |
313 | req->status = RQ_QUEUED; | 313 | req->status = RQ_QUEUED; |
314 | list_add_tail(&req->req, &server->tx.requests); | 314 | list_add_tail(&req->req, &server->tx.requests); |
315 | up(&server->rcv.creq_sem); | 315 | mutex_unlock(&server->rcv.creq_mutex); |
316 | return 0; | 316 | return 0; |
317 | } | 317 | } |
318 | __ncp_start_request(server, req); | 318 | __ncp_start_request(server, req); |
319 | up(&server->rcv.creq_sem); | 319 | mutex_unlock(&server->rcv.creq_mutex); |
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | 322 | ||
@@ -400,7 +400,7 @@ void ncpdgram_rcv_proc(void *s) | |||
400 | info_server(server, 0, server->unexpected_packet.data, result); | 400 | info_server(server, 0, server->unexpected_packet.data, result); |
401 | continue; | 401 | continue; |
402 | } | 402 | } |
403 | down(&server->rcv.creq_sem); | 403 | mutex_lock(&server->rcv.creq_mutex); |
404 | req = server->rcv.creq; | 404 | req = server->rcv.creq; |
405 | if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && | 405 | if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && |
406 | server->connection == get_conn_number(&reply)))) { | 406 | server->connection == get_conn_number(&reply)))) { |
@@ -430,11 +430,11 @@ void ncpdgram_rcv_proc(void *s) | |||
430 | server->rcv.creq = NULL; | 430 | server->rcv.creq = NULL; |
431 | ncp_finish_request(req, result); | 431 | ncp_finish_request(req, result); |
432 | __ncp_next_request(server); | 432 | __ncp_next_request(server); |
433 | up(&server->rcv.creq_sem); | 433 | mutex_unlock(&server->rcv.creq_mutex); |
434 | continue; | 434 | continue; |
435 | } | 435 | } |
436 | } | 436 | } |
437 | up(&server->rcv.creq_sem); | 437 | mutex_unlock(&server->rcv.creq_mutex); |
438 | } | 438 | } |
439 | drop:; | 439 | drop:; |
440 | _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); | 440 | _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); |
@@ -472,9 +472,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) | |||
472 | void ncpdgram_timeout_proc(void *s) | 472 | void ncpdgram_timeout_proc(void *s) |
473 | { | 473 | { |
474 | struct ncp_server *server = s; | 474 | struct ncp_server *server = s; |
475 | down(&server->rcv.creq_sem); | 475 | mutex_lock(&server->rcv.creq_mutex); |
476 | __ncpdgram_timeout_proc(server); | 476 | __ncpdgram_timeout_proc(server); |
477 | up(&server->rcv.creq_sem); | 477 | mutex_unlock(&server->rcv.creq_mutex); |
478 | } | 478 | } |
479 | 479 | ||
480 | static inline void ncp_init_req(struct ncp_request_reply* req) | 480 | static inline void ncp_init_req(struct ncp_request_reply* req) |
@@ -657,18 +657,18 @@ void ncp_tcp_rcv_proc(void *s) | |||
657 | { | 657 | { |
658 | struct ncp_server *server = s; | 658 | struct ncp_server *server = s; |
659 | 659 | ||
660 | down(&server->rcv.creq_sem); | 660 | mutex_lock(&server->rcv.creq_mutex); |
661 | __ncptcp_rcv_proc(server); | 661 | __ncptcp_rcv_proc(server); |
662 | up(&server->rcv.creq_sem); | 662 | mutex_unlock(&server->rcv.creq_mutex); |
663 | } | 663 | } |
664 | 664 | ||
665 | void ncp_tcp_tx_proc(void *s) | 665 | void ncp_tcp_tx_proc(void *s) |
666 | { | 666 | { |
667 | struct ncp_server *server = s; | 667 | struct ncp_server *server = s; |
668 | 668 | ||
669 | down(&server->rcv.creq_sem); | 669 | mutex_lock(&server->rcv.creq_mutex); |
670 | __ncptcp_try_send(server); | 670 | __ncptcp_try_send(server); |
671 | up(&server->rcv.creq_sem); | 671 | mutex_unlock(&server->rcv.creq_mutex); |
672 | } | 672 | } |
673 | 673 | ||
674 | static int do_ncp_rpc_call(struct ncp_server *server, int size, | 674 | static int do_ncp_rpc_call(struct ncp_server *server, int size, |
@@ -833,7 +833,7 @@ int ncp_disconnect(struct ncp_server *server) | |||
833 | 833 | ||
834 | void ncp_lock_server(struct ncp_server *server) | 834 | void ncp_lock_server(struct ncp_server *server) |
835 | { | 835 | { |
836 | down(&server->sem); | 836 | mutex_lock(&server->mutex); |
837 | if (server->lock) | 837 | if (server->lock) |
838 | printk(KERN_WARNING "ncp_lock_server: was locked!\n"); | 838 | printk(KERN_WARNING "ncp_lock_server: was locked!\n"); |
839 | server->lock = 1; | 839 | server->lock = 1; |
@@ -846,5 +846,5 @@ void ncp_unlock_server(struct ncp_server *server) | |||
846 | return; | 846 | return; |
847 | } | 847 | } |
848 | server->lock = 0; | 848 | server->lock = 0; |
849 | up(&server->sem); | 849 | mutex_unlock(&server->mutex); |
850 | } | 850 | } |
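Note: in ncpfs the converted creq_mutex covers both the in-flight request and the pending-request list. A hedged sketch of that queue-or-start idiom with illustrative names:

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct demo_server {
            struct mutex creq_mutex;    /* guards current + queued requests */
            struct list_head requests;
            void *current_req;
    };

    static int demo_add_request(struct demo_server *s, struct list_head *req)
    {
            mutex_lock(&s->creq_mutex);
            if (s->current_req) {
                    /* transport busy: park the request for later */
                    list_add_tail(req, &s->requests);
            } else {
                    /* idle: start it immediately (details omitted) */
                    s->current_req = req;
            }
            mutex_unlock(&s->creq_mutex);
            return 0;
    }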
diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig index 0ab8f00bdbb2..976ecccd6f56 100644 --- a/fs/nls/Kconfig +++ b/fs/nls/Kconfig | |||
@@ -491,7 +491,7 @@ config NLS_KOI8_U | |||
491 | (koi8-u) and Belarusian (koi8-ru) character sets. | 491 | (koi8-u) and Belarusian (koi8-ru) character sets. |
492 | 492 | ||
493 | config NLS_UTF8 | 493 | config NLS_UTF8 |
494 | tristate "NLS UTF8" | 494 | tristate "NLS UTF-8" |
495 | depends on NLS | 495 | depends on NLS |
496 | help | 496 | help |
497 | If you want to display filenames with native language characters | 497 | If you want to display filenames with native language characters |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 8dd3aafec499..09e1c57a86a0 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -959,7 +959,7 @@ static int ocfs2_initialize_mem_caches(void) | |||
959 | ocfs2_lock_cache = kmem_cache_create("ocfs2_lock", | 959 | ocfs2_lock_cache = kmem_cache_create("ocfs2_lock", |
960 | sizeof(struct ocfs2_journal_lock), | 960 | sizeof(struct ocfs2_journal_lock), |
961 | 0, | 961 | 0, |
962 | SLAB_NO_REAP|SLAB_HWCACHE_ALIGN, | 962 | SLAB_HWCACHE_ALIGN, |
963 | NULL, NULL); | 963 | NULL, NULL); |
964 | if (!ocfs2_lock_cache) | 964 | if (!ocfs2_lock_cache) |
965 | return -ENOMEM; | 965 | return -ENOMEM; |
@@ -973,7 +973,7 @@ repeat: | |||
973 | fdt = files_fdtable(files); | 973 | fdt = files_fdtable(files); |
974 | fd = find_next_zero_bit(fdt->open_fds->fds_bits, | 974 | fd = find_next_zero_bit(fdt->open_fds->fds_bits, |
975 | fdt->max_fdset, | 975 | fdt->max_fdset, |
976 | fdt->next_fd); | 976 | files->next_fd); |
977 | 977 | ||
978 | /* | 978 | /* |
979 | * N.B. For clone tasks sharing a files structure, this test | 979 | * N.B. For clone tasks sharing a files structure, this test |
@@ -998,7 +998,7 @@ repeat: | |||
998 | 998 | ||
999 | FD_SET(fd, fdt->open_fds); | 999 | FD_SET(fd, fdt->open_fds); |
1000 | FD_CLR(fd, fdt->close_on_exec); | 1000 | FD_CLR(fd, fdt->close_on_exec); |
1001 | fdt->next_fd = fd + 1; | 1001 | files->next_fd = fd + 1; |
1002 | #if 1 | 1002 | #if 1 |
1003 | /* Sanity check */ | 1003 | /* Sanity check */ |
1004 | if (fdt->fd[fd] != NULL) { | 1004 | if (fdt->fd[fd] != NULL) { |
@@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd) | |||
1019 | { | 1019 | { |
1020 | struct fdtable *fdt = files_fdtable(files); | 1020 | struct fdtable *fdt = files_fdtable(files); |
1021 | __FD_CLR(fd, fdt->open_fds); | 1021 | __FD_CLR(fd, fdt->open_fds); |
1022 | if (fd < fdt->next_fd) | 1022 | if (fd < files->next_fd) |
1023 | fdt->next_fd = fd; | 1023 | files->next_fd = fd; |
1024 | } | 1024 | } |
1025 | 1025 | ||
1026 | void fastcall put_unused_fd(unsigned int fd) | 1026 | void fastcall put_unused_fd(unsigned int fd) |
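Note: the fd-allocation hunks move the next_fd search hint from the fdtable to the files_struct itself. A hedged, self-contained toy version of the hint-plus-bitmap search shown above (the hint is only an optimisation, so a stale value is harmless):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    struct demo_files {
            unsigned long open[4];      /* open-fd bitmap, up to 256 fds */
            unsigned int next_fd;       /* search hint */
    };

    static int demo_get_unused_fd(struct demo_files *f)
    {
            unsigned int max = sizeof(f->open) * 8;
            unsigned int fd = find_next_zero_bit(f->open, max, f->next_fd);

            if (fd >= max)
                    return -EMFILE;
            __set_bit(fd, f->open);
            f->next_fd = fd + 1;
            return fd;
    }

    static void demo_put_unused_fd(struct demo_files *f, unsigned int fd)
    {
            __clear_bit(fd, f->open);
            if (fd < f->next_fd)
                    f->next_fd = fd;
    }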
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 1d24fead51a6..826c131994c3 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -312,7 +312,7 @@ static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) | |||
312 | case BLK_HDR: | 312 | case BLK_HDR: |
313 | info->state = BLK_LIST; | 313 | info->state = BLK_LIST; |
314 | (*pos)++; | 314 | (*pos)++; |
315 | break; | 315 | /*fallthrough*/ |
316 | case BLK_LIST: | 316 | case BLK_LIST: |
317 | if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { | 317 | if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { |
318 | /* | 318 | /* |
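Note: the proc change turns a break into a deliberate fall-through, so the header state advances straight into the list state within the same call. The general C idiom, with illustrative states:

    enum demo_state { DEMO_HDR, DEMO_LIST, DEMO_DONE };

    static enum demo_state demo_next(enum demo_state st, int *pos)
    {
            switch (st) {
            case DEMO_HDR:
                    (*pos)++;
                    /* fall through: emit the first list entry in the same step */
            case DEMO_LIST:
                    if (*pos > 10)
                            return DEMO_DONE;
                    return DEMO_LIST;
            default:
                    return DEMO_DONE;
            }
    }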
diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c index b471315e24ef..c33963fded9e 100644 --- a/fs/qnx4/file.c +++ b/fs/qnx4/file.c | |||
@@ -12,10 +12,7 @@ | |||
12 | * 27-06-1998 by Frank Denis : file overwriting. | 12 | * 27-06-1998 by Frank Denis : file overwriting. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/config.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
18 | #include <linux/time.h> | ||
19 | #include <linux/qnx4_fs.h> | 16 | #include <linux/qnx4_fs.h> |
20 | 17 | ||
21 | /* | 18 | /* |
diff --git a/fs/quota.c b/fs/quota.c index ba9e0bf32f67..d6a2be826e29 100644 --- a/fs/quota.c +++ b/fs/quota.c | |||
@@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type) | |||
170 | 170 | ||
171 | /* Now when everything is written we can discard the pagecache so | 171 | /* Now when everything is written we can discard the pagecache so |
172 | * that userspace sees the changes. We need i_mutex and so we could | 172 | * that userspace sees the changes. We need i_mutex and so we could |
173 | * not do it inside dqonoff_sem. Moreover we need to be carefull | 173 | * not do it inside dqonoff_mutex. Moreover we need to be carefull |
174 | * about races with quotaoff() (that is the reason why we have own | 174 | * about races with quotaoff() (that is the reason why we have own |
175 | * reference to inode). */ | 175 | * reference to inode). */ |
176 | down(&sb_dqopt(sb)->dqonoff_sem); | 176 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
177 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 177 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
178 | discard[cnt] = NULL; | 178 | discard[cnt] = NULL; |
179 | if (type != -1 && cnt != type) | 179 | if (type != -1 && cnt != type) |
@@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type) | |||
182 | continue; | 182 | continue; |
183 | discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); | 183 | discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); |
184 | } | 184 | } |
185 | up(&sb_dqopt(sb)->dqonoff_sem); | 185 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
186 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 186 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
187 | if (discard[cnt]) { | 187 | if (discard[cnt]) { |
188 | mutex_lock(&discard[cnt]->i_mutex); | 188 | mutex_lock(&discard[cnt]->i_mutex); |
diff --git a/fs/quota_v2.c b/fs/quota_v2.c index b4199ec3ece4..c519a583e681 100644 --- a/fs/quota_v2.c +++ b/fs/quota_v2.c | |||
@@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot) | |||
394 | ssize_t ret; | 394 | ssize_t ret; |
395 | struct v2_disk_dqblk ddquot, empty; | 395 | struct v2_disk_dqblk ddquot, empty; |
396 | 396 | ||
397 | /* dq_off is guarded by dqio_sem */ | 397 | /* dq_off is guarded by dqio_mutex */ |
398 | if (!dquot->dq_off) | 398 | if (!dquot->dq_off) |
399 | if ((ret = dq_insert_tree(dquot)) < 0) { | 399 | if ((ret = dq_insert_tree(dquot)) < 0) { |
400 | printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); | 400 | printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); |
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 2115383dcc8d..6ada2095b9ac 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c | |||
@@ -24,18 +24,7 @@ | |||
24 | * caches is sufficient. | 24 | * caches is sufficient. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
29 | #include <linux/pagemap.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/smp_lock.h> | ||
34 | #include <linux/backing-dev.h> | ||
35 | #include <linux/ramfs.h> | ||
36 | |||
37 | #include <asm/uaccess.h> | ||
38 | #include "internal.h" | ||
39 | 28 | ||
40 | struct address_space_operations ramfs_aops = { | 29 | struct address_space_operations ramfs_aops = { |
41 | .readpage = simple_readpage, | 30 | .readpage = simple_readpage, |
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 3f810acd0bfa..b1ca234068f6 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
@@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | |||
87 | xpages = 1UL << order; | 87 | xpages = 1UL << order; |
88 | npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; | 88 | npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; |
89 | 89 | ||
90 | for (loop = 0; loop < npages; loop++) | 90 | split_page(pages, order); |
91 | set_page_count(pages + loop, 1); | ||
92 | 91 | ||
93 | /* trim off any pages we don't actually require */ | 92 | /* trim off any pages we don't actually require */ |
94 | for (loop = npages; loop < xpages; loop++) | 93 | for (loop = npages; loop < xpages; loop++) |
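Note: the ramfs-nommu hunk replaces the hand-rolled per-page refcount loop with split_page(), which turns one order-N allocation into 2^N independently freeable pages. A hedged sketch of that usage with a hypothetical helper:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Allocate 2^order pages, keep 'needed' of them, free the rest.
     * split_page() gives each constituent page its own reference. */
    static struct page *demo_alloc_trimmed(unsigned int order, unsigned long needed)
    {
            struct page *pages = alloc_pages(GFP_KERNEL, order);
            unsigned long loop, total = 1UL << order;

            if (!pages)
                    return NULL;

            split_page(pages, order);

            for (loop = needed; loop < total; loop++)
                    __free_page(pages + loop);

            return pages;
    }

Compared with open-coding set_page_count() on each page, this keeps the refcount manipulation inside the page allocator.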
diff --git a/fs/seq_file.c b/fs/seq_file.c index 7c40570b71dc..555b9ac04c25 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
@@ -37,7 +37,7 @@ int seq_open(struct file *file, struct seq_operations *op) | |||
37 | file->private_data = p; | 37 | file->private_data = p; |
38 | } | 38 | } |
39 | memset(p, 0, sizeof(*p)); | 39 | memset(p, 0, sizeof(*p)); |
40 | sema_init(&p->sem, 1); | 40 | mutex_init(&p->lock); |
41 | p->op = op; | 41 | p->op = op; |
42 | 42 | ||
43 | /* | 43 | /* |
@@ -71,7 +71,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) | |||
71 | void *p; | 71 | void *p; |
72 | int err = 0; | 72 | int err = 0; |
73 | 73 | ||
74 | down(&m->sem); | 74 | mutex_lock(&m->lock); |
75 | /* | 75 | /* |
76 | * seq_file->op->..m_start/m_stop/m_next may do special actions | 76 | * seq_file->op->..m_start/m_stop/m_next may do special actions |
77 | * or optimisations based on the file->f_version, so we want to | 77 | * or optimisations based on the file->f_version, so we want to |
@@ -164,7 +164,7 @@ Done: | |||
164 | else | 164 | else |
165 | *ppos += copied; | 165 | *ppos += copied; |
166 | file->f_version = m->version; | 166 | file->f_version = m->version; |
167 | up(&m->sem); | 167 | mutex_unlock(&m->lock); |
168 | return copied; | 168 | return copied; |
169 | Enomem: | 169 | Enomem: |
170 | err = -ENOMEM; | 170 | err = -ENOMEM; |
@@ -237,7 +237,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) | |||
237 | struct seq_file *m = (struct seq_file *)file->private_data; | 237 | struct seq_file *m = (struct seq_file *)file->private_data; |
238 | long long retval = -EINVAL; | 238 | long long retval = -EINVAL; |
239 | 239 | ||
240 | down(&m->sem); | 240 | mutex_lock(&m->lock); |
241 | m->version = file->f_version; | 241 | m->version = file->f_version; |
242 | switch (origin) { | 242 | switch (origin) { |
243 | case 1: | 243 | case 1: |
@@ -260,7 +260,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) | |||
260 | } | 260 | } |
261 | } | 261 | } |
262 | } | 262 | } |
263 | up(&m->sem); | 263 | mutex_unlock(&m->lock); |
264 | file->f_version = m->version; | 264 | file->f_version = m->version; |
265 | return retval; | 265 | return retval; |
266 | } | 266 | } |
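Note: seq_file now serializes readers and seekers with m->lock. For context, a minimal hedged sketch of the consumer side that this protects, iterating a fixed array with illustrative names:

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    static const char *demo_items[] = { "alpha", "beta", "gamma" };

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
            if (*pos >= ARRAY_SIZE(demo_items))
                    return NULL;
            return (void *)&demo_items[*pos];
    }

    static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
    {
            ++*pos;
            return demo_start(m, pos);
    }

    static void demo_stop(struct seq_file *m, void *v)
    {
    }

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%s\n", *(const char **)v);
            return 0;
    }

    static struct seq_operations demo_seq_ops = {
            .start = demo_start,
            .next  = demo_next,
            .stop  = demo_stop,
            .show  = demo_show,
    };

    static int demo_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &demo_seq_ops);
    }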
diff --git a/fs/super.c b/fs/super.c index e20b5580afd5..425861cb1caa 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -76,9 +76,9 @@ static struct super_block *alloc_super(void) | |||
76 | down_write(&s->s_umount); | 76 | down_write(&s->s_umount); |
77 | s->s_count = S_BIAS; | 77 | s->s_count = S_BIAS; |
78 | atomic_set(&s->s_active, 1); | 78 | atomic_set(&s->s_active, 1); |
79 | sema_init(&s->s_vfs_rename_sem,1); | 79 | mutex_init(&s->s_vfs_rename_mutex); |
80 | sema_init(&s->s_dquot.dqio_sem, 1); | 80 | mutex_init(&s->s_dquot.dqio_mutex); |
81 | sema_init(&s->s_dquot.dqonoff_sem, 1); | 81 | mutex_init(&s->s_dquot.dqonoff_mutex); |
82 | init_rwsem(&s->s_dquot.dqptr_sem); | 82 | init_rwsem(&s->s_dquot.dqptr_sem); |
83 | init_waitqueue_head(&s->s_wait_unfrozen); | 83 | init_waitqueue_head(&s->s_wait_unfrozen); |
84 | s->s_maxbytes = MAX_NON_LFS; | 84 | s->s_maxbytes = MAX_NON_LFS; |
@@ -693,9 +693,9 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type, | |||
693 | * will protect the lockfs code from trying to start a snapshot | 693 | * will protect the lockfs code from trying to start a snapshot |
694 | * while we are mounting | 694 | * while we are mounting |
695 | */ | 695 | */ |
696 | down(&bdev->bd_mount_sem); | 696 | mutex_lock(&bdev->bd_mount_mutex); |
697 | s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); | 697 | s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); |
698 | up(&bdev->bd_mount_sem); | 698 | mutex_unlock(&bdev->bd_mount_mutex); |
699 | if (IS_ERR(s)) | 699 | if (IS_ERR(s)) |
700 | goto out; | 700 | goto out; |
701 | 701 | ||
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 7f0e4b53085e..b8a73f716fbe 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c | |||
@@ -16,18 +16,6 @@ | |||
16 | #include <linux/smp_lock.h> | 16 | #include <linux/smp_lock.h> |
17 | #include "sysv.h" | 17 | #include "sysv.h" |
18 | 18 | ||
19 | static inline void inc_count(struct inode *inode) | ||
20 | { | ||
21 | inode->i_nlink++; | ||
22 | mark_inode_dirty(inode); | ||
23 | } | ||
24 | |||
25 | static inline void dec_count(struct inode *inode) | ||
26 | { | ||
27 | inode->i_nlink--; | ||
28 | mark_inode_dirty(inode); | ||
29 | } | ||
30 | |||
31 | static int add_nondir(struct dentry *dentry, struct inode *inode) | 19 | static int add_nondir(struct dentry *dentry, struct inode *inode) |
32 | { | 20 | { |
33 | int err = sysv_add_link(dentry, inode); | 21 | int err = sysv_add_link(dentry, inode); |
@@ -35,7 +23,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) | |||
35 | d_instantiate(dentry, inode); | 23 | d_instantiate(dentry, inode); |
36 | return 0; | 24 | return 0; |
37 | } | 25 | } |
38 | dec_count(inode); | 26 | inode_dec_link_count(inode); |
39 | iput(inode); | 27 | iput(inode); |
40 | return err; | 28 | return err; |
41 | } | 29 | } |
@@ -124,7 +112,7 @@ out: | |||
124 | return err; | 112 | return err; |
125 | 113 | ||
126 | out_fail: | 114 | out_fail: |
127 | dec_count(inode); | 115 | inode_dec_link_count(inode); |
128 | iput(inode); | 116 | iput(inode); |
129 | goto out; | 117 | goto out; |
130 | } | 118 | } |
@@ -138,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, | |||
138 | return -EMLINK; | 126 | return -EMLINK; |
139 | 127 | ||
140 | inode->i_ctime = CURRENT_TIME_SEC; | 128 | inode->i_ctime = CURRENT_TIME_SEC; |
141 | inc_count(inode); | 129 | inode_inc_link_count(inode); |
142 | atomic_inc(&inode->i_count); | 130 | atomic_inc(&inode->i_count); |
143 | 131 | ||
144 | return add_nondir(dentry, inode); | 132 | return add_nondir(dentry, inode); |
@@ -151,7 +139,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) | |||
151 | 139 | ||
152 | if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) | 140 | if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) |
153 | goto out; | 141 | goto out; |
154 | inc_count(dir); | 142 | inode_inc_link_count(dir); |
155 | 143 | ||
156 | inode = sysv_new_inode(dir, S_IFDIR|mode); | 144 | inode = sysv_new_inode(dir, S_IFDIR|mode); |
157 | err = PTR_ERR(inode); | 145 | err = PTR_ERR(inode); |
@@ -160,7 +148,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) | |||
160 | 148 | ||
161 | sysv_set_inode(inode, 0); | 149 | sysv_set_inode(inode, 0); |
162 | 150 | ||
163 | inc_count(inode); | 151 | inode_inc_link_count(inode); |
164 | 152 | ||
165 | err = sysv_make_empty(inode, dir); | 153 | err = sysv_make_empty(inode, dir); |
166 | if (err) | 154 | if (err) |
@@ -175,11 +163,11 @@ out: | |||
175 | return err; | 163 | return err; |
176 | 164 | ||
177 | out_fail: | 165 | out_fail: |
178 | dec_count(inode); | 166 | inode_dec_link_count(inode); |
179 | dec_count(inode); | 167 | inode_dec_link_count(inode); |
180 | iput(inode); | 168 | iput(inode); |
181 | out_dir: | 169 | out_dir: |
182 | dec_count(dir); | 170 | inode_dec_link_count(dir); |
183 | goto out; | 171 | goto out; |
184 | } | 172 | } |
185 | 173 | ||
@@ -199,7 +187,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry) | |||
199 | goto out; | 187 | goto out; |
200 | 188 | ||
201 | inode->i_ctime = dir->i_ctime; | 189 | inode->i_ctime = dir->i_ctime; |
202 | dec_count(inode); | 190 | inode_dec_link_count(inode); |
203 | out: | 191 | out: |
204 | return err; | 192 | return err; |
205 | } | 193 | } |
@@ -213,8 +201,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry) | |||
213 | err = sysv_unlink(dir, dentry); | 201 | err = sysv_unlink(dir, dentry); |
214 | if (!err) { | 202 | if (!err) { |
215 | inode->i_size = 0; | 203 | inode->i_size = 0; |
216 | dec_count(inode); | 204 | inode_dec_link_count(inode); |
217 | dec_count(dir); | 205 | inode_dec_link_count(dir); |
218 | } | 206 | } |
219 | } | 207 | } |
220 | return err; | 208 | return err; |
@@ -258,34 +246,34 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, | |||
258 | new_de = sysv_find_entry(new_dentry, &new_page); | 246 | new_de = sysv_find_entry(new_dentry, &new_page); |
259 | if (!new_de) | 247 | if (!new_de) |
260 | goto out_dir; | 248 | goto out_dir; |
261 | inc_count(old_inode); | 249 | inode_inc_link_count(old_inode); |
262 | sysv_set_link(new_de, new_page, old_inode); | 250 | sysv_set_link(new_de, new_page, old_inode); |
263 | new_inode->i_ctime = CURRENT_TIME_SEC; | 251 | new_inode->i_ctime = CURRENT_TIME_SEC; |
264 | if (dir_de) | 252 | if (dir_de) |
265 | new_inode->i_nlink--; | 253 | new_inode->i_nlink--; |
266 | dec_count(new_inode); | 254 | inode_dec_link_count(new_inode); |
267 | } else { | 255 | } else { |
268 | if (dir_de) { | 256 | if (dir_de) { |
269 | err = -EMLINK; | 257 | err = -EMLINK; |
270 | if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) | 258 | if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) |
271 | goto out_dir; | 259 | goto out_dir; |
272 | } | 260 | } |
273 | inc_count(old_inode); | 261 | inode_inc_link_count(old_inode); |
274 | err = sysv_add_link(new_dentry, old_inode); | 262 | err = sysv_add_link(new_dentry, old_inode); |
275 | if (err) { | 263 | if (err) { |
276 | dec_count(old_inode); | 264 | inode_dec_link_count(old_inode); |
277 | goto out_dir; | 265 | goto out_dir; |
278 | } | 266 | } |
279 | if (dir_de) | 267 | if (dir_de) |
280 | inc_count(new_dir); | 268 | inode_inc_link_count(new_dir); |
281 | } | 269 | } |
282 | 270 | ||
283 | sysv_delete_entry(old_de, old_page); | 271 | sysv_delete_entry(old_de, old_page); |
284 | dec_count(old_inode); | 272 | inode_dec_link_count(old_inode); |
285 | 273 | ||
286 | if (dir_de) { | 274 | if (dir_de) { |
287 | sysv_set_link(dir_de, dir_page, new_dir); | 275 | sysv_set_link(dir_de, dir_page, new_dir); |
288 | dec_count(old_dir); | 276 | inode_dec_link_count(old_dir); |
289 | } | 277 | } |
290 | return 0; | 278 | return 0; |
291 | 279 | ||
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 201049ac8a96..ea521f846d97 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c | |||
@@ -152,7 +152,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb, | |||
152 | int bitmap_nr; | 152 | int bitmap_nr; |
153 | unsigned long overflow; | 153 | unsigned long overflow; |
154 | 154 | ||
155 | down(&sbi->s_alloc_sem); | 155 | mutex_lock(&sbi->s_alloc_mutex); |
156 | if (bloc.logicalBlockNum < 0 || | 156 | if (bloc.logicalBlockNum < 0 || |
157 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) | 157 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) |
158 | { | 158 | { |
@@ -211,7 +211,7 @@ error_return: | |||
211 | sb->s_dirt = 1; | 211 | sb->s_dirt = 1; |
212 | if (UDF_SB_LVIDBH(sb)) | 212 | if (UDF_SB_LVIDBH(sb)) |
213 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 213 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
214 | up(&sbi->s_alloc_sem); | 214 | mutex_unlock(&sbi->s_alloc_mutex); |
215 | return; | 215 | return; |
216 | } | 216 | } |
217 | 217 | ||
@@ -226,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb, | |||
226 | int nr_groups, bitmap_nr; | 226 | int nr_groups, bitmap_nr; |
227 | struct buffer_head *bh; | 227 | struct buffer_head *bh; |
228 | 228 | ||
229 | down(&sbi->s_alloc_sem); | 229 | mutex_lock(&sbi->s_alloc_mutex); |
230 | if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) | 230 | if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) |
231 | goto out; | 231 | goto out; |
232 | 232 | ||
@@ -275,7 +275,7 @@ out: | |||
275 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 275 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
276 | } | 276 | } |
277 | sb->s_dirt = 1; | 277 | sb->s_dirt = 1; |
278 | up(&sbi->s_alloc_sem); | 278 | mutex_unlock(&sbi->s_alloc_mutex); |
279 | return alloc_count; | 279 | return alloc_count; |
280 | } | 280 | } |
281 | 281 | ||
@@ -291,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block * sb, | |||
291 | int newblock = 0; | 291 | int newblock = 0; |
292 | 292 | ||
293 | *err = -ENOSPC; | 293 | *err = -ENOSPC; |
294 | down(&sbi->s_alloc_sem); | 294 | mutex_lock(&sbi->s_alloc_mutex); |
295 | 295 | ||
296 | repeat: | 296 | repeat: |
297 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) | 297 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) |
@@ -364,7 +364,7 @@ repeat: | |||
364 | } | 364 | } |
365 | if (i >= (nr_groups*2)) | 365 | if (i >= (nr_groups*2)) |
366 | { | 366 | { |
367 | up(&sbi->s_alloc_sem); | 367 | mutex_unlock(&sbi->s_alloc_mutex); |
368 | return newblock; | 368 | return newblock; |
369 | } | 369 | } |
370 | if (bit < sb->s_blocksize << 3) | 370 | if (bit < sb->s_blocksize << 3) |
@@ -373,7 +373,7 @@ repeat: | |||
373 | bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); | 373 | bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); |
374 | if (bit >= sb->s_blocksize << 3) | 374 | if (bit >= sb->s_blocksize << 3) |
375 | { | 375 | { |
376 | up(&sbi->s_alloc_sem); | 376 | mutex_unlock(&sbi->s_alloc_mutex); |
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
@@ -387,7 +387,7 @@ got_block: | |||
387 | */ | 387 | */ |
388 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) | 388 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) |
389 | { | 389 | { |
390 | up(&sbi->s_alloc_sem); | 390 | mutex_unlock(&sbi->s_alloc_mutex); |
391 | *err = -EDQUOT; | 391 | *err = -EDQUOT; |
392 | return 0; | 392 | return 0; |
393 | } | 393 | } |
@@ -410,13 +410,13 @@ got_block: | |||
410 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 410 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
411 | } | 411 | } |
412 | sb->s_dirt = 1; | 412 | sb->s_dirt = 1; |
413 | up(&sbi->s_alloc_sem); | 413 | mutex_unlock(&sbi->s_alloc_mutex); |
414 | *err = 0; | 414 | *err = 0; |
415 | return newblock; | 415 | return newblock; |
416 | 416 | ||
417 | error_return: | 417 | error_return: |
418 | *err = -EIO; | 418 | *err = -EIO; |
419 | up(&sbi->s_alloc_sem); | 419 | mutex_unlock(&sbi->s_alloc_mutex); |
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
@@ -433,7 +433,7 @@ static void udf_table_free_blocks(struct super_block * sb, | |||
433 | int8_t etype; | 433 | int8_t etype; |
434 | int i; | 434 | int i; |
435 | 435 | ||
436 | down(&sbi->s_alloc_sem); | 436 | mutex_lock(&sbi->s_alloc_mutex); |
437 | if (bloc.logicalBlockNum < 0 || | 437 | if (bloc.logicalBlockNum < 0 || |
438 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) | 438 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) |
439 | { | 439 | { |
@@ -666,7 +666,7 @@ static void udf_table_free_blocks(struct super_block * sb, | |||
666 | 666 | ||
667 | error_return: | 667 | error_return: |
668 | sb->s_dirt = 1; | 668 | sb->s_dirt = 1; |
669 | up(&sbi->s_alloc_sem); | 669 | mutex_unlock(&sbi->s_alloc_mutex); |
670 | return; | 670 | return; |
671 | } | 671 | } |
672 | 672 | ||
@@ -692,7 +692,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, | |||
692 | else | 692 | else |
693 | return 0; | 693 | return 0; |
694 | 694 | ||
695 | down(&sbi->s_alloc_sem); | 695 | mutex_lock(&sbi->s_alloc_mutex); |
696 | extoffset = sizeof(struct unallocSpaceEntry); | 696 | extoffset = sizeof(struct unallocSpaceEntry); |
697 | bloc = UDF_I_LOCATION(table); | 697 | bloc = UDF_I_LOCATION(table); |
698 | 698 | ||
@@ -736,7 +736,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, | |||
736 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 736 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
737 | sb->s_dirt = 1; | 737 | sb->s_dirt = 1; |
738 | } | 738 | } |
739 | up(&sbi->s_alloc_sem); | 739 | mutex_unlock(&sbi->s_alloc_mutex); |
740 | return alloc_count; | 740 | return alloc_count; |
741 | } | 741 | } |
742 | 742 | ||
@@ -761,7 +761,7 @@ static int udf_table_new_block(struct super_block * sb, | |||
761 | else | 761 | else |
762 | return newblock; | 762 | return newblock; |
763 | 763 | ||
764 | down(&sbi->s_alloc_sem); | 764 | mutex_lock(&sbi->s_alloc_mutex); |
765 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) | 765 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) |
766 | goal = 0; | 766 | goal = 0; |
767 | 767 | ||
@@ -811,7 +811,7 @@ static int udf_table_new_block(struct super_block * sb, | |||
811 | if (spread == 0xFFFFFFFF) | 811 | if (spread == 0xFFFFFFFF) |
812 | { | 812 | { |
813 | udf_release_data(goal_bh); | 813 | udf_release_data(goal_bh); |
814 | up(&sbi->s_alloc_sem); | 814 | mutex_unlock(&sbi->s_alloc_mutex); |
815 | return 0; | 815 | return 0; |
816 | } | 816 | } |
817 | 817 | ||
@@ -827,7 +827,7 @@ static int udf_table_new_block(struct super_block * sb, | |||
827 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) | 827 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) |
828 | { | 828 | { |
829 | udf_release_data(goal_bh); | 829 | udf_release_data(goal_bh); |
830 | up(&sbi->s_alloc_sem); | 830 | mutex_unlock(&sbi->s_alloc_mutex); |
831 | *err = -EDQUOT; | 831 | *err = -EDQUOT; |
832 | return 0; | 832 | return 0; |
833 | } | 833 | } |
@@ -846,7 +846,7 @@ static int udf_table_new_block(struct super_block * sb, | |||
846 | } | 846 | } |
847 | 847 | ||
848 | sb->s_dirt = 1; | 848 | sb->s_dirt = 1; |
849 | up(&sbi->s_alloc_sem); | 849 | mutex_unlock(&sbi->s_alloc_mutex); |
850 | *err = 0; | 850 | *err = 0; |
851 | return newblock; | 851 | return newblock; |
852 | } | 852 | } |
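Every fs/udf hunk above is the same mechanical conversion: the allocation lock stops being a semaphore used as a binary lock and becomes a struct mutex, so down()/up() turn into mutex_lock()/mutex_unlock(), and each early-return path still has to drop the lock before returning. A minimal sketch of the shape of the change follows; the example_* names are placeholders, not actual UDF code.

	#include <linux/errno.h>
	#include <linux/mutex.h>

	struct example_sb_info {
		struct mutex s_alloc_mutex;	/* was: struct semaphore s_alloc_sem */
	};

	/* Stand-in for the real bitmap/table scan done under the lock. */
	static int example_claim_block(struct example_sb_info *sbi)
	{
		return 0;
	}

	static int example_new_block(struct example_sb_info *sbi, int *err)
	{
		int newblock;

		mutex_lock(&sbi->s_alloc_mutex);	/* was: down(&sbi->s_alloc_sem) */
		newblock = example_claim_block(sbi);
		if (!newblock) {
			mutex_unlock(&sbi->s_alloc_mutex);	/* error paths unlock too */
			*err = -ENOSPC;
			return 0;
		}
		mutex_unlock(&sbi->s_alloc_mutex);	/* was: up(&sbi->s_alloc_sem) */
		*err = 0;
		return newblock;
	}

The gain is not behavioural: a mutex has strict owner-release semantics and, with CONFIG_DEBUG_MUTEXES, catches misuse that a semaphore silently tolerates, which is why sleeping locks used only as binary locks were being converted wholesale at this point in the 2.6 series.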
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index c9b707b470ca..3873c672cb4c 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c | |||
@@ -42,7 +42,7 @@ void udf_free_inode(struct inode * inode) | |||
42 | 42 | ||
43 | clear_inode(inode); | 43 | clear_inode(inode); |
44 | 44 | ||
45 | down(&sbi->s_alloc_sem); | 45 | mutex_lock(&sbi->s_alloc_mutex); |
46 | if (sbi->s_lvidbh) { | 46 | if (sbi->s_lvidbh) { |
47 | if (S_ISDIR(inode->i_mode)) | 47 | if (S_ISDIR(inode->i_mode)) |
48 | UDF_SB_LVIDIU(sb)->numDirs = | 48 | UDF_SB_LVIDIU(sb)->numDirs = |
@@ -53,7 +53,7 @@ void udf_free_inode(struct inode * inode) | |||
53 | 53 | ||
54 | mark_buffer_dirty(sbi->s_lvidbh); | 54 | mark_buffer_dirty(sbi->s_lvidbh); |
55 | } | 55 | } |
56 | up(&sbi->s_alloc_sem); | 56 | mutex_unlock(&sbi->s_alloc_mutex); |
57 | 57 | ||
58 | udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); | 58 | udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); |
59 | } | 59 | } |
@@ -83,7 +83,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) | |||
83 | return NULL; | 83 | return NULL; |
84 | } | 84 | } |
85 | 85 | ||
86 | down(&sbi->s_alloc_sem); | 86 | mutex_lock(&sbi->s_alloc_mutex); |
87 | UDF_I_UNIQUE(inode) = 0; | 87 | UDF_I_UNIQUE(inode) = 0; |
88 | UDF_I_LENEXTENTS(inode) = 0; | 88 | UDF_I_LENEXTENTS(inode) = 0; |
89 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; | 89 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; |
@@ -148,7 +148,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) | |||
148 | UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); | 148 | UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); |
149 | insert_inode_hash(inode); | 149 | insert_inode_hash(inode); |
150 | mark_inode_dirty(inode); | 150 | mark_inode_dirty(inode); |
151 | up(&sbi->s_alloc_sem); | 151 | mutex_unlock(&sbi->s_alloc_mutex); |
152 | 152 | ||
153 | if (DQUOT_ALLOC_INODE(inode)) | 153 | if (DQUOT_ALLOC_INODE(inode)) |
154 | { | 154 | { |
diff --git a/fs/udf/super.c b/fs/udf/super.c index 368d8f81fe54..9303c50c5d55 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -1515,7 +1515,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1515 | sb->s_fs_info = sbi; | 1515 | sb->s_fs_info = sbi; |
1516 | memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); | 1516 | memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); |
1517 | 1517 | ||
1518 | init_MUTEX(&sbi->s_alloc_sem); | 1518 | mutex_init(&sbi->s_alloc_mutex); |
1519 | 1519 | ||
1520 | if (!udf_parse_options((char *)options, &uopt)) | 1520 | if (!udf_parse_options((char *)options, &uopt)) |
1521 | goto error_out; | 1521 | goto error_out; |
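The mutex itself is a field of the UDF superblock-info structure (that header hunk lies outside this excerpt), and zeroed memory is not a valid mutex, so udf_fill_super() initializes it explicitly right after the memset(). A sketch of that initialization step, with example_sb_info standing in for the real structure:

	#include <linux/mutex.h>
	#include <linux/string.h>

	struct example_sb_info {
		struct mutex s_alloc_mutex;	/* was: struct semaphore s_alloc_sem */
	};

	static void example_init_sb_info(struct example_sb_info *sbi)
	{
		memset(sbi, 0, sizeof(*sbi));
		/* was: init_MUTEX(&sbi->s_alloc_sem), i.e. a semaphore set to 1 */
		mutex_init(&sbi->s_alloc_mutex);
	}

mutex_init() is the dynamic counterpart of the new lock type's static initializer and must run before the first mutex_lock() on the embedded lock.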
diff --git a/fs/ufs/file.c b/fs/ufs/file.c index ed69d7fe1b5d..62ad481810ef 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c | |||
@@ -23,18 +23,8 @@ | |||
23 | * ext2 fs regular file handling primitives | 23 | * ext2 fs regular file handling primitives |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/system.h> | ||
28 | |||
29 | #include <linux/errno.h> | ||
30 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
31 | #include <linux/ufs_fs.h> | 27 | #include <linux/ufs_fs.h> |
32 | #include <linux/fcntl.h> | ||
33 | #include <linux/time.h> | ||
34 | #include <linux/stat.h> | ||
35 | #include <linux/mm.h> | ||
36 | #include <linux/pagemap.h> | ||
37 | #include <linux/smp_lock.h> | ||
38 | 28 | ||
39 | /* | 29 | /* |
40 | * We have mostly NULL's here: the current defaults are ok for | 30 | * We have mostly NULL's here: the current defaults are ok for |
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 2958cde7d3d6..8d5f98a01c74 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -43,18 +43,6 @@ | |||
43 | #define UFSD(x) | 43 | #define UFSD(x) |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | static inline void ufs_inc_count(struct inode *inode) | ||
47 | { | ||
48 | inode->i_nlink++; | ||
49 | mark_inode_dirty(inode); | ||
50 | } | ||
51 | |||
52 | static inline void ufs_dec_count(struct inode *inode) | ||
53 | { | ||
54 | inode->i_nlink--; | ||
55 | mark_inode_dirty(inode); | ||
56 | } | ||
57 | |||
58 | static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) | 46 | static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) |
59 | { | 47 | { |
60 | int err = ufs_add_link(dentry, inode); | 48 | int err = ufs_add_link(dentry, inode); |
@@ -62,7 +50,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) | |||
62 | d_instantiate(dentry, inode); | 50 | d_instantiate(dentry, inode); |
63 | return 0; | 51 | return 0; |
64 | } | 52 | } |
65 | ufs_dec_count(inode); | 53 | inode_dec_link_count(inode); |
66 | iput(inode); | 54 | iput(inode); |
67 | return err; | 55 | return err; |
68 | } | 56 | } |
@@ -173,7 +161,7 @@ out: | |||
173 | return err; | 161 | return err; |
174 | 162 | ||
175 | out_fail: | 163 | out_fail: |
176 | ufs_dec_count(inode); | 164 | inode_dec_link_count(inode); |
177 | iput(inode); | 165 | iput(inode); |
178 | goto out; | 166 | goto out; |
179 | } | 167 | } |
@@ -191,7 +179,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, | |||
191 | } | 179 | } |
192 | 180 | ||
193 | inode->i_ctime = CURRENT_TIME_SEC; | 181 | inode->i_ctime = CURRENT_TIME_SEC; |
194 | ufs_inc_count(inode); | 182 | inode_inc_link_count(inode); |
195 | atomic_inc(&inode->i_count); | 183 | atomic_inc(&inode->i_count); |
196 | 184 | ||
197 | error = ufs_add_nondir(dentry, inode); | 185 | error = ufs_add_nondir(dentry, inode); |
@@ -208,7 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
208 | goto out; | 196 | goto out; |
209 | 197 | ||
210 | lock_kernel(); | 198 | lock_kernel(); |
211 | ufs_inc_count(dir); | 199 | inode_inc_link_count(dir); |
212 | 200 | ||
213 | inode = ufs_new_inode(dir, S_IFDIR|mode); | 201 | inode = ufs_new_inode(dir, S_IFDIR|mode); |
214 | err = PTR_ERR(inode); | 202 | err = PTR_ERR(inode); |
@@ -218,7 +206,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
218 | inode->i_op = &ufs_dir_inode_operations; | 206 | inode->i_op = &ufs_dir_inode_operations; |
219 | inode->i_fop = &ufs_dir_operations; | 207 | inode->i_fop = &ufs_dir_operations; |
220 | 208 | ||
221 | ufs_inc_count(inode); | 209 | inode_inc_link_count(inode); |
222 | 210 | ||
223 | err = ufs_make_empty(inode, dir); | 211 | err = ufs_make_empty(inode, dir); |
224 | if (err) | 212 | if (err) |
@@ -234,11 +222,11 @@ out: | |||
234 | return err; | 222 | return err; |
235 | 223 | ||
236 | out_fail: | 224 | out_fail: |
237 | ufs_dec_count(inode); | 225 | inode_dec_link_count(inode); |
238 | ufs_dec_count(inode); | 226 | inode_dec_link_count(inode); |
239 | iput (inode); | 227 | iput (inode); |
240 | out_dir: | 228 | out_dir: |
241 | ufs_dec_count(dir); | 229 | inode_dec_link_count(dir); |
242 | unlock_kernel(); | 230 | unlock_kernel(); |
243 | goto out; | 231 | goto out; |
244 | } | 232 | } |
@@ -260,7 +248,7 @@ static int ufs_unlink(struct inode * dir, struct dentry *dentry) | |||
260 | goto out; | 248 | goto out; |
261 | 249 | ||
262 | inode->i_ctime = dir->i_ctime; | 250 | inode->i_ctime = dir->i_ctime; |
263 | ufs_dec_count(inode); | 251 | inode_dec_link_count(inode); |
264 | err = 0; | 252 | err = 0; |
265 | out: | 253 | out: |
266 | unlock_kernel(); | 254 | unlock_kernel(); |
@@ -277,8 +265,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) | |||
277 | err = ufs_unlink(dir, dentry); | 265 | err = ufs_unlink(dir, dentry); |
278 | if (!err) { | 266 | if (!err) { |
279 | inode->i_size = 0; | 267 | inode->i_size = 0; |
280 | ufs_dec_count(inode); | 268 | inode_dec_link_count(inode); |
281 | ufs_dec_count(dir); | 269 | inode_dec_link_count(dir); |
282 | } | 270 | } |
283 | } | 271 | } |
284 | unlock_kernel(); | 272 | unlock_kernel(); |
@@ -319,35 +307,35 @@ static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
319 | new_de = ufs_find_entry (new_dentry, &new_bh); | 307 | new_de = ufs_find_entry (new_dentry, &new_bh); |
320 | if (!new_de) | 308 | if (!new_de) |
321 | goto out_dir; | 309 | goto out_dir; |
322 | ufs_inc_count(old_inode); | 310 | inode_inc_link_count(old_inode); |
323 | ufs_set_link(new_dir, new_de, new_bh, old_inode); | 311 | ufs_set_link(new_dir, new_de, new_bh, old_inode); |
324 | new_inode->i_ctime = CURRENT_TIME_SEC; | 312 | new_inode->i_ctime = CURRENT_TIME_SEC; |
325 | if (dir_de) | 313 | if (dir_de) |
326 | new_inode->i_nlink--; | 314 | new_inode->i_nlink--; |
327 | ufs_dec_count(new_inode); | 315 | inode_dec_link_count(new_inode); |
328 | } else { | 316 | } else { |
329 | if (dir_de) { | 317 | if (dir_de) { |
330 | err = -EMLINK; | 318 | err = -EMLINK; |
331 | if (new_dir->i_nlink >= UFS_LINK_MAX) | 319 | if (new_dir->i_nlink >= UFS_LINK_MAX) |
332 | goto out_dir; | 320 | goto out_dir; |
333 | } | 321 | } |
334 | ufs_inc_count(old_inode); | 322 | inode_inc_link_count(old_inode); |
335 | err = ufs_add_link(new_dentry, old_inode); | 323 | err = ufs_add_link(new_dentry, old_inode); |
336 | if (err) { | 324 | if (err) { |
337 | ufs_dec_count(old_inode); | 325 | inode_dec_link_count(old_inode); |
338 | goto out_dir; | 326 | goto out_dir; |
339 | } | 327 | } |
340 | if (dir_de) | 328 | if (dir_de) |
341 | ufs_inc_count(new_dir); | 329 | inode_inc_link_count(new_dir); |
342 | } | 330 | } |
343 | 331 | ||
344 | ufs_delete_entry (old_dir, old_de, old_bh); | 332 | ufs_delete_entry (old_dir, old_de, old_bh); |
345 | 333 | ||
346 | ufs_dec_count(old_inode); | 334 | inode_dec_link_count(old_inode); |
347 | 335 | ||
348 | if (dir_de) { | 336 | if (dir_de) { |
349 | ufs_set_link(old_inode, dir_de, dir_bh, new_dir); | 337 | ufs_set_link(old_inode, dir_de, dir_bh, new_dir); |
350 | ufs_dec_count(old_dir); | 338 | inode_dec_link_count(old_dir); |
351 | } | 339 | } |
352 | unlock_kernel(); | 340 | unlock_kernel(); |
353 | return 0; | 341 | return 0; |
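The ufs_inc_count()/ufs_dec_count() wrappers deleted at the top of this file did nothing UFS-specific, which is what makes the rest of the hunk a pure substitution: the generic helpers in <linux/fs.h> adjust i_nlink and mark the inode dirty exactly as the local versions did. Their shape is reproduced here from memory of this kernel generation, so treat it as a sketch rather than a verbatim quote:

	static inline void inode_inc_link_count(struct inode *inode)
	{
		inode->i_nlink++;
		mark_inode_dirty(inode);
	}

	static inline void inode_dec_link_count(struct inode *inode)
	{
		inode->i_nlink--;
		mark_inode_dirty(inode);
	}

Centralizing the helpers means every filesystem manipulates the link count through one place, which later made it possible to add sanity checks and change the underlying bookkeeping without touching each filesystem again.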
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index cdb905ab4dba..9fb0312665ca 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/blkdev.h> | 29 | #include <linux/blkdev.h> |
30 | #include <linux/hash.h> | 30 | #include <linux/hash.h> |
31 | #include <linux/kthread.h> | 31 | #include <linux/kthread.h> |
32 | #include <linux/migrate.h> | ||
32 | #include "xfs_linux.h" | 33 | #include "xfs_linux.h" |
33 | 34 | ||
34 | STATIC kmem_zone_t *xfs_buf_zone; | 35 | STATIC kmem_zone_t *xfs_buf_zone; |
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c index 8955720a2c6b..713e6a7505d0 100644 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ b/fs/xfs/linux-2.6/xfs_stats.c | |||
@@ -62,18 +62,15 @@ xfs_read_xfsstats( | |||
62 | while (j < xstats[i].endpoint) { | 62 | while (j < xstats[i].endpoint) { |
63 | val = 0; | 63 | val = 0; |
64 | /* sum over all cpus */ | 64 | /* sum over all cpus */ |
65 | for (c = 0; c < NR_CPUS; c++) { | 65 | for_each_cpu(c) |
66 | if (!cpu_possible(c)) continue; | ||
67 | val += *(((__u32*)&per_cpu(xfsstats, c) + j)); | 66 | val += *(((__u32*)&per_cpu(xfsstats, c) + j)); |
68 | } | ||
69 | len += sprintf(buffer + len, " %u", val); | 67 | len += sprintf(buffer + len, " %u", val); |
70 | j++; | 68 | j++; |
71 | } | 69 | } |
72 | buffer[len++] = '\n'; | 70 | buffer[len++] = '\n'; |
73 | } | 71 | } |
74 | /* extra precision counters */ | 72 | /* extra precision counters */ |
75 | for (i = 0; i < NR_CPUS; i++) { | 73 | for_each_cpu(i) { |
76 | if (!cpu_possible(i)) continue; | ||
77 | xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; | 74 | xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; |
78 | xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; | 75 | xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; |
79 | xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; | 76 | xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; |
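This hunk, like the xfs_sysctl.c one that follows, replaces the open-coded "loop to NR_CPUS and skip impossible CPUs" idiom with for_each_cpu(). In this kernel generation for_each_cpu() takes a single argument and walks the possible-CPU map (it was later renamed for_each_possible_cpu()). A minimal sketch of the summation pattern, where example_stat is a hypothetical per-CPU counter rather than an XFS one:

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/types.h>

	static DEFINE_PER_CPU(__u32, example_stat);

	static __u32 example_sum_stat(void)
	{
		__u32 val = 0;
		int c;

		/* was: for (c = 0; c < NR_CPUS; c++) { if (!cpu_possible(c)) continue; ... } */
		for_each_cpu(c)
			val += per_cpu(example_stat, c);
		return val;
	}

The iterator form is shorter, cannot forget the cpu_possible() check, and keeps working if the possible-CPU numbering is ever sparse instead of a dense 0..N range.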
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index a02564972420..7079cc837210 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c | |||
@@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler( | |||
38 | 38 | ||
39 | if (!ret && write && *valp) { | 39 | if (!ret && write && *valp) { |
40 | printk("XFS Clearing xfsstats\n"); | 40 | printk("XFS Clearing xfsstats\n"); |
41 | for (c = 0; c < NR_CPUS; c++) { | 41 | for_each_cpu(c) { |
42 | if (!cpu_possible(c)) continue; | ||
43 | preempt_disable(); | 42 | preempt_disable(); |
44 | /* save vn_active, it's a universal truth! */ | 43 | /* save vn_active, it's a universal truth! */ |
45 | vn_active = per_cpu(xfsstats, c).vn_active; | 44 | vn_active = per_cpu(xfsstats, c).vn_active; |