-rw-r--r--  fs/gfs2/aops.c           |   4
-rw-r--r--  fs/gfs2/bmap.c           |   7
-rw-r--r--  fs/gfs2/file.c           |  10
-rw-r--r--  fs/gfs2/glock.c          |  83
-rw-r--r--  fs/gfs2/glock.h          |   2
-rw-r--r--  fs/gfs2/glops.c          |   4
-rw-r--r--  fs/gfs2/incore.h         |  41
-rw-r--r--  fs/gfs2/inode.c          |  44
-rw-r--r--  fs/gfs2/main.c           |  19
-rw-r--r--  fs/gfs2/ops_fstype.c     |   2
-rw-r--r--  fs/gfs2/quota.c          | 321
-rw-r--r--  fs/gfs2/quota.h          |   9
-rw-r--r--  fs/gfs2/rgrp.c           | 208
-rw-r--r--  fs/gfs2/rgrp.h           |   4
-rw-r--r--  fs/gfs2/super.c          |   2
-rw-r--r--  fs/gfs2/sys.c            |   2
-rw-r--r--  fs/gfs2/util.c           |  20
-rw-r--r--  fs/gfs2/util.h           |   2
-rw-r--r--  fs/gfs2/xattr.c          |   3
-rw-r--r--  include/linux/lockref.h  |   6
-rw-r--r--  lib/lockref.c            |   1
21 files changed, 455 insertions, 339 deletions
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 1f7d8057ea68..b7fc035a6943 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -611,12 +611,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
 
 	if (alloc_required) {
+		struct gfs2_alloc_parms ap = { .aflags = 0, };
 		error = gfs2_quota_lock_check(ip);
 		if (error)
 			goto out_unlock;
 
 		requested = data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip, requested, 0);
+		ap.target = requested;
+		error = gfs2_inplace_reserve(ip, &ap);
 		if (error)
 			goto out_qunlock;
 	}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 62a65fc448dc..fe0500c0af7a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1216,6 +1216,7 @@ static int do_grow(struct inode *inode, u64 size)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_alloc_parms ap = { .target = 1, };
 	struct buffer_head *dibh;
 	int error;
 	int unstuff = 0;
@@ -1226,7 +1227,7 @@ static int do_grow(struct inode *inode, u64 size)
 	if (error)
 		return error;
 
-	error = gfs2_inplace_reserve(ip, 1, 0);
+	error = gfs2_inplace_reserve(ip, &ap);
 	if (error)
 		goto do_grow_qunlock;
 	unstuff = 1;
@@ -1279,6 +1280,7 @@ do_grow_qunlock:
 
 int gfs2_setattr_size(struct inode *inode, u64 newsize)
 {
+	struct gfs2_inode *ip = GFS2_I(inode);
 	int ret;
 	u64 oldsize;
 
@@ -1294,7 +1296,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 
 	inode_dio_wait(inode);
 
-	ret = gfs2_rs_alloc(GFS2_I(inode));
+	ret = gfs2_rs_alloc(ip);
 	if (ret)
 		goto out;
 
@@ -1304,6 +1306,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 		goto out;
 	}
 
+	gfs2_rs_deltree(ip->i_res);
 	ret = do_shrink(inode, oldsize, newsize);
 out:
 	put_write_access(inode);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 0621b46d474d..efc078f0ee4e 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -383,6 +383,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct inode *inode = file_inode(vma->vm_file);
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned long last_index;
 	u64 pos = page->index << PAGE_CACHE_SHIFT;
 	unsigned int data_blocks, ind_blocks, rblocks;
@@ -430,7 +431,8 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret)
 		goto out_unlock;
 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
-	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+	ap.target = data_blocks + ind_blocks;
+	ret = gfs2_inplace_reserve(ip, &ap);
 	if (ret)
 		goto out_quota_unlock;
 
@@ -620,7 +622,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
 	if (!(file->f_mode & FMODE_WRITE))
 		return 0;
 
-	gfs2_rs_delete(ip);
+	gfs2_rs_delete(ip, &inode->i_writecount);
 	return 0;
 }
 
@@ -800,6 +802,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 	struct inode *inode = file_inode(file);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 	loff_t bytes, max_bytes;
 	int error;
@@ -850,7 +853,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 retry:
 	gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 
-	error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+	ap.target = data_blocks + ind_blocks;
+	error = gfs2_inplace_reserve(ip, &ap);
 	if (error) {
 		if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
 			bytes >>= 1;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c2f41b4d00b9..e66a8009aff1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -31,6 +31,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/percpu.h>
 #include <linux/list_sort.h>
+#include <linux/lockref.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -129,10 +130,10 @@ void gfs2_glock_free(struct gfs2_glock *gl)
  *
  */
 
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
-	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
-	atomic_inc(&gl->gl_ref);
+	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
+	lockref_get(&gl->gl_lockref);
 }
 
 /**
@@ -187,20 +188,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 }
 
 /**
- * gfs2_glock_put_nolock() - Decrement reference count on glock
- * @gl: The glock to put
- *
- * This function should only be used if the caller has its own reference
- * to the glock, in addition to the one it is dropping.
- */
-
-void gfs2_glock_put_nolock(struct gfs2_glock *gl)
-{
-	if (atomic_dec_and_test(&gl->gl_ref))
-		GLOCK_BUG_ON(gl, 1);
-}
-
-/**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
  *
@@ -211,17 +198,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct address_space *mapping = gfs2_glock2aspace(gl);
 
-	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
-		__gfs2_glock_remove_from_lru(gl);
-		spin_unlock(&lru_lock);
-		spin_lock_bucket(gl->gl_hash);
-		hlist_bl_del_rcu(&gl->gl_list);
-		spin_unlock_bucket(gl->gl_hash);
-		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
-		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
-		trace_gfs2_glock_put(gl);
-		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
-	}
+	if (lockref_put_or_lock(&gl->gl_lockref))
+		return;
+
+	lockref_mark_dead(&gl->gl_lockref);
+
+	spin_lock(&lru_lock);
+	__gfs2_glock_remove_from_lru(gl);
+	spin_unlock(&lru_lock);
+	spin_unlock(&gl->gl_lockref.lock);
+	spin_lock_bucket(gl->gl_hash);
+	hlist_bl_del_rcu(&gl->gl_list);
+	spin_unlock_bucket(gl->gl_hash);
+	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+	trace_gfs2_glock_put(gl);
+	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
 /**
@@ -244,7 +236,7 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
 			continue;
 		if (gl->gl_sbd != sdp)
 			continue;
-		if (atomic_inc_not_zero(&gl->gl_ref))
+		if (lockref_get_not_dead(&gl->gl_lockref))
 			return gl;
 	}
 
@@ -396,10 +388,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 	held2 = (new_state != LM_ST_UNLOCKED);
 
 	if (held1 != held2) {
+		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
 		if (held2)
-			gfs2_glock_hold(gl);
+			gl->gl_lockref.count++;
 		else
-			gfs2_glock_put_nolock(gl);
+			gl->gl_lockref.count--;
 	}
 	if (held1 && held2 && list_empty(&gl->gl_holders))
 		clear_bit(GLF_QUEUED, &gl->gl_flags);
@@ -626,9 +619,9 @@ out:
 out_sched:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	smp_mb__after_clear_bit();
-	gfs2_glock_hold(gl);
+	gl->gl_lockref.count++;
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gfs2_glock_put_nolock(gl);
+		gl->gl_lockref.count--;
 	return;
 
 out_unlock:
@@ -754,7 +747,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_sbd = sdp;
 	gl->gl_flags = 0;
 	gl->gl_name = name;
-	atomic_set(&gl->gl_ref, 1);
+	gl->gl_lockref.count = 1;
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1356,10 +1349,10 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 		}
 	}
 
-	spin_unlock(&gl->gl_spin);
+	gl->gl_lockref.count++;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	smp_wmb();
-	gfs2_glock_hold(gl);
+	spin_unlock(&gl->gl_spin);
+
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
 }
@@ -1404,15 +1397,19 @@ __acquires(&lru_lock)
 	while(!list_empty(list)) {
 		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
 		list_del_init(&gl->gl_lru);
+		if (!spin_trylock(&gl->gl_spin)) {
+			list_add(&gl->gl_lru, &lru_list);
+			atomic_inc(&lru_count);
+			continue;
+		}
 		clear_bit(GLF_LRU, &gl->gl_flags);
-		gfs2_glock_hold(gl);
 		spin_unlock(&lru_lock);
-		spin_lock(&gl->gl_spin);
+		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-			gfs2_glock_put_nolock(gl);
+			gl->gl_lockref.count--;
 		spin_unlock(&gl->gl_spin);
 		spin_lock(&lru_lock);
 	}
@@ -1493,7 +1490,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 
 	rcu_read_lock();
 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-		if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
+		if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
			examiner(gl);
 	}
 	rcu_read_unlock();
@@ -1746,7 +1743,7 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 		  state2str(gl->gl_demote_state), dtime,
 		  atomic_read(&gl->gl_ail_count),
 		  atomic_read(&gl->gl_revokes),
-		  atomic_read(&gl->gl_ref), gl->gl_hold_time);
+		  (int)gl->gl_lockref.count, gl->gl_hold_time);
 
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
 		error = dump_holder(seq, gh);
@@ -1902,7 +1899,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 			gi->nhash = 0;
 		}
 	/* Skip entries for other sb and dead entries */
-	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
+	} while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gl->gl_lockref));
 
 	return 0;
 }
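The glock.c hunks above replace the glock's bare atomic_t reference count with a struct lockref, which couples the count to what used to be gl_spin. Plain get/put pairs become lockless cmpxchg fast paths, code that already holds the spinlock bumps gl_lockref.count directly (replacing gfs2_glock_hold()/gfs2_glock_put_nolock()), and final teardown runs only after lockref_mark_dead() has fenced off concurrent lockref_get_not_dead() callers. A minimal sketch of that put-side shape, where the hypothetical obj type and obj_teardown() stand in for the hash-bucket removal and lm_put_lock() calls in gfs2_glock_put():

	#include <linux/lockref.h>
	#include <linux/slab.h>

	struct obj {
		struct lockref ref;	/* count + the lock formerly beside it */
	};

	static void obj_teardown(struct obj *o)
	{
		kfree(o);		/* stands in for unhashing + lm_put_lock() */
	}

	static void obj_put(struct obj *o)
	{
		/* Fast path: not the last reference; count drops via cmpxchg. */
		if (lockref_put_or_lock(&o->ref))
			return;

		/* Last reference: ref.lock is now held by this caller. */
		lockref_mark_dead(&o->ref);	/* lockref_get_not_dead() now fails */
		spin_unlock(&o->ref.lock);
		obj_teardown(o);		/* no new references can appear */
	}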
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 69f66e3d22bf..6647d77366ba 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -181,8 +181,6 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 			  const struct gfs2_glock_operations *glops,
 			  int create, struct gfs2_glock **glp);
-extern void gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
 extern void gfs2_glock_put(struct gfs2_glock *gl);
 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
 			     unsigned flags, struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index e2e0a90396e7..db908f697139 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -525,9 +525,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
 	    gl->gl_state == LM_ST_SHARED && ip) {
-		gfs2_glock_hold(gl);
+		gl->gl_lockref.count++;
 		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
-			gfs2_glock_put_nolock(gl);
+			gl->gl_lockref.count--;
 	}
 }
 
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 26aabd7caba7..ba1ea67f4eeb 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -21,6 +21,7 @@
 #include <linux/rbtree.h>
 #include <linux/ktime.h>
 #include <linux/percpu.h>
+#include <linux/lockref.h>
 
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
@@ -71,6 +72,7 @@ struct gfs2_bitmap {
 	u32 bi_offset;
 	u32 bi_start;
 	u32 bi_len;
+	u32 bi_blocks;
 };
 
 struct gfs2_rgrpd {
@@ -101,19 +103,25 @@ struct gfs2_rgrpd {
 
 struct gfs2_rbm {
 	struct gfs2_rgrpd *rgd;
-	struct gfs2_bitmap *bi;	/* Bitmap must belong to the rgd */
 	u32 offset;		/* The offset is bitmap relative */
+	int bii;		/* Bitmap index */
 };
 
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+	return rbm->rgd->rd_bits + rbm->bii;
+}
+
 static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
 {
-	return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
+	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+	       rbm->offset;
 }
 
 static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
 			       const struct gfs2_rbm *rbm2)
 {
-	return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
+	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
 	       (rbm1->offset == rbm2->offset);
 }
 
@@ -278,6 +286,20 @@ struct gfs2_blkreserv {
 	unsigned int rs_qa_qd_num;
 };
 
+/*
+ * Allocation parameters
+ * @target: The number of blocks we'd ideally like to allocate
+ * @aflags: The flags (e.g. Orlov flag)
+ *
+ * The intent is to gradually expand this structure over time in
+ * order to give more information, e.g. alignment, min extent size
+ * to the allocation code.
+ */
+struct gfs2_alloc_parms {
+	u32 target;
+	u32 aflags;
+};
+
 enum {
 	GLF_LOCK			= 1,
 	GLF_DEMOTE			= 3,
@@ -300,9 +322,9 @@ struct gfs2_glock {
 	struct gfs2_sbd *gl_sbd;
 	unsigned long gl_flags;		/* GLF_... */
 	struct lm_lockname gl_name;
-	atomic_t gl_ref;
 
-	spinlock_t gl_spin;
+	struct lockref gl_lockref;
+#define gl_spin gl_lockref.lock
 
 	/* State fields protected by gl_spin */
 	unsigned int gl_state:2,	/* Current state */
@@ -398,11 +420,10 @@ enum {
 
 struct gfs2_quota_data {
 	struct list_head qd_list;
-	struct list_head qd_reclaim;
-
-	atomic_t qd_count;
-
 	struct kqid qd_id;
+	struct lockref qd_lockref;
+	struct list_head qd_lru;
+
 	unsigned long qd_flags;		/* QDF_... */
 
 	s64 qd_change;
@@ -516,7 +537,6 @@ struct gfs2_tune {
 
 	unsigned int gt_logd_secs;
 
-	unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
 	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
 	unsigned int gt_quota_scale_num; /* Numerator */
 	unsigned int gt_quota_scale_den; /* Denominator */
@@ -694,6 +714,7 @@ struct gfs2_sbd {
 	struct list_head sd_quota_list;
 	atomic_t sd_quota_count;
 	struct mutex sd_quota_mutex;
+	struct mutex sd_quota_sync_mutex;
 	wait_queue_head_t sd_quota_wait;
 	struct list_head sd_trunc_list;
 	spinlock_t sd_trunc_lock;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index ced3257f06e8..109ce9325b76 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -379,6 +379,7 @@ static void munge_mode_uid_gid(const struct gfs2_inode *dip,
 static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	struct gfs2_alloc_parms ap = { .target = RES_DINODE, .aflags = flags, };
 	int error;
 	int dblocks = 1;
 
@@ -386,7 +387,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
 	if (error)
 		goto out;
 
-	error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
+	error = gfs2_inplace_reserve(ip, &ap);
 	if (error)
 		goto out_quota;
 
@@ -472,6 +473,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
 			struct gfs2_inode *ip, int arq)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
+	struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
 	int error;
 
 	if (arq) {
@@ -479,7 +481,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
 		if (error)
 			goto fail_quota_locks;
 
-		error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+		error = gfs2_inplace_reserve(dip, &ap);
 		if (error)
 			goto fail_quota_locks;
 
@@ -584,17 +586,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (!IS_ERR(inode)) {
 		d = d_splice_alias(inode, dentry);
 		error = 0;
-		if (file && !IS_ERR(d)) {
-			if (d == NULL)
-				d = dentry;
-			if (S_ISREG(inode->i_mode))
-				error = finish_open(file, d, gfs2_open_common, opened);
-			else
-				error = finish_no_open(file, d);
+		if (file) {
+			if (S_ISREG(inode->i_mode)) {
+				WARN_ON(d != NULL);
+				error = finish_open(file, dentry, gfs2_open_common, opened);
+			} else {
+				error = finish_no_open(file, d);
+			}
+		} else {
+			dput(d);
 		}
 		gfs2_glock_dq_uninit(ghs);
-		if (IS_ERR(d))
-			return PTR_ERR(d);
 		return error;
 	} else if (error != -ENOENT) {
 		goto fail_gunlock;
@@ -713,7 +715,7 @@ fail_gunlock2:
 fail_free_inode:
 	if (ip->i_gl)
 		gfs2_glock_put(ip->i_gl);
-	gfs2_rs_delete(ip);
+	gfs2_rs_delete(ip, NULL);
 	free_inode_nonrcu(inode);
 	inode = NULL;
 fail_gunlock:
@@ -781,8 +783,10 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
 		error = finish_open(file, dentry, gfs2_open_common, opened);
 
 	gfs2_glock_dq_uninit(&gh);
-	if (error)
+	if (error) {
+		dput(d);
 		return ERR_PTR(error);
+	}
 	return d;
 }
 
@@ -874,11 +878,12 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
 	error = 0;
 
 	if (alloc_required) {
+		struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
 		error = gfs2_quota_lock_check(dip);
 		if (error)
 			goto out_gunlock;
 
-		error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+		error = gfs2_inplace_reserve(dip, &ap);
 		if (error)
 			goto out_gunlock_q;
 
@@ -1163,14 +1168,16 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
 	d = __gfs2_lookup(dir, dentry, file, opened);
 	if (IS_ERR(d))
 		return PTR_ERR(d);
-	if (d == NULL)
-		d = dentry;
-	if (d->d_inode) {
+	if (d != NULL)
+		dentry = d;
+	if (dentry->d_inode) {
 		if (!(*opened & FILE_OPENED))
-			return finish_no_open(file, d);
+			return finish_no_open(file, dentry);
+		dput(d);
 		return 0;
 	}
 
+	BUG_ON(d != NULL);
 	if (!(flags & O_CREAT))
 		return -ENOENT;
 
@@ -1385,11 +1392,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 		goto out_gunlock;
 
 	if (alloc_required) {
+		struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
 		error = gfs2_quota_lock_check(ndip);
 		if (error)
 			goto out_gunlock;
 
-		error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres, 0);
+		error = gfs2_inplace_reserve(ndip, &ap);
 		if (error)
 			goto out_gunlock_q;
 
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 351586e24e30..0650db2541ef 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -31,12 +31,6 @@
 
 struct workqueue_struct *gfs2_control_wq;
 
-static struct shrinker qd_shrinker = {
-	.count_objects = gfs2_qd_shrink_count,
-	.scan_objects = gfs2_qd_shrink_scan,
-	.seeks = DEFAULT_SEEKS,
-};
-
 static void gfs2_init_inode_once(void *foo)
 {
 	struct gfs2_inode *ip = foo;
@@ -87,6 +81,10 @@ static int __init init_gfs2_fs(void)
 	if (error)
 		return error;
 
+	error = list_lru_init(&gfs2_qd_lru);
+	if (error)
+		goto fail_lru;
+
 	error = gfs2_glock_init();
 	if (error)
 		goto fail;
@@ -139,7 +137,7 @@ static int __init init_gfs2_fs(void)
 	if (!gfs2_rsrv_cachep)
 		goto fail;
 
-	register_shrinker(&qd_shrinker);
+	register_shrinker(&gfs2_qd_shrinker);
 
 	error = register_filesystem(&gfs2_fs_type);
 	if (error)
@@ -179,7 +177,9 @@ fail_wq:
 fail_unregister:
 	unregister_filesystem(&gfs2_fs_type);
 fail:
-	unregister_shrinker(&qd_shrinker);
+	list_lru_destroy(&gfs2_qd_lru);
+fail_lru:
+	unregister_shrinker(&gfs2_qd_shrinker);
 	gfs2_glock_exit();
 
 	if (gfs2_rsrv_cachep)
@@ -214,13 +214,14 @@ fail:
 
 static void __exit exit_gfs2_fs(void)
 {
-	unregister_shrinker(&qd_shrinker);
+	unregister_shrinker(&gfs2_qd_shrinker);
 	gfs2_glock_exit();
 	gfs2_unregister_debugfs();
 	unregister_filesystem(&gfs2_fs_type);
 	unregister_filesystem(&gfs2meta_fs_type);
 	destroy_workqueue(gfs_recovery_wq);
 	destroy_workqueue(gfs2_control_wq);
+	list_lru_destroy(&gfs2_qd_lru);
 
 	rcu_barrier();
 
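With the shrinker definition moved into quota.c, module init and exit now also own a list_lru. The teardown order in exit_gfs2_fs() is the important part: the shrinker is unregistered before the LRU it walks is destroyed, so no reclaim scan can race with list_lru_destroy(). A condensed sketch of that pairing (the example_* function names are illustrative; the gfs2_qd_* symbols are the ones declared in quota.h below):

	#include <linux/list_lru.h>
	#include <linux/shrinker.h>

	extern struct list_lru gfs2_qd_lru;
	extern struct shrinker gfs2_qd_shrinker;

	static int __init example_init(void)
	{
		int error = list_lru_init(&gfs2_qd_lru);	/* per-node lists */
		if (error)
			return error;

		register_shrinker(&gfs2_qd_shrinker);	/* reclaim may now walk the lru */
		return 0;
	}

	static void __exit example_exit(void)
	{
		unregister_shrinker(&gfs2_qd_shrinker);	/* stop scans first... */
		list_lru_destroy(&gfs2_qd_lru);		/* ...then free the lists */
	}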
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 19ff5e8c285c..82303b474958 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -51,7 +51,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
 {
 	spin_lock_init(&gt->gt_spin);
 
-	gt->gt_quota_simul_sync = 64;
 	gt->gt_quota_warn_period = 10;
 	gt->gt_quota_scale_num = 1;
 	gt->gt_quota_scale_den = 1;
@@ -94,6 +93,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
 	INIT_LIST_HEAD(&sdp->sd_quota_list);
 	mutex_init(&sdp->sd_quota_mutex);
+	mutex_init(&sdp->sd_quota_sync_mutex);
 	init_waitqueue_head(&sdp->sd_quota_wait);
 	INIT_LIST_HEAD(&sdp->sd_trunc_list);
 	spin_lock_init(&sdp->sd_trunc_lock);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index db441359ee8c..453b50eaddec 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -50,6 +50,8 @@
 #include <linux/freezer.h>
 #include <linux/quota.h>
 #include <linux/dqblk_xfs.h>
+#include <linux/lockref.h>
+#include <linux/list_lru.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -71,29 +73,25 @@ struct gfs2_quota_change_host {
 	struct kqid qc_id;
 };
 
-static LIST_HEAD(qd_lru_list);
-static atomic_t qd_lru_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(qd_lru_lock);
+/* Lock order: qd_lock -> qd->lockref.lock -> lru lock */
+static DEFINE_SPINLOCK(qd_lock);
+struct list_lru gfs2_qd_lru;
 
-unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
-				  struct shrink_control *sc)
+static void gfs2_qd_dispose(struct list_head *list)
 {
 	struct gfs2_quota_data *qd;
 	struct gfs2_sbd *sdp;
-	int nr_to_scan = sc->nr_to_scan;
-	long freed = 0;
 
-	if (!(sc->gfp_mask & __GFP_FS))
-		return SHRINK_STOP;
-
-	spin_lock(&qd_lru_lock);
-	while (nr_to_scan && !list_empty(&qd_lru_list)) {
-		qd = list_entry(qd_lru_list.next,
-				struct gfs2_quota_data, qd_reclaim);
+	while (!list_empty(list)) {
+		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
 		sdp = qd->qd_gl->gl_sbd;
 
+		list_del(&qd->qd_lru);
+
 		/* Free from the filesystem-specific list */
+		spin_lock(&qd_lock);
 		list_del(&qd->qd_list);
+		spin_unlock(&qd_lock);
 
 		gfs2_assert_warn(sdp, !qd->qd_change);
 		gfs2_assert_warn(sdp, !qd->qd_slot_count);
@@ -103,24 +101,59 @@ unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 		atomic_dec(&sdp->sd_quota_count);
 
 		/* Delete it from the common reclaim list */
-		list_del_init(&qd->qd_reclaim);
-		atomic_dec(&qd_lru_count);
-		spin_unlock(&qd_lru_lock);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
-		spin_lock(&qd_lru_lock);
-		nr_to_scan--;
-		freed++;
 	}
-	spin_unlock(&qd_lru_lock);
+}
+
+
+static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
+{
+	struct list_head *dispose = arg;
+	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
+
+	if (!spin_trylock(&qd->qd_lockref.lock))
+		return LRU_SKIP;
+
+	if (qd->qd_lockref.count == 0) {
+		lockref_mark_dead(&qd->qd_lockref);
+		list_move(&qd->qd_lru, dispose);
+	}
+
+	spin_unlock(&qd->qd_lockref.lock);
+	return LRU_REMOVED;
+}
+
+static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+					 struct shrink_control *sc)
+{
+	LIST_HEAD(dispose);
+	unsigned long freed;
+
+	if (!(sc->gfp_mask & __GFP_FS))
+		return SHRINK_STOP;
+
+	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
+				   &dispose, &sc->nr_to_scan);
+
+	gfs2_qd_dispose(&dispose);
+
 	return freed;
 }
 
-unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
-				   struct shrink_control *sc)
+static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
 {
-	return vfs_pressure_ratio(atomic_read(&qd_lru_count));
+	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
 }
 
+struct shrinker gfs2_qd_shrinker = {
+	.count_objects = gfs2_qd_shrink_count,
+	.scan_objects = gfs2_qd_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+	.flags = SHRINKER_NUMA_AWARE,
+};
+
+
 static u64 qd2index(struct gfs2_quota_data *qd)
 {
 	struct kqid qid = qd->qd_id;
@@ -148,10 +181,11 @@ static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
 	if (!qd)
 		return -ENOMEM;
 
-	atomic_set(&qd->qd_count, 1);
+	qd->qd_lockref.count = 1;
+	spin_lock_init(&qd->qd_lockref.lock);
 	qd->qd_id = qid;
 	qd->qd_slot = -1;
-	INIT_LIST_HEAD(&qd->qd_reclaim);
+	INIT_LIST_HEAD(&qd->qd_lru);
 
 	error = gfs2_glock_get(sdp, qd2index(qd),
 			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
@@ -177,16 +211,11 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 
 	for (;;) {
 		found = 0;
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-			if (qid_eq(qd->qd_id, qid)) {
-				if (!atomic_read(&qd->qd_count) &&
-				    !list_empty(&qd->qd_reclaim)) {
-					/* Remove it from reclaim list */
-					list_del_init(&qd->qd_reclaim);
-					atomic_dec(&qd_lru_count);
-				}
-				atomic_inc(&qd->qd_count);
+			if (qid_eq(qd->qd_id, qid) &&
+			    lockref_get_not_dead(&qd->qd_lockref)) {
+				list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 				found = 1;
 				break;
 			}
@@ -202,7 +231,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 			new_qd = NULL;
 		}
 
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		if (qd) {
 			if (new_qd) {
@@ -222,18 +251,19 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 static void qd_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-	gfs2_assert(sdp, atomic_read(&qd->qd_count));
-	atomic_inc(&qd->qd_count);
+	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
+	lockref_get(&qd->qd_lockref);
 }
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
-	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
-		/* Add to the reclaim list */
-		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
-		atomic_inc(&qd_lru_count);
-		spin_unlock(&qd_lru_lock);
-	}
+	if (lockref_put_or_lock(&qd->qd_lockref))
+		return;
+
+	qd->qd_lockref.count = 0;
+	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
+	spin_unlock(&qd->qd_lockref.lock);
+
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
@@ -242,10 +272,10 @@ static int slot_get(struct gfs2_quota_data *qd)
 	unsigned int c, o = 0, b;
 	unsigned char byte = 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 
 	if (qd->qd_slot_count++) {
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 		return 0;
 	}
 
@@ -269,13 +299,13 @@ found:
 
 	sdp->sd_quota_bitmap[c][o] |= 1 << b;
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	return 0;
 
 fail:
 	qd->qd_slot_count--;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 	return -ENOSPC;
 }
 
@@ -283,23 +313,43 @@ static void slot_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	qd->qd_slot_count++;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
+}
+
+static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+			     unsigned int bit, int new_value)
+{
+	unsigned int c, o, b = bit;
+	int old_value;
+
+	c = b / (8 * PAGE_SIZE);
+	b %= 8 * PAGE_SIZE;
+	o = b / 8;
+	b %= 8;
+
+	old_value = (bitmap[c][o] & (1 << b));
+	gfs2_assert_withdraw(sdp, !old_value != !new_value);
+
+	if (new_value)
+		bitmap[c][o] |= 1 << b;
+	else
+		bitmap[c][o] &= ~(1 << b);
 }
 
 static void slot_put(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	if (!--qd->qd_slot_count) {
 		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
 		qd->qd_slot = -1;
 	}
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 }
 
 static int bh_get(struct gfs2_quota_data *qd)
@@ -363,6 +413,25 @@ static void bh_put(struct gfs2_quota_data *qd)
 	mutex_unlock(&sdp->sd_quota_mutex);
 }
 
+static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+			 u64 *sync_gen)
+{
+	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+		return 0;
+
+	if (!lockref_get_not_dead(&qd->qd_lockref))
+		return 0;
+
+	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+	set_bit(QDF_LOCKED, &qd->qd_flags);
+	qd->qd_change_sync = qd->qd_change;
+	gfs2_assert_warn(sdp, qd->qd_slot_count);
+	qd->qd_slot_count++;
+	return 1;
+}
+
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 {
 	struct gfs2_quota_data *qd = NULL;
@@ -374,31 +443,18 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 		return 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 
 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
-		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
-			continue;
-
-		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-		set_bit(QDF_LOCKED, &qd->qd_flags);
-		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-		atomic_inc(&qd->qd_count);
-		qd->qd_change_sync = qd->qd_change;
-		gfs2_assert_warn(sdp, qd->qd_slot_count);
-		qd->qd_slot_count++;
-		found = 1;
-
-		break;
+		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
+		if (found)
+			break;
 	}
 
 	if (!found)
 		qd = NULL;
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	if (qd) {
 		gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -416,43 +472,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	return 0;
 }
 
-static int qd_trylock(struct gfs2_quota_data *qd)
-{
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
-	if (sdp->sd_vfs->s_flags & MS_RDONLY)
-		return 0;
-
-	spin_lock(&qd_lru_lock);
-
-	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
-		spin_unlock(&qd_lru_lock);
-		return 0;
-	}
-
-	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-	set_bit(QDF_LOCKED, &qd->qd_flags);
-	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-	atomic_inc(&qd->qd_count);
-	qd->qd_change_sync = qd->qd_change;
-	gfs2_assert_warn(sdp, qd->qd_slot_count);
-	qd->qd_slot_count++;
-
-	spin_unlock(&qd_lru_lock);
-
-	gfs2_assert_warn(sdp, qd->qd_change_sync);
-	if (bh_get(qd)) {
-		clear_bit(QDF_LOCKED, &qd->qd_flags);
-		slot_put(qd);
-		qd_put(qd);
-		return 0;
-	}
-
-	return 1;
-}
-
 static void qd_unlock(struct gfs2_quota_data *qd)
 {
 	gfs2_assert_warn(qd->qd_gl->gl_sbd,
@@ -602,9 +621,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	x = be64_to_cpu(qc->qc_change) + change;
 	qc->qc_change = cpu_to_be64(x);
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	qd->qd_change = x;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	if (!x) {
 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
@@ -763,6 +782,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 {
 	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned int data_blocks, ind_blocks;
 	struct gfs2_holder *ghs, i_gh;
 	unsigned int qx, x;
@@ -815,7 +835,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 
 	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
-	error = gfs2_inplace_reserve(ip, reserved, 0);
+	ap.target = reserved;
+	error = gfs2_inplace_reserve(ip, &ap);
 	if (error)
 		goto out_alloc;
 
@@ -974,9 +995,9 @@ static int need_sync(struct gfs2_quota_data *qd)
 	if (!qd->qd_qb.qb_limit)
 		return 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	value = qd->qd_change;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	spin_lock(&gt->gt_spin);
 	num = gt->gt_quota_scale_num;
@@ -1001,9 +1022,11 @@ static int need_sync(struct gfs2_quota_data *qd)
 
 void gfs2_quota_unlock(struct gfs2_inode *ip)
 {
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_quota_data *qda[4];
 	unsigned int count = 0;
 	unsigned int x;
+	int found;
 
 	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
 		goto out;
@@ -1016,9 +1039,25 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
 		sync = need_sync(qd);
 
 		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
+		if (!sync)
+			continue;
+
+		spin_lock(&qd_lock);
+		found = qd_check_sync(sdp, qd, NULL);
+		spin_unlock(&qd_lock);
+
+		if (!found)
+			continue;
 
-		if (sync && qd_trylock(qd))
-			qda[count++] = qd;
+		gfs2_assert_warn(sdp, qd->qd_change_sync);
+		if (bh_get(qd)) {
+			clear_bit(QDF_LOCKED, &qd->qd_flags);
+			slot_put(qd);
+			qd_put(qd);
+			continue;
+		}
+
+		qda[count++] = qd;
 	}
 
 	if (count) {
@@ -1067,9 +1106,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 			continue;
 
 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		value += qd->qd_change;
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
 			print_message(qd, "exceeded");
@@ -1118,17 +1157,18 @@ int gfs2_quota_sync(struct super_block *sb, int type)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_data **qda;
-	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
+	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
 	unsigned int num_qd;
 	unsigned int x;
 	int error = 0;
 
-	sdp->sd_quota_sync_gen++;
-
 	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
 	if (!qda)
 		return -ENOMEM;
 
+	mutex_lock(&sdp->sd_quota_sync_mutex);
+	sdp->sd_quota_sync_gen++;
+
 	do {
 		num_qd = 0;
 
@@ -1153,6 +1193,7 @@ int gfs2_quota_sync(struct super_block *sb, int type)
 		}
 	} while (!error && num_qd == max_qd);
 
+	mutex_unlock(&sdp->sd_quota_sync_mutex);
 	kfree(qda);
 
 	return error;
@@ -1258,11 +1299,11 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
 			qd->qd_slot = slot;
 			qd->qd_slot_count = 1;
 
-			spin_lock(&qd_lru_lock);
+			spin_lock(&qd_lock);
 			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
 			list_add(&qd->qd_list, &sdp->sd_quota_list);
 			atomic_inc(&sdp->sd_quota_count);
-			spin_unlock(&qd_lru_lock);
+			spin_unlock(&qd_lock);
 
 			found++;
 		}
@@ -1288,30 +1329,34 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 	struct gfs2_quota_data *qd;
 	unsigned int x;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	while (!list_empty(head)) {
 		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
-		if (atomic_read(&qd->qd_count) > 1 ||
-		    (atomic_read(&qd->qd_count) &&
-		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
+		/*
+		 * To be removed in due course... we should be able to
+		 * ensure that all refs to the qd have done by this point
+		 * so that this rather odd test is not required
+		 */
+		spin_lock(&qd->qd_lockref.lock);
+		if (qd->qd_lockref.count > 1 ||
+		    (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
+			spin_unlock(&qd->qd_lockref.lock);
 			list_move(&qd->qd_list, head);
-			spin_unlock(&qd_lru_lock);
+			spin_unlock(&qd_lock);
 			schedule();
-			spin_lock(&qd_lru_lock);
+			spin_lock(&qd_lock);
 			continue;
 		}
+		spin_unlock(&qd->qd_lockref.lock);
 
 		list_del(&qd->qd_list);
 		/* Also remove if this qd exists in the reclaim list */
-		if (!list_empty(&qd->qd_reclaim)) {
-			list_del_init(&qd->qd_reclaim);
-			atomic_dec(&qd_lru_count);
-		}
+		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
 		atomic_dec(&sdp->sd_quota_count);
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
-		if (!atomic_read(&qd->qd_count)) {
+		if (!qd->qd_lockref.count) {
 			gfs2_assert_warn(sdp, !qd->qd_change);
 			gfs2_assert_warn(sdp, !qd->qd_slot_count);
 		} else
@@ -1321,9 +1366,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 		gfs2_glock_put(qd->qd_gl);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
 
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 	}
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
 
@@ -1462,7 +1507,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 	}
 	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
 	fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
-	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
+	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
 	return 0;
 }
 
@@ -1573,10 +1618,12 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 	if (gfs2_is_stuffed(ip))
 		alloc_required = 1;
 	if (alloc_required) {
+		struct gfs2_alloc_parms ap = { .aflags = 0, };
 		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 				       &data_blocks, &ind_blocks);
 		blocks = 1 + data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip, blocks, 0);
+		ap.target = blocks;
+		error = gfs2_inplace_reserve(ip, &ap);
 		if (error)
 			goto out_i;
 		blocks += gfs2_rg_blocks(ip, blocks);
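The quota shrinker rework above is a textbook two-phase list_lru reclaim: gfs2_qd_isolate() runs under the per-node LRU lock, takes only trylocks, and moves zero-count entries onto a private dispose list after lockref_mark_dead() has fenced them from qd_get(); gfs2_qd_dispose() then frees the victims with no LRU lock held. The non-obvious part is the isolate callback's contract, annotated here as a sketch that mirrors the code above (comments are editorial, not from the patch):

	/* Runs with the per-node lru lock held: must not sleep or take
	 * blocking locks, hence the trylock and LRU_SKIP below. */
	static enum lru_status example_isolate(struct list_head *item,
					       spinlock_t *lru_lock, void *arg)
	{
		struct list_head *dispose = arg;
		struct gfs2_quota_data *qd =
			list_entry(item, struct gfs2_quota_data, qd_lru);

		if (!spin_trylock(&qd->qd_lockref.lock))
			return LRU_SKIP;		/* contended: retry on a later scan */

		if (qd->qd_lockref.count == 0) {	/* only unreferenced entries */
			lockref_mark_dead(&qd->qd_lockref); /* fences qd_get() */
			list_move(&qd->qd_lru, dispose);    /* off the lru, onto our list */
		}
		spin_unlock(&qd->qd_lockref.lock);
		return LRU_REMOVED;
	}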
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 0f64d9deb1b0..96e4f34a03b0 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -10,9 +10,10 @@
 #ifndef __QUOTA_DOT_H__
 #define __QUOTA_DOT_H__
 
+#include <linux/list_lru.h>
+
 struct gfs2_inode;
 struct gfs2_sbd;
-struct shrink_control;
 
 #define NO_UID_QUOTA_CHANGE INVALID_UID
 #define NO_GID_QUOTA_CHANGE INVALID_GID
@@ -53,10 +54,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
 	return ret;
 }
 
-extern unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
-					  struct shrink_control *sc);
-extern unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
-					 struct shrink_control *sc);
 extern const struct quotactl_ops gfs2_quotactl_ops;
+extern struct shrinker gfs2_qd_shrinker;
+extern struct list_lru gfs2_qd_lru;
 
 #endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 69317435faa7..4d83abdd5635 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -81,11 +81,12 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
81 unsigned char new_state) 81 unsigned char new_state)
82{ 82{
83 unsigned char *byte1, *byte2, *end, cur_state; 83 unsigned char *byte1, *byte2, *end, cur_state;
84 unsigned int buflen = rbm->bi->bi_len; 84 struct gfs2_bitmap *bi = rbm_bi(rbm);
85 unsigned int buflen = bi->bi_len;
85 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; 86 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
86 87
87 byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY); 88 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
88 end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen; 89 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
89 90
90 BUG_ON(byte1 >= end); 91 BUG_ON(byte1 >= end);
91 92
@@ -95,18 +96,17 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
95 printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, " 96 printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
96 "new_state=%d\n", rbm->offset, cur_state, new_state); 97 "new_state=%d\n", rbm->offset, cur_state, new_state);
97 printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n", 98 printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
98 (unsigned long long)rbm->rgd->rd_addr, 99 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
99 rbm->bi->bi_start);
100 printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n", 100 printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
101 rbm->bi->bi_offset, rbm->bi->bi_len); 101 bi->bi_offset, bi->bi_len);
102 dump_stack(); 102 dump_stack();
103 gfs2_consist_rgrpd(rbm->rgd); 103 gfs2_consist_rgrpd(rbm->rgd);
104 return; 104 return;
105 } 105 }
106 *byte1 ^= (cur_state ^ new_state) << bit; 106 *byte1 ^= (cur_state ^ new_state) << bit;
107 107
108 if (do_clone && rbm->bi->bi_clone) { 108 if (do_clone && bi->bi_clone) {
109 byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY); 109 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
110 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK; 110 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
111 *byte2 ^= (cur_state ^ new_state) << bit; 111 *byte2 ^= (cur_state ^ new_state) << bit;
112 } 112 }
@@ -121,7 +121,8 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
121 121
122static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm) 122static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
123{ 123{
124 const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset; 124 struct gfs2_bitmap *bi = rbm_bi(rbm);
125 const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
125 const u8 *byte; 126 const u8 *byte;
126 unsigned int bit; 127 unsigned int bit;
127 128
@@ -252,29 +253,53 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 {
 	u64 rblock = block - rbm->rgd->rd_data0;
-	u32 x;
 
 	if (WARN_ON_ONCE(rblock > UINT_MAX))
 		return -EINVAL;
 	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
 		return -E2BIG;
 
-	rbm->bi = rbm->rgd->rd_bits;
+	rbm->bii = 0;
 	rbm->offset = (u32)(rblock);
 	/* Check if the block is within the first block */
-	if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
 		return 0;
 
 	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
 	rbm->offset += (sizeof(struct gfs2_rgrp) -
 			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
-	x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-	rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-	rbm->bi += x;
+	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
 	return 0;
 }
 
 /**
+ * gfs2_rbm_incr - increment an rbm structure
+ * @rbm: The rbm with rgd already set correctly
+ *
+ * This function takes an existing rbm structure and increments it to the next
+ * viable block offset.
+ *
+ * Returns: If incrementing the offset would cause the rbm to go past the
+ * end of the rgrp, true is returned, otherwise false.
+ *
+ */
+
+static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+{
+	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
+		rbm->offset++;
+		return false;
+	}
+	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
+		return true;
+
+	rbm->offset = 0;
+	rbm->bii++;
+	return false;
+}
+
+/**
  * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
  * @rbm: Position to search (value/result)
  * @n_unaligned: Number of unaligned blocks to check
@@ -285,7 +310,6 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 
 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 {
-	u64 block;
 	u32 n;
 	u8 res;
 
@@ -296,8 +320,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
 		(*len)--;
 		if (*len == 0)
 			return true;
-		block = gfs2_rbm_to_block(rbm);
-		if (gfs2_rbm_from_block(rbm, block + 1))
+		if (gfs2_rbm_incr(rbm))
 			return true;
 	}
 
@@ -328,6 +351,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
 	u32 chunk_size;
 	u8 *ptr, *start, *end;
 	u64 block;
+	struct gfs2_bitmap *bi;
 
 	if (n_unaligned &&
 	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
@@ -336,11 +360,12 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
 	n_unaligned = len & 3;
 	/* Start is now byte aligned */
 	while (len > 3) {
-		start = rbm.bi->bi_bh->b_data;
-		if (rbm.bi->bi_clone)
-			start = rbm.bi->bi_clone;
-		end = start + rbm.bi->bi_bh->b_size;
-		start += rbm.bi->bi_offset;
+		bi = rbm_bi(&rbm);
+		start = bi->bi_bh->b_data;
+		if (bi->bi_clone)
+			start = bi->bi_clone;
+		end = start + bi->bi_bh->b_size;
+		start += bi->bi_offset;
 		BUG_ON(rbm.offset & 3);
 		start += (rbm.offset / GFS2_NBBY);
 		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -605,11 +630,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
 	RB_CLEAR_NODE(&rs->rs_node);
 
 	if (rs->rs_free) {
+		struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+
 		/* return reserved blocks to the rgrp */
 		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
 		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
 		rs->rs_free = 0;
-		clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
+		clear_bit(GBF_FULL, &bi->bi_flags);
 		smp_mb__after_clear_bit();
 	}
 }
@@ -634,14 +661,13 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 /**
  * gfs2_rs_delete - delete a multi-block reservation
  * @ip: The inode for this reservation
+ * @wcount: The inode's write count, or NULL
  *
  */
-void gfs2_rs_delete(struct gfs2_inode *ip)
+void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
 {
-	struct inode *inode = &ip->i_inode;
-
 	down_write(&ip->i_rw_mutex);
-	if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
+	if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
 		gfs2_rs_deltree(ip->i_res);
 		BUG_ON(ip->i_res->rs_free);
 		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
@@ -743,18 +769,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
 			bi->bi_offset = sizeof(struct gfs2_rgrp);
 			bi->bi_start = 0;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		/* header block */
 		} else if (x == 0) {
 			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
 			bi->bi_offset = sizeof(struct gfs2_rgrp);
 			bi->bi_start = 0;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		/* last block */
 		} else if (x + 1 == length) {
 			bytes = bytes_left;
 			bi->bi_offset = sizeof(struct gfs2_meta_header);
 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		/* other blocks */
 		} else {
 			bytes = sdp->sd_sb.sb_bsize -
@@ -762,6 +791,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
 			bi->bi_offset = sizeof(struct gfs2_meta_header);
 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		}
 
 		bytes_left -= bytes;
@@ -1392,12 +1422,12 @@ static void rs_insert(struct gfs2_inode *ip)
  * rg_mblk_search - find a group of multiple free blocks to form a reservation
  * @rgd: the resource group descriptor
  * @ip: pointer to the inode for which we're reserving blocks
- * @requested: number of blocks required for this allocation
+ * @ap: the allocation parameters
  *
  */
 
 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
-			   unsigned requested)
+			   const struct gfs2_alloc_parms *ap)
 {
 	struct gfs2_rbm rbm = { .rgd = rgd, };
 	u64 goal;
@@ -1410,7 +1440,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 	if (S_ISDIR(inode->i_mode))
 		extlen = 1;
 	else {
-		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
 		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
 	}
 	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
@@ -1554,14 +1584,14 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
 			 const struct gfs2_inode *ip, bool nowrap)
 {
 	struct buffer_head *bh;
-	struct gfs2_bitmap *initial_bi;
+	int initial_bii;
 	u32 initial_offset;
 	u32 offset;
 	u8 *buffer;
-	int index;
 	int n = 0;
 	int iters = rbm->rgd->rd_length;
 	int ret;
+	struct gfs2_bitmap *bi;
 
 	/* If we are not starting at the beginning of a bitmap, then we
 	 * need to add one to the bitmap count to ensure that we search
@@ -1571,52 +1601,53 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
 		iters++;
 
 	while(1) {
-		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
+		bi = rbm_bi(rbm);
+		if (test_bit(GBF_FULL, &bi->bi_flags) &&
 		    (state == GFS2_BLKST_FREE))
 			goto next_bitmap;
 
-		bh = rbm->bi->bi_bh;
-		buffer = bh->b_data + rbm->bi->bi_offset;
+		bh = bi->bi_bh;
+		buffer = bh->b_data + bi->bi_offset;
 		WARN_ON(!buffer_uptodate(bh));
-		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
-			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
+		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+			buffer = bi->bi_clone + bi->bi_offset;
 		initial_offset = rbm->offset;
-		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
+		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
 		if (offset == BFITNOENT)
 			goto bitmap_full;
 		rbm->offset = offset;
 		if (ip == NULL)
 			return 0;
 
-		initial_bi = rbm->bi;
+		initial_bii = rbm->bii;
 		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
 		if (ret == 0)
 			return 0;
 		if (ret > 0) {
-			n += (rbm->bi - initial_bi);
+			n += (rbm->bii - initial_bii);
 			goto next_iter;
 		}
 		if (ret == -E2BIG) {
-			index = 0;
+			rbm->bii = 0;
 			rbm->offset = 0;
-			n += (rbm->bi - initial_bi);
+			n += (rbm->bii - initial_bii);
 			goto res_covered_end_of_rgrp;
 		}
 		return ret;
 
 bitmap_full:	/* Mark bitmap as full and fall through */
-		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
-			set_bit(GBF_FULL, &rbm->bi->bi_flags);
+		if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
+			struct gfs2_bitmap *bi = rbm_bi(rbm);
+			set_bit(GBF_FULL, &bi->bi_flags);
+		}
 
 next_bitmap:	/* Find next bitmap in the rgrp */
 		rbm->offset = 0;
-		index = rbm->bi - rbm->rgd->rd_bits;
-		index++;
-		if (index == rbm->rgd->rd_length)
-			index = 0;
+		rbm->bii++;
+		if (rbm->bii == rbm->rgd->rd_length)
+			rbm->bii = 0;
 res_covered_end_of_rgrp:
-		rbm->bi = &rbm->rgd->rd_bits[index];
-		if ((index == 0) && nowrap)
+		if ((rbm->bii == 0) && nowrap)
 			break;
 		n++;
 next_iter:
@@ -1645,7 +1676,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
 	struct gfs2_inode *ip;
 	int error;
 	int found = 0;
-	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
+	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
 
 	while (1) {
 		down_write(&sdp->sd_log_flush_lock);
@@ -1800,12 +1831,12 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *b
 /**
  * gfs2_inplace_reserve - Reserve space in the filesystem
  * @ip: the inode to reserve space for
- * @requested: the number of blocks to be reserved
+ * @ap: the allocation parameters
  *
  * Returns: errno
  */
 
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *begin = NULL;
@@ -1817,17 +1848,16 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
 	if (sdp->sd_args.ar_rgrplvb)
 		flags |= GL_SKIP;
-	if (gfs2_assert_warn(sdp, requested))
+	if (gfs2_assert_warn(sdp, ap->target))
 		return -EINVAL;
 	if (gfs2_rs_active(rs)) {
 		begin = rs->rs_rbm.rgd;
-		flags = 0; /* Yoda: Do or do not. There is no try */
 	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
 		rs->rs_rbm.rgd = begin = ip->i_rgd;
 	} else {
 		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
 	}
-	if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
 		skip = gfs2_orlov_skip(ip);
 	if (rs->rs_rbm.rgd == NULL)
 		return -EBADSLT;
@@ -1869,14 +1899,14 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
 		/* Get a reservation if we don't already have one */
 		if (!gfs2_rs_active(rs))
-			rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
+			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
 
 		/* Skip rgrps when we can't get a reservation on first pass */
 		if (!gfs2_rs_active(rs) && (loops < 1))
 			goto check_rgrp;
 
 		/* If rgrp has enough free space, use it */
-		if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
+		if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
 			ip->i_rgd = rs->rs_rbm.rgd;
 			return 0;
 		}
@@ -1973,14 +2003,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
 
 	*n = 1;
 	block = gfs2_rbm_to_block(rbm);
-	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
+	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
 	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
 	block++;
 	while (*n < elen) {
 		ret = gfs2_rbm_from_block(&pos, block);
 		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
 			break;
-		gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
+		gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
 		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
 		(*n)++;
 		block++;
@@ -2001,6 +2031,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 			      u32 blen, unsigned char new_state)
 {
 	struct gfs2_rbm rbm;
+	struct gfs2_bitmap *bi;
 
 	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
 	if (!rbm.rgd) {
@@ -2011,15 +2042,15 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 
 	while (blen--) {
 		gfs2_rbm_from_block(&rbm, bstart);
+		bi = rbm_bi(&rbm);
 		bstart++;
-		if (!rbm.bi->bi_clone) {
-			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
-						   GFP_NOFS | __GFP_NOFAIL);
-			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
-			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
-			       rbm.bi->bi_len);
+		if (!bi->bi_clone) {
+			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+					       GFP_NOFS | __GFP_NOFAIL);
+			memcpy(bi->bi_clone + bi->bi_offset,
+			       bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
 		}
-		gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
+		gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
 		gfs2_setbit(&rbm, false, new_state);
 	}
 
@@ -2103,6 +2134,35 @@ out:
 }
 
 /**
+ * gfs2_set_alloc_start - Set starting point for block allocation
+ * @rbm: The rbm which will be set to the required location
+ * @ip: The gfs2 inode
+ * @dinode: Flag to say if allocation includes a new inode
+ *
+ * This sets the starting point from the reservation if one is active
+ * otherwise it falls back to guessing a start point based on the
+ * inode's goal block or the last allocation point in the rgrp.
+ */
+
+static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
+				 const struct gfs2_inode *ip, bool dinode)
+{
+	u64 goal;
+
+	if (gfs2_rs_active(ip->i_res)) {
+		*rbm = ip->i_res->rs_rbm;
+		return;
+	}
+
+	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+		goal = ip->i_goal;
+	else
+		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
+
+	gfs2_rbm_from_block(rbm, goal);
+}
+
+/**
  * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
  * @ip: the inode to allocate the block for
  * @bn: Used to return the starting block number
@@ -2120,22 +2180,14 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 	struct buffer_head *dibh;
 	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
 	unsigned int ndata;
-	u64 goal;
 	u64 block; /* block, within the file system scope */
 	int error;
 
-	if (gfs2_rs_active(ip->i_res))
-		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
-	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
-		goal = ip->i_goal;
-	else
-		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
-
-	gfs2_rbm_from_block(&rbm, goal);
+	gfs2_set_alloc_start(&rbm, ip, dinode);
 	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
 
 	if (error == -ENOSPC) {
-		gfs2_rbm_from_block(&rbm, goal);
+		gfs2_set_alloc_start(&rbm, ip, dinode);
 		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
 	}
 
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 5b3f4a896e6c..3a10d2ffbbe7 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -40,7 +40,7 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
 extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
 
 #define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 
 extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
@@ -48,7 +48,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
 
 extern int gfs2_rs_alloc(struct gfs2_inode *ip);
 extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip);
+extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
 extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
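
rgrp.h now threads all allocation parameters through struct gfs2_alloc_parms rather than separate requested/flags arguments, which is why every caller converted in this series (aops.c, bmap.c, xattr.c) gains a small on-stack ap. A caller-side sketch; example_reserve() and its error handling are illustrative, not taken from the patch:

static int example_reserve(struct gfs2_inode *ip, u32 blks)
{
	struct gfs2_alloc_parms ap = { .target = blks, .aflags = 0, };
	int error;

	error = gfs2_quota_lock_check(ip);
	if (error)
		return error;

	/* All sizing and policy now travel in one struct. */
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		gfs2_quota_unlock(ip);
	return error;
}
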
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e5639dec66c4..35da5b19c0de 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1526,7 +1526,7 @@ out_unlock:
 out:
 	/* Case 3 starts here */
 	truncate_inode_pages(&inode->i_data, 0);
-	gfs2_rs_delete(ip);
+	gfs2_rs_delete(ip, NULL);
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index aa5c48044966..d09f6edda0ff 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -587,7 +587,6 @@ TUNE_ATTR(max_readahead, 0);
 TUNE_ATTR(complain_secs, 0);
 TUNE_ATTR(statfs_slow, 0);
 TUNE_ATTR(new_files_jdata, 0);
-TUNE_ATTR(quota_simul_sync, 1);
 TUNE_ATTR(statfs_quantum, 1);
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
@@ -597,7 +596,6 @@ static struct attribute *tune_attrs[] = {
 	&tune_attr_max_readahead.attr,
 	&tune_attr_complain_secs.attr,
 	&tune_attr_statfs_slow.attr,
-	&tune_attr_quota_simul_sync.attr,
 	&tune_attr_statfs_quantum.attr,
 	&tune_attr_quota_scale.attr,
 	&tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 6402fb69d71b..f7109f689e61 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -268,23 +268,3 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	return rv;
 }
 
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
-		      unsigned int bit, int new_value)
-{
-	unsigned int c, o, b = bit;
-	int old_value;
-
-	c = b / (8 * PAGE_SIZE);
-	b %= 8 * PAGE_SIZE;
-	o = b / 8;
-	b %= 8;
-
-	old_value = (bitmap[c][o] & (1 << b));
-	gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
-	if (new_value)
-		bitmap[c][o] |= 1 << b;
-	else
-		bitmap[c][o] &= ~(1 << b);
-}
-
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 80535739ac7b..b7ffb09b99ea 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -164,8 +164,6 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 #define gfs2_tune_get(sdp, field) \
 gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
 
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
-		      unsigned int bit, int new_value);
 int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
 
 #endif /* __UTIL_DOT_H__ */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index ecd37f30ab91..8c6a6f6bdba9 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -723,6 +723,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 			      unsigned int blks,
 			      ea_skeleton_call_t skeleton_call, void *private)
 {
+	struct gfs2_alloc_parms ap = { .target = blks };
 	struct buffer_head *dibh;
 	int error;
 
@@ -734,7 +735,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 	if (error)
 		return error;
 
-	error = gfs2_inplace_reserve(ip, blks, 0);
+	error = gfs2_inplace_reserve(ip, &ap);
 	if (error)
 		goto out_gunlock_q;
 
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index f279ed9a9163..13dfd36a3294 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -36,4 +36,10 @@ extern int lockref_put_or_lock(struct lockref *);
 extern void lockref_mark_dead(struct lockref *);
 extern int lockref_get_not_dead(struct lockref *);
 
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+	return ((int)l->count < 0);
+}
+
 #endif /* __LINUX_LOCKREF_H */
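
The new __lockref_is_dead() helper only gives a trustworthy answer while the lockref's own spinlock is held, because lockref_mark_dead() flips the count to -128 under that same lock. A sketch of the intended pattern, essentially the locked slow path of lockref_get_not_dead(); example_get_if_alive() is illustrative, not from the patch:

static int example_get_if_alive(struct lockref *ref)
{
	int got = 0;

	spin_lock(&ref->lock);
	if (!__lockref_is_dead(ref)) {
		ref->count++;		/* safe: dead refs are rejected */
		got = 1;
	}
	spin_unlock(&ref->lock);
	return got;
}
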
diff --git a/lib/lockref.c b/lib/lockref.c
index 6f9d434c1521..af6e95d0bed6 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -153,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref)
 	assert_spin_locked(&lockref->lock);
 	lockref->count = -128;
 }
+EXPORT_SYMBOL(lockref_mark_dead);
 
 /**
  * lockref_get_not_dead - Increments count unless the ref is dead
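
Exporting lockref_mark_dead() lets modular code such as gfs2.ko (whose glock.c conversion to lockref appears in the diffstat) retire a lockref itself; the assert_spin_locked() above means the caller must already hold the lock. A minimal sketch of that call pattern; example_kill() is illustrative:

static void example_kill(struct lockref *ref)
{
	spin_lock(&ref->lock);
	lockref_mark_dead(ref);	/* count becomes -128; later gets fail */
	spin_unlock(&ref->lock);
}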