Diffstat (limited to 'fs/quota')

 fs/quota/Kconfig      |  10
 fs/quota/dquot.c      | 418
 fs/quota/quota.c      |  93
 fs/quota/quota_v1.c   |   2
 fs/quota/quota_v2.c   | 170
 fs/quota/quotaio_v2.h |  19

 6 files changed, 431 insertions, 281 deletions
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 8047e01ef46b..efc02ebb8c70 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -17,7 +17,7 @@ config QUOTA
 
 config QUOTA_NETLINK_INTERFACE
 	bool "Report quota messages through netlink interface"
-	depends on QUOTA && NET
+	depends on QUOTACTL && NET
 	help
 	  If you say Y here, quota warnings (about exceeding softlimit, reaching
 	  hardlimit, etc.) will be reported through netlink interface. If unsure,
@@ -46,12 +46,14 @@ config QFMT_V1
 	  format say Y here.
 
 config QFMT_V2
-	tristate "Quota format v2 support"
+	tristate "Quota format vfsv0 and vfsv1 support"
 	depends on QUOTA
 	select QUOTA_TREE
 	help
-	  This quota format allows using quotas with 32-bit UIDs/GIDs. If you
-	  need this functionality say Y here.
+	  This config option enables kernel support for vfsv0 and vfsv1 quota
+	  formats. Both these formats support 32-bit UIDs/GIDs and vfsv1 format
+	  also supports 64-bit inode and block quota limits. If you need this
+	  functionality say Y here.
 
 config QUOTACTL
 	bool
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 39b49c42a7ed..dea86abdf2e7 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -77,10 +77,6 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h> /* for inode_lock, oddly enough.. */
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-#include <net/netlink.h>
-#include <net/genetlink.h>
-#endif
 
 #include <asm/uaccess.h>
 
@@ -327,6 +323,30 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
 }
 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 
+/* Dirtify all the dquots - this can block when journalling */
+static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
+{
+	int ret, err, cnt;
+
+	ret = err = 0;
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		if (dquot[cnt])
+			/* Even in case of error we have to continue */
+			ret = mark_dquot_dirty(dquot[cnt]);
+		if (!err)
+			err = ret;
+	}
+	return err;
+}
+
+static inline void dqput_all(struct dquot **dquot)
+{
+	unsigned int cnt;
+
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		dqput(dquot[cnt]);
+}
+
 /* This function needs dq_list_lock */
 static inline int clear_dquot_dirty(struct dquot *dquot)
 {
@@ -1071,73 +1091,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
 }
 #endif
 
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-
-/* Netlink family structure for quota */
-static struct genl_family quota_genl_family = {
-	.id = GENL_ID_GENERATE,
-	.hdrsize = 0,
-	.name = "VFS_DQUOT",
-	.version = 1,
-	.maxattr = QUOTA_NL_A_MAX,
-};
-
-/* Send warning to userspace about user which exceeded quota */
-static void send_warning(const struct dquot *dquot, const char warntype)
-{
-	static atomic_t seq;
-	struct sk_buff *skb;
-	void *msg_head;
-	int ret;
-	int msg_size = 4 * nla_total_size(sizeof(u32)) +
-		       2 * nla_total_size(sizeof(u64));
-
-	/* We have to allocate using GFP_NOFS as we are called from a
-	 * filesystem performing write and thus further recursion into
-	 * the fs to free some data could cause deadlocks. */
-	skb = genlmsg_new(msg_size, GFP_NOFS);
-	if (!skb) {
-		printk(KERN_ERR
-		       "VFS: Not enough memory to send quota warning.\n");
-		return;
-	}
-	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
-			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
-	if (!msg_head) {
-		printk(KERN_ERR
-		       "VFS: Cannot store netlink header in quota warning.\n");
-		goto err_out;
-	}
-	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
-			MAJOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
-			MINOR(dquot->dq_sb->s_dev));
-	if (ret)
-		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
-	if (ret)
-		goto attr_err_out;
-	genlmsg_end(skb, msg_head);
-
-	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
-	return;
-attr_err_out:
-	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
-err_out:
-	kfree_skb(skb);
-}
-#endif
 /*
  * Write warnings to the console and send warning messages over netlink.
  *
@@ -1145,18 +1098,20 @@ err_out:
  */
 static void flush_warnings(struct dquot *const *dquots, char *warntype)
 {
+	struct dquot *dq;
 	int i;
 
-	for (i = 0; i < MAXQUOTAS; i++)
-		if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
-		    !warning_issued(dquots[i], warntype[i])) {
+	for (i = 0; i < MAXQUOTAS; i++) {
+		dq = dquots[i];
+		if (dq && warntype[i] != QUOTA_NL_NOWARN &&
+		    !warning_issued(dq, warntype[i])) {
 #ifdef CONFIG_PRINT_QUOTA_WARNING
-			print_warning(dquots[i], warntype[i]);
-#endif
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-			send_warning(dquots[i], warntype[i]);
+			print_warning(dq, warntype[i]);
 #endif
+			quota_send_warning(dq->dq_type, dq->dq_id,
+					   dq->dq_sb->s_dev, warntype[i]);
 		}
+	}
 }
 
 static int ignore_hardlimit(struct dquot *dquot)
@@ -1337,8 +1292,7 @@ int dquot_initialize(struct inode *inode, int type)
 out_err:
 	up_write(&sb_dqopt(sb)->dqptr_sem);
 	/* Drop unused references */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		dqput(got[cnt]);
+	dqput_all(got);
 	return ret;
 }
 EXPORT_SYMBOL(dquot_initialize);
@@ -1357,9 +1311,7 @@ int dquot_drop(struct inode *inode)
 		inode->i_dquot[cnt] = NULL;
 	}
 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		dqput(put[cnt]);
+	dqput_all(put);
 	return 0;
 }
 EXPORT_SYMBOL(dquot_drop);
@@ -1388,6 +1340,67 @@ void vfs_dq_drop(struct inode *inode)
 EXPORT_SYMBOL(vfs_dq_drop);
 
 /*
+ * inode_reserved_space is managed internally by quota, and protected by
+ * i_lock similar to i_blocks+i_bytes.
+ */
+static qsize_t *inode_reserved_space(struct inode * inode)
+{
+	/* Filesystem must explicitly define it's own method in order to use
+	 * quota reservation interface */
+	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
+	return inode->i_sb->dq_op->get_reserved_space(inode);
+}
+
+static void inode_add_rsv_space(struct inode *inode, qsize_t number)
+{
+	spin_lock(&inode->i_lock);
+	*inode_reserved_space(inode) += number;
+	spin_unlock(&inode->i_lock);
+}
+
+
+static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+{
+	spin_lock(&inode->i_lock);
+	*inode_reserved_space(inode) -= number;
+	__inode_add_bytes(inode, number);
+	spin_unlock(&inode->i_lock);
+}
+
+static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
+{
+	spin_lock(&inode->i_lock);
+	*inode_reserved_space(inode) -= number;
+	spin_unlock(&inode->i_lock);
+}
+
+static qsize_t inode_get_rsv_space(struct inode *inode)
+{
+	qsize_t ret;
+	spin_lock(&inode->i_lock);
+	ret = *inode_reserved_space(inode);
+	spin_unlock(&inode->i_lock);
+	return ret;
+}
+
+static void inode_incr_space(struct inode *inode, qsize_t number,
+				int reserve)
+{
+	if (reserve)
+		inode_add_rsv_space(inode, number);
+	else
+		inode_add_bytes(inode, number);
+}
+
+static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
+{
+	if (reserve)
+		inode_sub_rsv_space(inode, number);
+	else
+		inode_sub_bytes(inode, number);
+}
+
+/*
  * Following four functions update i_blocks+i_bytes fields and
  * quota information (together with appropriate checks)
  * NOTE: We absolutely rely on the fact that caller dirties
@@ -1405,6 +1418,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
 	int cnt, ret = QUOTA_OK;
 	char warntype[MAXQUOTAS];
 
+	/*
+	 * First test before acquiring mutex - solves deadlocks when we
+	 * re-enter the quota code and are already holding the mutex
+	 */
+	if (IS_NOQUOTA(inode)) {
+		inode_incr_space(inode, number, reserve);
+		goto out;
+	}
+
+	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	if (IS_NOQUOTA(inode)) {
+		inode_incr_space(inode, number, reserve);
+		goto out_unlock;
+	}
+
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warntype[cnt] = QUOTA_NL_NOWARN;
 
@@ -1415,7 +1443,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
 		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
 		    == NO_QUOTA) {
 			ret = NO_QUOTA;
-			goto out_unlock;
+			spin_unlock(&dq_data_lock);
+			goto out_flush_warn;
 		}
 	}
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1426,64 +1455,29 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
 		else
 			dquot_incr_space(inode->i_dquot[cnt], number);
 	}
-	if (!reserve)
-		inode_add_bytes(inode, number);
-out_unlock:
+	inode_incr_space(inode, number, reserve);
 	spin_unlock(&dq_data_lock);
+
+	if (reserve)
+		goto out_flush_warn;
+	mark_all_dquot_dirty(inode->i_dquot);
+out_flush_warn:
 	flush_warnings(inode->i_dquot, warntype);
+out_unlock:
+	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
 	return ret;
 }
 
 int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
 {
-	int cnt, ret = QUOTA_OK;
-
-	/*
-	 * First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex
-	 */
-	if (IS_NOQUOTA(inode)) {
-		inode_add_bytes(inode, number);
-		goto out;
-	}
-
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-	if (IS_NOQUOTA(inode)) {
-		inode_add_bytes(inode, number);
-		goto out_unlock;
-	}
-
-	ret = __dquot_alloc_space(inode, number, warn, 0);
-	if (ret == NO_QUOTA)
-		goto out_unlock;
-
-	/* Dirtify all the dquots - this can block when journalling */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		if (inode->i_dquot[cnt])
-			mark_dquot_dirty(inode->i_dquot[cnt]);
-out_unlock:
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-out:
-	return ret;
+	return __dquot_alloc_space(inode, number, warn, 0);
 }
 EXPORT_SYMBOL(dquot_alloc_space);
 
 int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
 {
-	int ret = QUOTA_OK;
-
-	if (IS_NOQUOTA(inode))
-		goto out;
-
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-	if (IS_NOQUOTA(inode))
-		goto out_unlock;
-
-	ret = __dquot_alloc_space(inode, number, warn, 1);
-out_unlock:
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-out:
-	return ret;
+	return __dquot_alloc_space(inode, number, warn, 1);
 }
 EXPORT_SYMBOL(dquot_reserve_space);
 
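The reservation entry points above are meant to be driven by a filesystem doing delayed allocation: reserve at write time, claim once blocks are really allocated, release if the write is dropped. A minimal sketch of that flow, with hypothetical myfs_* callers that are not part of this patch (only the dquot_* calls and NO_QUOTA come from the quota code):

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Reserve quota for a pending delayed-allocation write. */
static int myfs_reserve_write(struct inode *inode, qsize_t bytes)
{
	if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
		return -EDQUOT;	/* over quota, nothing was reserved */
	return 0;
}

/* Blocks were allocated: convert the reservation into real usage. */
static void myfs_blocks_allocated(struct inode *inode, qsize_t bytes)
{
	dquot_claim_space(inode, bytes);
}

/* The write never materialized: hand the reservation back. */
static void myfs_write_dropped(struct inode *inode, qsize_t bytes)
{
	dquot_release_reserved_space(inode, bytes);
}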
@@ -1524,10 +1518,7 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
 warn_put_all:
 	spin_unlock(&dq_data_lock);
 	if (ret == QUOTA_OK)
-		/* Dirtify all the dquots - this can block when journalling */
-		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-			if (inode->i_dquot[cnt])
-				mark_dquot_dirty(inode->i_dquot[cnt]);
+		mark_all_dquot_dirty(inode->i_dquot);
 	flush_warnings(inode->i_dquot, warntype);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	return ret;
@@ -1540,14 +1531,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
 	int ret = QUOTA_OK;
 
 	if (IS_NOQUOTA(inode)) {
-		inode_add_bytes(inode, number);
+		inode_claim_rsv_space(inode, number);
 		goto out;
 	}
 
 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	if (IS_NOQUOTA(inode)) {
 		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-		inode_add_bytes(inode, number);
+		inode_claim_rsv_space(inode, number);
 		goto out;
 	}
 
@@ -1559,12 +1550,9 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
 							number);
 	}
 	/* Update inode bytes */
-	inode_add_bytes(inode, number);
+	inode_claim_rsv_space(inode, number);
 	spin_unlock(&dq_data_lock);
-	/* Dirtify all the dquots - this can block when journalling */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		if (inode->i_dquot[cnt])
-			mark_dquot_dirty(inode->i_dquot[cnt]);
+	mark_all_dquot_dirty(inode->i_dquot);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 out:
 	return ret;
@@ -1572,38 +1560,9 @@ out:
 EXPORT_SYMBOL(dquot_claim_space);
 
 /*
- * Release reserved quota space
- */
-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
-{
-	int cnt;
-
-	if (IS_NOQUOTA(inode))
-		goto out;
-
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-	if (IS_NOQUOTA(inode))
-		goto out_unlock;
-
-	spin_lock(&dq_data_lock);
-	/* Release reserved dquots */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt])
-			dquot_free_reserved_space(inode->i_dquot[cnt], number);
-	}
-	spin_unlock(&dq_data_lock);
-
-out_unlock:
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
-out:
-	return;
-}
-EXPORT_SYMBOL(dquot_release_reserved_space);
-
-/*
  * This operation can block, but only after everything is updated
  */
-int dquot_free_space(struct inode *inode, qsize_t number)
+int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
 {
 	unsigned int cnt;
 	char warntype[MAXQUOTAS];
@@ -1612,7 +1571,7 @@ int dquot_free_space(struct inode *inode, qsize_t number)
 	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode)) {
 out_sub:
-		inode_sub_bytes(inode, number);
+		inode_decr_space(inode, number, reserve);
 		return QUOTA_OK;
 	}
 
@@ -1627,21 +1586,40 @@ out_sub:
 		if (!inode->i_dquot[cnt])
 			continue;
 		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
-		dquot_decr_space(inode->i_dquot[cnt], number);
+		if (reserve)
+			dquot_free_reserved_space(inode->i_dquot[cnt], number);
+		else
+			dquot_decr_space(inode->i_dquot[cnt], number);
 	}
-	inode_sub_bytes(inode, number);
+	inode_decr_space(inode, number, reserve);
 	spin_unlock(&dq_data_lock);
-	/* Dirtify all the dquots - this can block when journalling */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		if (inode->i_dquot[cnt])
-			mark_dquot_dirty(inode->i_dquot[cnt]);
+
+	if (reserve)
+		goto out_unlock;
+	mark_all_dquot_dirty(inode->i_dquot);
+out_unlock:
 	flush_warnings(inode->i_dquot, warntype);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	return QUOTA_OK;
 }
+
+int dquot_free_space(struct inode *inode, qsize_t number)
+{
+	return __dquot_free_space(inode, number, 0);
+}
 EXPORT_SYMBOL(dquot_free_space);
 
 /*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+	__dquot_free_space(inode, number, 1);
+
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
+
+/*
  * This operation can block, but only after everything is updated
  */
 int dquot_free_inode(const struct inode *inode, qsize_t number)
@@ -1668,10 +1646,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
 		dquot_decr_inodes(inode->i_dquot[cnt], number);
 	}
 	spin_unlock(&dq_data_lock);
-	/* Dirtify all the dquots - this can block when journalling */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		if (inode->i_dquot[cnt])
-			mark_dquot_dirty(inode->i_dquot[cnt]);
+	mark_all_dquot_dirty(inode->i_dquot);
 	flush_warnings(inode->i_dquot, warntype);
 	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	return QUOTA_OK;
@@ -1679,19 +1654,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
 EXPORT_SYMBOL(dquot_free_inode);
 
 /*
- * call back function, get reserved quota space from underlying fs
- */
-qsize_t dquot_get_reserved_space(struct inode *inode)
-{
-	qsize_t reserved_space = 0;
-
-	if (sb_any_quota_active(inode->i_sb) &&
-	    inode->i_sb->dq_op->get_reserved_space)
-		reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
-	return reserved_space;
-}
-
-/*
  * Transfer the number of inode and blocks from one diskquota to an other.
  *
  * This operation can block, but only after everything is updated
@@ -1734,7 +1696,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
 	}
 	spin_lock(&dq_data_lock);
 	cur_space = inode_get_bytes(inode);
-	rsv_space = dquot_get_reserved_space(inode);
+	rsv_space = inode_get_rsv_space(inode);
 	space = cur_space + rsv_space;
 	/* Build the transfer_from list and check the limits */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1778,25 +1740,18 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
 	spin_unlock(&dq_data_lock);
 	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 
-	/* Dirtify all the dquots - this can block when journalling */
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (transfer_from[cnt])
-			mark_dquot_dirty(transfer_from[cnt]);
-		if (transfer_to[cnt]) {
-			mark_dquot_dirty(transfer_to[cnt]);
-			/* The reference we got is transferred to the inode */
-			transfer_to[cnt] = NULL;
-		}
-	}
+	mark_all_dquot_dirty(transfer_from);
+	mark_all_dquot_dirty(transfer_to);
+	/* The reference we got is transferred to the inode */
+	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+		transfer_to[cnt] = NULL;
 warn_put_all:
 	flush_warnings(transfer_to, warntype_to);
 	flush_warnings(transfer_from, warntype_from_inodes);
 	flush_warnings(transfer_from, warntype_from_space);
 put_all:
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		dqput(transfer_from[cnt]);
-		dqput(transfer_to[cnt]);
-	}
+	dqput_all(transfer_from);
+	dqput_all(transfer_to);
 	return ret;
 over_quota:
 	spin_unlock(&dq_data_lock);
@@ -2233,7 +2188,9 @@ int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
 	struct dentry *dentry;
 	int error;
 
+	mutex_lock(&sb->s_root->d_inode->i_mutex);
 	dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
+	mutex_unlock(&sb->s_root->d_inode->i_mutex);
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
 
@@ -2473,100 +2430,89 @@ const struct quotactl_ops vfs_quotactl_ops = {
 
 static ctl_table fs_dqstats_table[] = {
 	{
-		.ctl_name = FS_DQ_LOOKUPS,
 		.procname = "lookups",
 		.data = &dqstats.lookups,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = FS_DQ_DROPS,
 		.procname = "drops",
 		.data = &dqstats.drops,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = FS_DQ_READS,
 		.procname = "reads",
 		.data = &dqstats.reads,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = FS_DQ_WRITES,
 		.procname = "writes",
 		.data = &dqstats.writes,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = FS_DQ_CACHE_HITS,
 		.procname = "cache_hits",
 		.data = &dqstats.cache_hits,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = FS_DQ_ALLOCATED,
 		.procname = "allocated_dquots",
 		.data = &dqstats.allocated_dquots,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = FS_DQ_FREE,
 		.procname = "free_dquots",
 		.data = &dqstats.free_dquots,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = FS_DQ_SYNCS,
 		.procname = "syncs",
 		.data = &dqstats.syncs,
 		.maxlen = sizeof(int),
 		.mode = 0444,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 #ifdef CONFIG_PRINT_QUOTA_WARNING
 	{
-		.ctl_name = FS_DQ_WARNINGS,
 		.procname = "warnings",
 		.data = &flag_print_warnings,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 #endif
-	{ .ctl_name = 0 },
+	{ },
 };
 
 static ctl_table fs_table[] = {
 	{
-		.ctl_name = FS_DQSTATS,
 		.procname = "quota",
 		.mode = 0555,
 		.child = fs_dqstats_table,
 	},
-	{ .ctl_name = 0 },
+	{ },
 };
 
 static ctl_table sys_table[] = {
 	{
-		.ctl_name = CTL_FS,
 		.procname = "fs",
 		.mode = 0555,
 		.child = fs_table,
 	},
-	{ .ctl_name = 0 },
+	{ },
 };
 
 static int __init dquot_init(void)
@@ -2607,12 +2553,6 @@ static int __init dquot_init(void)
 
 	register_shrinker(&dqcache_shrinker);
 
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-	if (genl_register_family(&quota_genl_family) != 0)
-		printk(KERN_ERR
-		       "VFS: Failed to create quota netlink interface.\n");
-#endif
-
 	return 0;
 }
 module_init(dquot_init);
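The reservation helpers added to dquot.c only work if the filesystem exposes its per-inode reserved-byte counter through dq_op->get_reserved_space (note the BUG_ON in inode_reserved_space() above); ext4's delayed allocation is the kind of user this is aimed at. A rough sketch of what such a hook could look like, with illustrative myfs_* names that are not part of this patch:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/quota.h>

/* Hypothetical per-filesystem inode carrying a reserved-quota counter. */
struct myfs_inode_info {
	qsize_t		i_reserved_quota;	/* bytes reserved, not yet claimed */
	struct inode	vfs_inode;
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info, vfs_inode);
}

/*
 * Wired up as .get_reserved_space in the filesystem's dquot_operations;
 * dquot.c reads and updates the returned counter under inode->i_lock.
 */
static qsize_t *myfs_get_reserved_space(struct inode *inode)
{
	return &MYFS_I(inode)->i_reserved_quota;
}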
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95c5b42384b2..ee91e2756950 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,8 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/types.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
 
 /* Check validity of generic quotactl commands */
 static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
 	return ret;
 }
 #endif
+
+
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+
+/* Netlink family structure for quota */
+static struct genl_family quota_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = 0,
+	.name = "VFS_DQUOT",
+	.version = 1,
+	.maxattr = QUOTA_NL_A_MAX,
+};
+
+/**
+ * quota_send_warning - Send warning to userspace about exceeded quota
+ * @type: The quota type: USRQQUOTA, GRPQUOTA,...
+ * @id: The user or group id of the quota that was exceeded
+ * @dev: The device on which the fs is mounted (sb->s_dev)
+ * @warntype: The type of the warning: QUOTA_NL_...
+ *
+ * This can be used by filesystems (including those which don't use
+ * dquot) to send a message to userspace relating to quota limits.
+ *
+ */
+
+void quota_send_warning(short type, unsigned int id, dev_t dev,
+			const char warntype)
+{
+	static atomic_t seq;
+	struct sk_buff *skb;
+	void *msg_head;
+	int ret;
+	int msg_size = 4 * nla_total_size(sizeof(u32)) +
+		       2 * nla_total_size(sizeof(u64));
+
+	/* We have to allocate using GFP_NOFS as we are called from a
+	 * filesystem performing write and thus further recursion into
+	 * the fs to free some data could cause deadlocks. */
+	skb = genlmsg_new(msg_size, GFP_NOFS);
+	if (!skb) {
+		printk(KERN_ERR
+		       "VFS: Not enough memory to send quota warning.\n");
+		return;
+	}
+	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
+			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
+	if (!msg_head) {
+		printk(KERN_ERR
+		       "VFS: Cannot store netlink header in quota warning.\n");
+		goto err_out;
+	}
+	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
+	if (ret)
+		goto attr_err_out;
+	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
+	if (ret)
+		goto attr_err_out;
+	genlmsg_end(skb, msg_head);
+
+	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
+	return;
+attr_err_out:
+	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
+err_out:
+	kfree_skb(skb);
+}
+EXPORT_SYMBOL(quota_send_warning);
+
+static int __init quota_init(void)
+{
+	if (genl_register_family(&quota_genl_family) != 0)
+		printk(KERN_ERR
+		       "VFS: Failed to create quota netlink interface.\n");
+	return 0;
+};
+
+module_init(quota_init);
+#endif
+
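As the kernel-doc above notes, quota_send_warning() is now callable by any filesystem, not only dquot users. A hedged usage sketch; myfs_* is hypothetical, while USRQUOTA and the QUOTA_NL_* warning codes are the existing constants from <linux/quota.h>:

#include <linux/fs.h>
#include <linux/quota.h>

/*
 * A filesystem that does its own space accounting (no dquot at all) can
 * still raise the standard netlink quota warning when a user hits a hard
 * block limit.
 */
static void myfs_warn_block_hardlimit(struct super_block *sb, uid_t uid)
{
	quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BHARDWARN);
}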
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 0edcf42b1778..2ae757e9c008 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -204,7 +204,7 @@ out:
 	return ret;
 }
 
-static struct quota_format_ops v1_format_ops = {
+static const struct quota_format_ops v1_format_ops = {
 	.check_quota_file = v1_check_quota_file,
 	.read_file_info = v1_read_file_info,
 	.write_file_info = v1_write_file_info,
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index a5475fb1ae44..e3da02f4986f 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -23,14 +23,23 @@ MODULE_LICENSE("GPL");
 
 #define __QUOTA_V2_PARANOIA
 
-static void v2_mem2diskdqb(void *dp, struct dquot *dquot);
-static void v2_disk2memdqb(struct dquot *dquot, void *dp);
-static int v2_is_id(void *dp, struct dquot *dquot);
-
-static struct qtree_fmt_operations v2_qtree_ops = {
-	.mem2disk_dqblk = v2_mem2diskdqb,
-	.disk2mem_dqblk = v2_disk2memdqb,
-	.is_id = v2_is_id,
+static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot);
+static void v2r0_disk2memdqb(struct dquot *dquot, void *dp);
+static int v2r0_is_id(void *dp, struct dquot *dquot);
+static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot);
+static void v2r1_disk2memdqb(struct dquot *dquot, void *dp);
+static int v2r1_is_id(void *dp, struct dquot *dquot);
+
+static struct qtree_fmt_operations v2r0_qtree_ops = {
+	.mem2disk_dqblk = v2r0_mem2diskdqb,
+	.disk2mem_dqblk = v2r0_disk2memdqb,
+	.is_id = v2r0_is_id,
+};
+
+static struct qtree_fmt_operations v2r1_qtree_ops = {
+	.mem2disk_dqblk = v2r1_mem2diskdqb,
+	.disk2mem_dqblk = v2r1_disk2memdqb,
+	.is_id = v2r1_is_id,
 };
 
 #define QUOTABLOCK_BITS 10
@@ -46,23 +55,33 @@ static inline qsize_t v2_qbtos(qsize_t blocks)
 	return blocks << QUOTABLOCK_BITS;
 }
 
+static int v2_read_header(struct super_block *sb, int type,
+			  struct v2_disk_dqheader *dqhead)
+{
+	ssize_t size;
+
+	size = sb->s_op->quota_read(sb, type, (char *)dqhead,
+				    sizeof(struct v2_disk_dqheader), 0);
+	if (size != sizeof(struct v2_disk_dqheader)) {
+		printk(KERN_WARNING "quota_v2: Failed header read:"
+		       " expected=%zd got=%zd\n",
+		       sizeof(struct v2_disk_dqheader), size);
+		return 0;
+	}
+	return 1;
+}
+
 /* Check whether given file is really vfsv0 quotafile */
 static int v2_check_quota_file(struct super_block *sb, int type)
 {
 	struct v2_disk_dqheader dqhead;
-	ssize_t size;
 	static const uint quota_magics[] = V2_INITQMAGICS;
 	static const uint quota_versions[] = V2_INITQVERSIONS;
 
-	size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
-				    sizeof(struct v2_disk_dqheader), 0);
-	if (size != sizeof(struct v2_disk_dqheader)) {
-		printk("quota_v2: failed read expected=%zd got=%zd\n",
-		       sizeof(struct v2_disk_dqheader), size);
+	if (!v2_read_header(sb, type, &dqhead))
 		return 0;
-	}
 	if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
-	    le32_to_cpu(dqhead.dqh_version) != quota_versions[type])
+	    le32_to_cpu(dqhead.dqh_version) > quota_versions[type])
 		return 0;
 	return 1;
 }
@@ -71,14 +90,23 @@ static int v2_check_quota_file(struct super_block *sb, int type)
 static int v2_read_file_info(struct super_block *sb, int type)
 {
 	struct v2_disk_dqinfo dinfo;
+	struct v2_disk_dqheader dqhead;
 	struct mem_dqinfo *info = sb_dqinfo(sb, type);
 	struct qtree_mem_dqinfo *qinfo;
 	ssize_t size;
+	unsigned int version;
+
+	if (!v2_read_header(sb, type, &dqhead))
+		return -1;
+	version = le32_to_cpu(dqhead.dqh_version);
+	if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) ||
+	    (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1))
+		return -1;
 
 	size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
 	       sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
 	if (size != sizeof(struct v2_disk_dqinfo)) {
-		printk(KERN_WARNING "Can't read info structure on device %s.\n",
+		printk(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
 			sb->s_id);
 		return -1;
 	}
@@ -89,9 +117,15 @@ static int v2_read_file_info(struct super_block *sb, int type)
 		return -1;
 	}
 	qinfo = info->dqi_priv;
-	/* limits are stored as unsigned 32-bit data */
-	info->dqi_maxblimit = 0xffffffff;
-	info->dqi_maxilimit = 0xffffffff;
+	if (version == 0) {
+		/* limits are stored as unsigned 32-bit data */
+		info->dqi_maxblimit = 0xffffffff;
+		info->dqi_maxilimit = 0xffffffff;
+	} else {
+		/* used space is stored as unsigned 64-bit value */
+		info->dqi_maxblimit = 0xffffffffffffffffULL;	/* 2^64-1 */
+		info->dqi_maxilimit = 0xffffffffffffffffULL;
+	}
 	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
 	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
 	info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
@@ -103,8 +137,13 @@ static int v2_read_file_info(struct super_block *sb, int type)
 	qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS;
 	qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS;
 	qinfo->dqi_qtree_depth = qtree_depth(qinfo);
-	qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk);
-	qinfo->dqi_ops = &v2_qtree_ops;
+	if (version == 0) {
+		qinfo->dqi_entry_size = sizeof(struct v2r0_disk_dqblk);
+		qinfo->dqi_ops = &v2r0_qtree_ops;
+	} else {
+		qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk);
+		qinfo->dqi_ops = &v2r1_qtree_ops;
+	}
 	return 0;
 }
 
@@ -135,9 +174,9 @@ static int v2_write_file_info(struct super_block *sb, int type)
 	return 0;
 }
 
-static void v2_disk2memdqb(struct dquot *dquot, void *dp)
+static void v2r0_disk2memdqb(struct dquot *dquot, void *dp)
 {
-	struct v2_disk_dqblk *d = dp, empty;
+	struct v2r0_disk_dqblk *d = dp, empty;
 	struct mem_dqblk *m = &dquot->dq_dqb;
 
 	m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
@@ -149,15 +188,15 @@ static void v2_disk2memdqb(struct dquot *dquot, void *dp)
 	m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
 	m->dqb_btime = le64_to_cpu(d->dqb_btime);
 	/* We need to escape back all-zero structure */
-	memset(&empty, 0, sizeof(struct v2_disk_dqblk));
+	memset(&empty, 0, sizeof(struct v2r0_disk_dqblk));
 	empty.dqb_itime = cpu_to_le64(1);
-	if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk)))
+	if (!memcmp(&empty, dp, sizeof(struct v2r0_disk_dqblk)))
 		m->dqb_itime = 0;
 }
 
-static void v2_mem2diskdqb(void *dp, struct dquot *dquot)
+static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot)
 {
-	struct v2_disk_dqblk *d = dp;
+	struct v2r0_disk_dqblk *d = dp;
 	struct mem_dqblk *m = &dquot->dq_dqb;
 	struct qtree_mem_dqinfo *info =
 			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
@@ -175,9 +214,60 @@ static void v2_mem2diskdqb(void *dp, struct dquot *dquot)
 		d->dqb_itime = cpu_to_le64(1);
 }
 
-static int v2_is_id(void *dp, struct dquot *dquot)
+static int v2r0_is_id(void *dp, struct dquot *dquot)
 {
-	struct v2_disk_dqblk *d = dp;
+	struct v2r0_disk_dqblk *d = dp;
+	struct qtree_mem_dqinfo *info =
+			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+
+	if (qtree_entry_unused(info, dp))
+		return 0;
+	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
+}
+
+static void v2r1_disk2memdqb(struct dquot *dquot, void *dp)
+{
+	struct v2r1_disk_dqblk *d = dp, empty;
+	struct mem_dqblk *m = &dquot->dq_dqb;
+
+	m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
+	m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
+	m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
+	m->dqb_itime = le64_to_cpu(d->dqb_itime);
+	m->dqb_bhardlimit = v2_qbtos(le64_to_cpu(d->dqb_bhardlimit));
+	m->dqb_bsoftlimit = v2_qbtos(le64_to_cpu(d->dqb_bsoftlimit));
+	m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
+	m->dqb_btime = le64_to_cpu(d->dqb_btime);
+	/* We need to escape back all-zero structure */
+	memset(&empty, 0, sizeof(struct v2r1_disk_dqblk));
+	empty.dqb_itime = cpu_to_le64(1);
+	if (!memcmp(&empty, dp, sizeof(struct v2r1_disk_dqblk)))
+		m->dqb_itime = 0;
+}
+
+static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
+{
+	struct v2r1_disk_dqblk *d = dp;
+	struct mem_dqblk *m = &dquot->dq_dqb;
+	struct qtree_mem_dqinfo *info =
+			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+
+	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
+	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
+	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
+	d->dqb_itime = cpu_to_le64(m->dqb_itime);
+	d->dqb_bhardlimit = cpu_to_le64(v2_stoqb(m->dqb_bhardlimit));
+	d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit));
+	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+	d->dqb_btime = cpu_to_le64(m->dqb_btime);
+	d->dqb_id = cpu_to_le32(dquot->dq_id);
+	if (qtree_entry_unused(info, dp))
+		d->dqb_itime = cpu_to_le64(1);
+}
+
+static int v2r1_is_id(void *dp, struct dquot *dquot)
+{
+	struct v2r1_disk_dqblk *d = dp;
 	struct qtree_mem_dqinfo *info =
 			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
 
@@ -207,7 +297,7 @@ static int v2_free_file_info(struct super_block *sb, int type)
 	return 0;
 }
 
-static struct quota_format_ops v2_format_ops = {
+static const struct quota_format_ops v2_format_ops = {
 	.check_quota_file = v2_check_quota_file,
 	.read_file_info = v2_read_file_info,
 	.write_file_info = v2_write_file_info,
@@ -217,20 +307,32 @@ static struct quota_format_ops v2_format_ops = {
 	.release_dqblk = v2_release_dquot,
 };
 
-static struct quota_format_type v2_quota_format = {
+static struct quota_format_type v2r0_quota_format = {
 	.qf_fmt_id = QFMT_VFS_V0,
 	.qf_ops = &v2_format_ops,
 	.qf_owner = THIS_MODULE
 };
 
+static struct quota_format_type v2r1_quota_format = {
+	.qf_fmt_id = QFMT_VFS_V1,
+	.qf_ops = &v2_format_ops,
+	.qf_owner = THIS_MODULE
+};
+
 static int __init init_v2_quota_format(void)
 {
-	return register_quota_format(&v2_quota_format);
+	int ret;
+
+	ret = register_quota_format(&v2r0_quota_format);
+	if (ret)
+		return ret;
+	return register_quota_format(&v2r1_quota_format);
 }
 
 static void __exit exit_v2_quota_format(void)
 {
-	unregister_quota_format(&v2_quota_format);
+	unregister_quota_format(&v2r0_quota_format);
+	unregister_quota_format(&v2r1_quota_format);
 }
 
 module_init(init_v2_quota_format);
diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h
index 530fe580685c..f1966b42c2fd 100644
--- a/fs/quota/quotaio_v2.h
+++ b/fs/quota/quotaio_v2.h
@@ -17,8 +17,8 @@
 }
 
 #define V2_INITQVERSIONS {\
-	0,		/* USRQUOTA */\
-	0		/* GRPQUOTA */\
+	1,		/* USRQUOTA */\
+	1		/* GRPQUOTA */\
 }
 
 /* First generic header */
@@ -32,7 +32,7 @@ struct v2_disk_dqheader {
  * (as it appears on disk) - the file is a radix tree whose leaves point
  * to blocks of these structures.
  */
-struct v2_disk_dqblk {
+struct v2r0_disk_dqblk {
 	__le32 dqb_id;		/* id this quota applies to */
 	__le32 dqb_ihardlimit;	/* absolute limit on allocated inodes */
 	__le32 dqb_isoftlimit;	/* preferred inode limit */
@@ -44,6 +44,19 @@ struct v2_disk_dqblk {
 	__le64 dqb_itime;	/* time limit for excessive inode use */
 };
 
+struct v2r1_disk_dqblk {
+	__le32 dqb_id;		/* id this quota applies to */
+	__le32 dqb_pad;
+	__le64 dqb_ihardlimit;	/* absolute limit on allocated inodes */
+	__le64 dqb_isoftlimit;	/* preferred inode limit */
+	__le64 dqb_curinodes;	/* current # allocated inodes */
+	__le64 dqb_bhardlimit;	/* absolute limit on disk space (in QUOTABLOCK_SIZE) */
+	__le64 dqb_bsoftlimit;	/* preferred limit on disk space (in QUOTABLOCK_SIZE) */
+	__le64 dqb_curspace;	/* current space occupied (in bytes) */
+	__le64 dqb_btime;	/* time limit for excessive disk use */
+	__le64 dqb_itime;	/* time limit for excessive inode use */
+};
+
 /* Header with type and version specific information */
 struct v2_disk_dqinfo {
 	__le32 dqi_bgrace;	/* Time before block soft limit becomes hard limit */
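Given the two structure declarations above, the r0 entry works out to 48 bytes and the widened r1 entry to 72 bytes on the usual x86 ABIs, which is why v2_read_file_info() now selects dqi_entry_size per on-disk version. A small compile-time check, offered only as a hedged illustration of that arithmetic (not part of the patch):

#include <linux/kernel.h>
#include "quotaio_v2.h"

static inline void v2_entry_sizes_sanity_check(void)
{
	BUILD_BUG_ON(sizeof(struct v2r0_disk_dqblk) != 48);	/* 6*4 + 3*8 */
	BUILD_BUG_ON(sizeof(struct v2r1_disk_dqblk) != 72);	/* 2*4 + 8*8 */
}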