author     Linus Torvalds <torvalds@linux-foundation.org>    2016-10-07 18:15:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-10-07 18:15:33 -0400
commit     2eee010d092903ee95716b6c2fbd9d3289839aa4 (patch)
tree       77755f1b46dcf2e238b3cbd43f840ec56628e38d
parent     513a4befae06c4469abfb836e8f71977de58c636 (diff)
parent     18017479cabaeb5c659d789f04ecf7939f8ee28f (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
 "Lots of bug fixes and cleanups"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (40 commits)
  ext4: remove unused variable
  ext4: use journal inode to determine journal overhead
  ext4: create function to read journal inode
  ext4: unmap metadata when zeroing blocks
  ext4: remove plugging from ext4_file_write_iter()
  ext4: allow unlocked direct IO when pages are cached
  ext4: require encryption feature for EXT4_IOC_SET_ENCRYPTION_POLICY
  fscrypto: use standard macros to compute length of fname ciphertext
  ext4: do not unnecessarily null-terminate encrypted symlink data
  ext4: release bh in make_indexed_dir
  ext4: Allow parallel DIO reads
  ext4: allow DAX writeback for hole punch
  jbd2: fix lockdep annotation in add_transaction_credits()
  blockgroup_lock.h: simplify definition of NR_BG_LOCKS
  blockgroup_lock.h: remove debris from bgl_lock_ptr() conversion
  fscrypto: make filename crypto functions return 0 on success
  fscrypto: rename completion callbacks to reflect usage
  fscrypto: remove unnecessary includes
  fscrypto: improved validation when loading inode encryption metadata
  ext4: fix memory leak when symlink decryption fails
  ...
-rw-r--r--  fs/crypto/crypto.c                 11
-rw-r--r--  fs/crypto/fname.c                  85
-rw-r--r--  fs/crypto/keyinfo.c                71
-rw-r--r--  fs/ext4/dir.c                       8
-rw-r--r--  fs/ext4/ext4.h                     35
-rw-r--r--  fs/ext4/extents.c                  27
-rw-r--r--  fs/ext4/file.c                     10
-rw-r--r--  fs/ext4/fsync.c                     9
-rw-r--r--  fs/ext4/ialloc.c                    2
-rw-r--r--  fs/ext4/inode.c                    71
-rw-r--r--  fs/ext4/ioctl.c                    11
-rw-r--r--  fs/ext4/move_extent.c               7
-rw-r--r--  fs/ext4/namei.c                    22
-rw-r--r--  fs/ext4/page-io.c                   4
-rw-r--r--  fs/ext4/super.c                   126
-rw-r--r--  fs/ext4/symlink.c                  10
-rw-r--r--  fs/ext4/xattr.c                   340
-rw-r--r--  fs/f2fs/dir.c                       6
-rw-r--r--  fs/f2fs/namei.c                     6
-rw-r--r--  fs/jbd2/journal.c                 131
-rw-r--r--  fs/jbd2/transaction.c               6
-rw-r--r--  fs/mbcache.c                        6
-rw-r--r--  include/linux/blockgroup_lock.h    28
-rw-r--r--  include/linux/fscrypto.h           24
24 files changed, 533 insertions, 523 deletions
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index c502c116924c..61057b7dbddb 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -28,7 +28,6 @@
 #include <linux/dcache.h>
 #include <linux/namei.h>
 #include <linux/fscrypto.h>
-#include <linux/ecryptfs.h>
 
 static unsigned int num_prealloc_crypto_pages = 32;
 static unsigned int num_prealloc_crypto_ctxs = 128;
@@ -128,11 +127,11 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
 EXPORT_SYMBOL(fscrypt_get_ctx);
 
 /**
- * fscrypt_complete() - The completion callback for page encryption
- * @req: The asynchronous encryption request context
- * @res: The result of the encryption operation
+ * page_crypt_complete() - completion callback for page crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
  */
-static void fscrypt_complete(struct crypto_async_request *req, int res)
+static void page_crypt_complete(struct crypto_async_request *req, int res)
 {
         struct fscrypt_completion_result *ecr = req->data;
 
@@ -170,7 +169,7 @@ static int do_page_crypto(struct inode *inode,
 
         skcipher_request_set_callback(
                 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-                fscrypt_complete, &ecr);
+                page_crypt_complete, &ecr);
 
         BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
         memcpy(xts_tweak, &index, sizeof(index));
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 5d6d49113efa..9a28133ac3b8 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -10,21 +10,16 @@
  * This has not yet undergone a rigorous security audit.
  */
 
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
 #include <linux/scatterlist.h>
 #include <linux/ratelimit.h>
 #include <linux/fscrypto.h>
 
-static u32 size_round_up(size_t size, size_t blksize)
-{
-        return ((size + blksize - 1) / blksize) * blksize;
-}
-
 /**
- * dir_crypt_complete() -
+ * fname_crypt_complete() - completion callback for filename crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
  */
-static void dir_crypt_complete(struct crypto_async_request *req, int res)
+static void fname_crypt_complete(struct crypto_async_request *req, int res)
 {
         struct fscrypt_completion_result *ecr = req->data;
 
@@ -35,11 +30,11 @@ static void dir_crypt_complete(struct crypto_async_request *req, int res)
 }
 
 /**
- * fname_encrypt() -
+ * fname_encrypt() - encrypt a filename
  *
- * This function encrypts the input filename, and returns the length of the
- * ciphertext. Errors are returned as negative numbers. We trust the caller to
- * allocate sufficient memory to oname string.
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
  */
 static int fname_encrypt(struct inode *inode,
                         const struct qstr *iname, struct fscrypt_str *oname)
@@ -60,10 +55,9 @@ static int fname_encrypt(struct inode *inode,
         if (iname->len <= 0 || iname->len > lim)
                 return -EIO;
 
-        ciphertext_len = (iname->len < FS_CRYPTO_BLOCK_SIZE) ?
-                                        FS_CRYPTO_BLOCK_SIZE : iname->len;
-        ciphertext_len = size_round_up(ciphertext_len, padding);
-        ciphertext_len = (ciphertext_len > lim) ? lim : ciphertext_len;
+        ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE);
+        ciphertext_len = round_up(ciphertext_len, padding);
+        ciphertext_len = min(ciphertext_len, lim);
 
         if (ciphertext_len <= sizeof(buf)) {
                 workbuf = buf;
@@ -84,7 +78,7 @@ static int fname_encrypt(struct inode *inode,
         }
         skcipher_request_set_callback(req,
                 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-                dir_crypt_complete, &ecr);
+                fname_crypt_complete, &ecr);
 
         /* Copy the input */
         memcpy(workbuf, iname->name, iname->len);
@@ -105,20 +99,22 @@ static int fname_encrypt(struct inode *inode,
         }
         kfree(alloc_buf);
         skcipher_request_free(req);
-        if (res < 0)
+        if (res < 0) {
                 printk_ratelimited(KERN_ERR
                         "%s: Error (error code %d)\n", __func__, res);
+                return res;
+        }
 
         oname->len = ciphertext_len;
-        return res;
+        return 0;
 }
 
-/*
- * fname_decrypt()
- * This function decrypts the input filename, and returns
- * the length of the plaintext.
- * Errors are returned as negative numbers.
- * We trust the caller to allocate sufficient memory to oname string.
+/**
+ * fname_decrypt() - decrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
  */
 static int fname_decrypt(struct inode *inode,
                         const struct fscrypt_str *iname,
@@ -146,7 +142,7 @@ static int fname_decrypt(struct inode *inode,
         }
         skcipher_request_set_callback(req,
                 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-                dir_crypt_complete, &ecr);
+                fname_crypt_complete, &ecr);
 
         /* Initialize IV */
         memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
@@ -168,7 +164,7 @@ static int fname_decrypt(struct inode *inode,
         }
 
         oname->len = strnlen(oname->name, iname->len);
-        return oname->len;
+        return 0;
 }
 
 static const char *lookup_table =
@@ -231,9 +227,8 @@ u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
 
         if (ci)
                 padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
-        if (ilen < FS_CRYPTO_BLOCK_SIZE)
-                ilen = FS_CRYPTO_BLOCK_SIZE;
-        return size_round_up(ilen, padding);
+        ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
+        return round_up(ilen, padding);
 }
 EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
 
@@ -279,6 +274,10 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer);
 /**
  * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
  *                      space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
  */
 int fscrypt_fname_disk_to_usr(struct inode *inode,
                         u32 hash, u32 minor_hash,
@@ -287,13 +286,12 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
 {
         const struct qstr qname = FSTR_TO_QSTR(iname);
         char buf[24];
-        int ret;
 
         if (fscrypt_is_dot_dotdot(&qname)) {
                 oname->name[0] = '.';
                 oname->name[iname->len - 1] = '.';
                 oname->len = iname->len;
-                return oname->len;
+                return 0;
         }
 
         if (iname->len < FS_CRYPTO_BLOCK_SIZE)
@@ -303,9 +301,9 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
                 return fname_decrypt(inode, iname, oname);
 
         if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
-                ret = digest_encode(iname->name, iname->len, oname->name);
-                oname->len = ret;
-                return ret;
+                oname->len = digest_encode(iname->name, iname->len,
+                                           oname->name);
+                return 0;
         }
         if (hash) {
                 memcpy(buf, &hash, 4);
@@ -315,15 +313,18 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
         }
         memcpy(buf + 8, iname->name + iname->len - 16, 16);
         oname->name[0] = '_';
-        ret = digest_encode(buf, 24, oname->name + 1);
-        oname->len = ret + 1;
-        return ret + 1;
+        oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
+        return 0;
 }
 EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
 
 /**
  * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
  *                      space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
  */
 int fscrypt_fname_usr_to_disk(struct inode *inode,
                         const struct qstr *iname,
@@ -333,7 +334,7 @@ int fscrypt_fname_usr_to_disk(struct inode *inode,
                 oname->name[0] = '.';
                 oname->name[iname->len - 1] = '.';
                 oname->len = iname->len;
-                return oname->len;
+                return 0;
         }
         if (inode->i_crypt_info)
                 return fname_encrypt(inode, iname, oname);
@@ -367,10 +368,10 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
         if (dir->i_crypt_info) {
                 ret = fscrypt_fname_alloc_buffer(dir, iname->len,
                                                         &fname->crypto_buf);
-                if (ret < 0)
+                if (ret)
                         return ret;
                 ret = fname_encrypt(dir, iname, &fname->crypto_buf);
-                if (ret < 0)
+                if (ret)
                         goto errout;
                 fname->disk_name.name = fname->crypto_buf.name;
                 fname->disk_name.len = fname->crypto_buf.len;
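
[Editor's note] The padded-name arithmetic that fname_encrypt() and fscrypt_fname_encrypted_size() now share (max, round_up, min) can be exercised in isolation. The sketch below is a userspace approximation, not fscrypto code: FS_CRYPTO_BLOCK_SIZE = 16, the 255-byte name limit, and the padding values are assumptions, and round_up() is reimplemented locally.

/*
 * Userspace sketch of the padded ciphertext-length computation above.
 * Assumptions (not taken from this diff): FS_CRYPTO_BLOCK_SIZE is 16 and
 * the policy padding is one of 4, 8, 16 or 32 bytes.
 */
#include <stdint.h>
#include <stdio.h>

#define FS_CRYPTO_BLOCK_SIZE 16U        /* assumed AES block size */

static uint32_t round_up(uint32_t x, uint32_t align)
{
        return (x + align - 1) / align * align;
}

static uint32_t ciphertext_len(uint32_t name_len, uint32_t padding, uint32_t lim)
{
        uint32_t len = name_len < FS_CRYPTO_BLOCK_SIZE ? FS_CRYPTO_BLOCK_SIZE : name_len;

        len = round_up(len, padding);   /* pad to the policy granularity */
        return len < lim ? len : lim;   /* never exceed the name-length limit */
}

int main(void)
{
        /* a 3-byte name with 32-byte padding still becomes a 32-byte ciphertext */
        printf("%u\n", ciphertext_len(3, 32, 255));   /* -> 32 */
        printf("%u\n", ciphertext_len(200, 16, 255)); /* -> 208 */
        return 0;
}
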
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 1ac263eddc4e..82f0285f5d08 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -8,11 +8,8 @@
  * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
  */
 
-#include <keys/encrypted-type.h>
 #include <keys/user-type.h>
-#include <linux/random.h>
 #include <linux/scatterlist.h>
-#include <uapi/linux/keyctl.h>
 #include <linux/fscrypto.h>
 
 static void derive_crypt_complete(struct crypto_async_request *req, int rc)
@@ -139,6 +136,38 @@ out:
         return res;
 }
 
+static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
+                                 const char **cipher_str_ret, int *keysize_ret)
+{
+        if (S_ISREG(inode->i_mode)) {
+                if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
+                        *cipher_str_ret = "xts(aes)";
+                        *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+                        return 0;
+                }
+                pr_warn_once("fscrypto: unsupported contents encryption mode "
+                             "%d for inode %lu\n",
+                             ci->ci_data_mode, inode->i_ino);
+                return -ENOKEY;
+        }
+
+        if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+                if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
+                        *cipher_str_ret = "cts(cbc(aes))";
+                        *keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+                        return 0;
+                }
+                pr_warn_once("fscrypto: unsupported filenames encryption mode "
+                             "%d for inode %lu\n",
+                             ci->ci_filename_mode, inode->i_ino);
+                return -ENOKEY;
+        }
+
+        pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
+                     (inode->i_mode & S_IFMT), inode->i_ino);
+        return -ENOKEY;
+}
+
 static void put_crypt_info(struct fscrypt_info *ci)
 {
         if (!ci)
@@ -155,8 +184,8 @@ int get_crypt_info(struct inode *inode)
         struct fscrypt_context ctx;
         struct crypto_skcipher *ctfm;
         const char *cipher_str;
+        int keysize;
         u8 raw_key[FS_MAX_KEY_SIZE];
-        u8 mode;
         int res;
 
         res = fscrypt_initialize();
@@ -179,13 +208,19 @@ retry:
         if (res < 0) {
                 if (!fscrypt_dummy_context_enabled(inode))
                         return res;
+                ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
                 ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
                 ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
                 ctx.flags = 0;
         } else if (res != sizeof(ctx)) {
                 return -EINVAL;
         }
-        res = 0;
+
+        if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+                return -EINVAL;
+
+        if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
+                return -EINVAL;
 
         crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
         if (!crypt_info)
@@ -198,27 +233,11 @@ retry:
         crypt_info->ci_keyring_key = NULL;
         memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
                                 sizeof(crypt_info->ci_master_key));
-        if (S_ISREG(inode->i_mode))
-                mode = crypt_info->ci_data_mode;
-        else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-                mode = crypt_info->ci_filename_mode;
-        else
-                BUG();
-
-        switch (mode) {
-        case FS_ENCRYPTION_MODE_AES_256_XTS:
-                cipher_str = "xts(aes)";
-                break;
-        case FS_ENCRYPTION_MODE_AES_256_CTS:
-                cipher_str = "cts(cbc(aes))";
-                break;
-        default:
-                printk_once(KERN_WARNING
-                            "%s: unsupported key mode %d (ino %u)\n",
-                            __func__, mode, (unsigned) inode->i_ino);
-                res = -ENOKEY;
+
+        res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+        if (res)
                 goto out;
-        }
+
         if (fscrypt_dummy_context_enabled(inode)) {
                 memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
                 goto got_key;
@@ -253,7 +272,7 @@ got_key:
         crypt_info->ci_ctfm = ctfm;
         crypto_skcipher_clear_flags(ctfm, ~0);
         crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
-        res = crypto_skcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode));
+        res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
         if (res)
                 goto out;
 
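
[Editor's note] The new determine_cipher_type() helper replaces the open-coded mode switch with a dispatch on the file type. A minimal userspace sketch of the same dispatch is below; the FS_ENCRYPTION_MODE_* values and key sizes are assumptions for illustration, not taken from a header in this diff.

/*
 * Userspace sketch of the determine_cipher_type() dispatch added above.
 * S_ISREG/S_ISDIR/S_ISLNK come from <sys/stat.h>; the constants below are
 * assumed values.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

#define FS_ENCRYPTION_MODE_AES_256_XTS 1        /* assumed value */
#define FS_ENCRYPTION_MODE_AES_256_CTS 4        /* assumed value */
#define FS_AES_256_XTS_KEY_SIZE 64              /* assumed value */
#define FS_AES_256_CTS_KEY_SIZE 32              /* assumed value */

static int pick_cipher(mode_t i_mode, int data_mode, int filename_mode,
                       const char **cipher, int *keysize)
{
        if (S_ISREG(i_mode)) {
                if (data_mode != FS_ENCRYPTION_MODE_AES_256_XTS)
                        return -ENOKEY;
                *cipher = "xts(aes)";
                *keysize = FS_AES_256_XTS_KEY_SIZE;
                return 0;
        }
        if (S_ISDIR(i_mode) || S_ISLNK(i_mode)) {
                if (filename_mode != FS_ENCRYPTION_MODE_AES_256_CTS)
                        return -ENOKEY;
                *cipher = "cts(cbc(aes))";
                *keysize = FS_AES_256_CTS_KEY_SIZE;
                return 0;
        }
        return -ENOKEY; /* sockets, FIFOs, device nodes: not supported */
}

int main(void)
{
        const char *cipher;
        int keysize;

        if (!pick_cipher(S_IFREG, FS_ENCRYPTION_MODE_AES_256_XTS,
                         FS_ENCRYPTION_MODE_AES_256_CTS, &cipher, &keysize))
                printf("regular file -> %s, %d-byte key\n", cipher, keysize);
        return 0;
}
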
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 67415e0e6af0..e8b365000d73 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -260,11 +260,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
                         /* Directory is encrypted */
                         err = fscrypt_fname_disk_to_usr(inode,
                                                 0, 0, &de_name, &fstr);
+                        de_name = fstr;
                         fstr.len = save_len;
-                        if (err < 0)
+                        if (err)
                                 goto errout;
                         if (!dir_emit(ctx,
-                            fstr.name, err,
+                            de_name.name, de_name.len,
                             le32_to_cpu(de->inode),
                             get_dtype(sb, de->file_type)))
                                 goto done;
@@ -627,7 +628,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
                       int buf_size)
 {
         struct ext4_dir_entry_2 *de;
-        int nlen, rlen;
+        int rlen;
         unsigned int offset = 0;
         char *top;
 
@@ -637,7 +638,6 @@
                 if (ext4_check_dir_entry(dir, NULL, de, bh,
                                          buf, buf_size, offset))
                         return -EFSCORRUPTED;
-                nlen = EXT4_DIR_REC_LEN(de->name_len);
                 rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
                 de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
                 offset += rlen;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ea31931386ec..282a51b07c57 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -262,6 +262,9 @@ struct ext4_io_submit {
                                  (s)->s_first_ino)
 #endif
 #define EXT4_BLOCK_ALIGN(size, blkbits)         ALIGN((size), (1 << (blkbits)))
+#define EXT4_MAX_BLOCKS(size, offset, blkbits) \
+        ((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
+                                                                  blkbits))
 
 /* Translate a block number to a cluster number */
 #define EXT4_B2C(sbi, blk)      ((blk) >> (sbi)->s_cluster_bits)
@@ -1117,9 +1120,15 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_POSIX_ACL            0x08000 /* POSIX Access Control Lists */
 #define EXT4_MOUNT_NO_AUTO_DA_ALLOC     0x10000 /* No auto delalloc mapping */
 #define EXT4_MOUNT_BARRIER              0x20000 /* Use block barriers */
-#define EXT4_MOUNT_QUOTA                0x80000 /* Some quota option set */
-#define EXT4_MOUNT_USRQUOTA             0x100000 /* "old" user quota */
-#define EXT4_MOUNT_GRPQUOTA             0x200000 /* "old" group quota */
+#define EXT4_MOUNT_QUOTA                0x40000 /* Some quota option set */
+#define EXT4_MOUNT_USRQUOTA             0x80000 /* "old" user quota,
+                                                  * enable enforcement for hidden
+                                                  * quota files */
+#define EXT4_MOUNT_GRPQUOTA             0x100000 /* "old" group quota, enable
+                                                  * enforcement for hidden quota
+                                                  * files */
+#define EXT4_MOUNT_PRJQUOTA             0x200000 /* Enable project quota
+                                                  * enforcement */
 #define EXT4_MOUNT_DIOREAD_NOLOCK       0x400000 /* Enable support for dio read nolocking */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM     0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
@@ -1636,26 +1645,6 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
  * Feature set definitions
  */
 
-/* Use the ext4_{has,set,clear}_feature_* helpers; these will be removed */
-#define EXT4_HAS_COMPAT_FEATURE(sb,mask)                        \
-        ((EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask)) != 0)
-#define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask)                     \
-        ((EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask)) != 0)
-#define EXT4_HAS_INCOMPAT_FEATURE(sb,mask)                      \
-        ((EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask)) != 0)
-#define EXT4_SET_COMPAT_FEATURE(sb,mask)                        \
-        EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT4_SET_RO_COMPAT_FEATURE(sb,mask)                     \
-        EXT4_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT4_SET_INCOMPAT_FEATURE(sb,mask)                      \
-        EXT4_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT4_CLEAR_COMPAT_FEATURE(sb,mask)                      \
-        EXT4_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT4_CLEAR_RO_COMPAT_FEATURE(sb,mask)                   \
-        EXT4_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT4_CLEAR_INCOMPAT_FEATURE(sb,mask)                    \
-        EXT4_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
 #define EXT4_FEATURE_COMPAT_DIR_PREALLOC        0x0001
 #define EXT4_FEATURE_COMPAT_IMAGIC_INODES       0x0002
 #define EXT4_FEATURE_COMPAT_HAS_JOURNAL         0x0004
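
[Editor's note] The comment this series removes from extents.c ("If blocksize = 4096 offset = 3072 and len = 2048") is exactly the case the new EXT4_MAX_BLOCKS() macro handles: a naive len >> blkbits would yield zero blocks even though the byte range spans two blocks. A minimal userspace check of the same arithmetic, with ALIGN() reimplemented locally (nothing else is assumed):

/* Standalone check of the EXT4_MAX_BLOCKS() arithmetic added above. */
#include <stdio.h>

#define ALIGN(x, a)                     (((x) + (a) - 1) & ~((a) - 1))
#define EXT4_BLOCK_ALIGN(size, blkbits) ALIGN((size), (1UL << (blkbits)))
#define EXT4_MAX_BLOCKS(size, offset, blkbits) \
        ((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
                                                                  blkbits))

int main(void)
{
        unsigned long len = 2048, offset = 3072, blkbits = 12; /* 4k blocks */

        printf("naive:           %lu blocks\n", len >> blkbits);          /* 0 */
        printf("EXT4_MAX_BLOCKS: %lu blocks\n",
               EXT4_MAX_BLOCKS(len, offset, blkbits));                    /* 2 */
        return 0;
}
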
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index d7ccb7f51dfc..c930a0110fb4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4679,6 +4679,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
         unsigned int credits;
         loff_t epos;
 
+        BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
         map.m_lblk = offset;
         map.m_len = len;
         /*
@@ -4693,13 +4694,7 @@
          * credits to insert 1 extent into extent tree
          */
         credits = ext4_chunk_trans_blocks(inode, len);
-        /*
-         * We can only call ext_depth() on extent based inodes
-         */
-        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-                depth = ext_depth(inode);
-        else
-                depth = -1;
+        depth = ext_depth(inode);
 
 retry:
         while (ret >= 0 && len) {
@@ -4966,13 +4961,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 
         trace_ext4_fallocate_enter(inode, offset, len, mode);
         lblk = offset >> blkbits;
-        /*
-         * We can't just convert len to max_blocks because
-         * If blocksize = 4096 offset = 3072 and len = 2048
-         */
-        max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
-                - lblk;
 
+        max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
         if (mode & FALLOC_FL_KEEP_SIZE)
                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
@@ -5035,12 +5025,8 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
         unsigned int credits, blkbits = inode->i_blkbits;
 
         map.m_lblk = offset >> blkbits;
-        /*
-         * We can't just convert len to max_blocks because
-         * If blocksize = 4096 offset = 3072 and len = 2048
-         */
-        max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
-                      map.m_lblk);
+        max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
+
         /*
          * This is somewhat ugly but the idea is clear: When transaction is
          * reserved, everything goes into it. Otherwise we rather start several
@@ -5734,6 +5720,9 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
                         up_write(&EXT4_I(inode)->i_data_sem);
                         goto out_stop;
                 }
+        } else {
+                ext4_ext_drop_refs(path);
+                kfree(path);
         }
 
         ret = ext4_es_remove_extent(inode, offset_lblk,
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 261ac3734c58..25f763f2201a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -91,9 +91,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
 static ssize_t
 ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
-        struct file *file = iocb->ki_filp;
         struct inode *inode = file_inode(iocb->ki_filp);
-        struct blk_plug plug;
         int o_direct = iocb->ki_flags & IOCB_DIRECT;
         int unaligned_aio = 0;
         int overwrite = 0;
@@ -134,18 +132,16 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
         if (o_direct) {
                 size_t length = iov_iter_count(from);
                 loff_t pos = iocb->ki_pos;
-                blk_start_plug(&plug);
 
                 /* check whether we do a DIO overwrite or not */
                 if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
-                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
+                    pos + length <= i_size_read(inode)) {
                         struct ext4_map_blocks map;
                         unsigned int blkbits = inode->i_blkbits;
                         int err, len;
 
                         map.m_lblk = pos >> blkbits;
-                        map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
-                                - map.m_lblk;
+                        map.m_len = EXT4_MAX_BLOCKS(length, pos, blkbits);
                         len = map.m_len;
 
                         err = ext4_map_blocks(NULL, inode, &map, 0);
@@ -171,8 +167,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
         if (ret > 0)
                 ret = generic_write_sync(iocb, ret);
-        if (o_direct)
-                blk_finish_plug(&plug);
 
         return ret;
 
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 5c4372512ef7..88effb1053c7 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -61,6 +61,13 @@ static int ext4_sync_parent(struct inode *inode)
                         break;
                 iput(inode);
                 inode = next;
+                /*
+                 * The directory inode may have gone through rmdir by now. But
+                 * the inode itself and its blocks are still allocated (we hold
+                 * a reference to the inode so it didn't go through
+                 * ext4_evict_inode()) and so we are safe to flush metadata
+                 * blocks and the inode.
+                 */
                 ret = sync_mapping_buffers(inode->i_mapping);
                 if (ret)
                         break;
@@ -107,7 +114,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
         if (!journal) {
                 ret = __generic_file_fsync(file, start, end, datasync);
-                if (!ret && !hlist_empty(&inode->i_dentry))
+                if (!ret)
                         ret = ext4_sync_parent(inode);
                 if (test_opt(inode->i_sb, BARRIER))
                         goto issue_flush;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9e66cd1d7b78..170421edfdfe 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -802,7 +802,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
         } else
                 inode_init_owner(inode, dir, mode);
 
-        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
+        if (ext4_has_feature_project(sb) &&
             ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
                 ei->i_projid = EXT4_I(dir)->i_projid;
         else
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c6ea25a190f8..cd918823b352 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -647,11 +647,19 @@ found:
                 /*
                  * We have to zeroout blocks before inserting them into extent
                  * status tree. Otherwise someone could look them up there and
-                 * use them before they are really zeroed.
+                 * use them before they are really zeroed. We also have to
+                 * unmap metadata before zeroing as otherwise writeback can
+                 * overwrite zeros with stale data from block device.
                  */
                 if (flags & EXT4_GET_BLOCKS_ZERO &&
                     map->m_flags & EXT4_MAP_MAPPED &&
                     map->m_flags & EXT4_MAP_NEW) {
+                        ext4_lblk_t i;
+
+                        for (i = 0; i < map->m_len; i++) {
+                                unmap_underlying_metadata(inode->i_sb->s_bdev,
+                                                          map->m_pblk + i);
+                        }
                         ret = ext4_issue_zeroout(inode, map->m_lblk,
                                                  map->m_pblk, map->m_len);
                         if (ret) {
@@ -1649,6 +1657,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                         BUG_ON(!PageLocked(page));
                         BUG_ON(PageWriteback(page));
                         if (invalidate) {
+                                if (page_mapped(page))
+                                        clear_page_dirty_for_io(page);
                                 block_invalidatepage(page, 0, PAGE_SIZE);
                                 ClearPageUptodate(page);
                         }
@@ -3526,35 +3536,31 @@ out:
 
 static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
 {
-        int unlocked = 0;
-        struct inode *inode = iocb->ki_filp->f_mapping->host;
+        struct address_space *mapping = iocb->ki_filp->f_mapping;
+        struct inode *inode = mapping->host;
         ssize_t ret;
 
-        if (ext4_should_dioread_nolock(inode)) {
-                /*
-                 * Nolock dioread optimization may be dynamically disabled
-                 * via ext4_inode_block_unlocked_dio(). Check inode's state
-                 * while holding extra i_dio_count ref.
-                 */
-                inode_dio_begin(inode);
-                smp_mb();
-                if (unlikely(ext4_test_inode_state(inode,
-                                                    EXT4_STATE_DIOREAD_LOCK)))
-                        inode_dio_end(inode);
-                else
-                        unlocked = 1;
-        }
+        /*
+         * Shared inode_lock is enough for us - it protects against concurrent
+         * writes & truncates and since we take care of writing back page cache,
+         * we are protected against page writeback as well.
+         */
+        inode_lock_shared(inode);
         if (IS_DAX(inode)) {
-                ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block,
-                                NULL, unlocked ? 0 : DIO_LOCKING);
+                ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block, NULL, 0);
         } else {
+                size_t count = iov_iter_count(iter);
+
+                ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
+                                                   iocb->ki_pos + count);
+                if (ret)
+                        goto out_unlock;
                 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
                                            iter, ext4_dio_get_block,
-                                           NULL, NULL,
-                                           unlocked ? 0 : DIO_LOCKING);
+                                           NULL, NULL, 0);
         }
-        if (unlocked)
-                inode_dio_end(inode);
+out_unlock:
+        inode_unlock_shared(inode);
         return ret;
 }
 
@@ -3890,7 +3896,7 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
 }
 
 /*
- * ext4_punch_hole: punches a hole in a file by releaseing the blocks
+ * ext4_punch_hole: punches a hole in a file by releasing the blocks
  * associated with the given offset and length
  *
  * @inode:  File inode
@@ -3919,7 +3925,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
          * Write out all dirty pages to avoid race conditions
          * Then release them.
          */
-        if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                 ret = filemap_write_and_wait_range(mapping, offset,
                                                    offset + length - 1);
                 if (ret)
@@ -4414,7 +4420,7 @@ static inline void ext4_iget_extra_inode(struct inode *inode,
 
 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
 {
-        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_PROJECT))
+        if (!ext4_has_feature_project(inode->i_sb))
                 return -EOPNOTSUPP;
         *projid = EXT4_I(inode)->i_projid;
         return 0;
@@ -4481,7 +4487,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
         i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
         i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
-        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
+        if (ext4_has_feature_project(sb) &&
             EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
             EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
                 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
@@ -4814,14 +4820,14 @@ static int ext4_do_update_inode(handle_t *handle,
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact
 */
-                if (!ei->i_dtime) {
+                if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+                        raw_inode->i_uid_high = 0;
+                        raw_inode->i_gid_high = 0;
+                } else {
                         raw_inode->i_uid_high =
                                 cpu_to_le16(high_16_bits(i_uid));
                         raw_inode->i_gid_high =
                                 cpu_to_le16(high_16_bits(i_gid));
-                } else {
-                        raw_inode->i_uid_high = 0;
-                        raw_inode->i_gid_high = 0;
                 }
         } else {
                 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
@@ -4885,8 +4891,7 @@
                 }
         }
 
-        BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-                        EXT4_FEATURE_RO_COMPAT_PROJECT) &&
+        BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
                i_projid != EXT4_DEF_PROJID);
 
         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
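
[Editor's note] The ext4_do_update_inode() hunk above decides when the upper 16 bits of the uid/gid are written to the on-disk inode. A userspace sketch of that 16-bit split is below; high_16_bits()/low_16_bits() are reimplemented locally and the field names mirror the surrounding context only.

/* Sketch of the on-disk i_uid_low / i_uid_high split referenced above. */
#include <stdint.h>
#include <stdio.h>

static uint16_t low_16_bits(uint32_t uid)  { return uid & 0xFFFF; }
static uint16_t high_16_bits(uint32_t uid) { return (uid & 0xFFFF0000) >> 16; }

int main(void)
{
        uint32_t uid = 100000; /* a uid that does not fit in 16 bits */

        printf("i_uid_low = %u, i_uid_high = %u\n",
               low_16_bits(uid), high_16_bits(uid)); /* 34464, 1 */
        return 0;
}
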
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 1bb7df5e4536..bf5ae8ebbc97 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -19,8 +19,6 @@
 #include "ext4_jbd2.h"
 #include "ext4.h"
 
-#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
-
 /**
  * Swap memory between @a and @b for @len bytes.
  *
@@ -310,8 +308,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
         struct ext4_inode *raw_inode;
         struct dquot *transfer_to[MAXQUOTAS] = { };
 
-        if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-                        EXT4_FEATURE_RO_COMPAT_PROJECT)) {
+        if (!ext4_has_feature_project(sb)) {
                 if (projid != EXT4_DEF_PROJID)
                         return -EOPNOTSUPP;
                 else
@@ -772,6 +769,9 @@ resizefs_out:
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
                 struct fscrypt_policy policy;
 
+                if (!ext4_has_feature_encrypt(sb))
+                        return -EOPNOTSUPP;
+
                 if (copy_from_user(&policy,
                            (struct fscrypt_policy __user *)arg,
                            sizeof(policy)))
@@ -842,8 +842,7 @@ resizefs_out:
                 ext4_get_inode_flags(ei);
                 fa.fsx_xflags = ext4_iflags_to_xflags(ei->i_flags & EXT4_FL_USER_VISIBLE);
 
-                if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-                                EXT4_FEATURE_RO_COMPAT_PROJECT)) {
+                if (ext4_has_feature_project(inode->i_sb)) {
                         fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
                                 EXT4_I(inode)->i_projid);
                 }
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index a920c5d29fac..6fc14def0c70 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -598,6 +598,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                 return -EOPNOTSUPP;
         }
 
+        if (ext4_encrypted_inode(orig_inode) ||
+            ext4_encrypted_inode(donor_inode)) {
+                ext4_msg(orig_inode->i_sb, KERN_ERR,
+                         "Online defrag not supported for encrypted files");
+                return -EOPNOTSUPP;
+        }
+
         /* Protect orig and donor inodes against a truncate */
         lock_two_nondirectories(orig_inode, donor_inode);
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 34c0142caf6a..c344b819cffa 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -639,7 +639,7 @@ static struct stats dx_show_leaf(struct inode *dir,
                                         res = fscrypt_fname_alloc_buffer(
                                                 dir, len,
                                                 &fname_crypto_str);
-                                        if (res < 0)
+                                        if (res)
                                                 printk(KERN_WARNING "Error "
                                                         "allocating crypto "
                                                         "buffer--skipping "
@@ -647,7 +647,7 @@
                                 res = fscrypt_fname_disk_to_usr(dir,
                                                 0, 0, &de_name,
                                                 &fname_crypto_str);
-                                if (res < 0) {
+                                if (res) {
                                         printk(KERN_WARNING "Error "
                                                 "converting filename "
                                                 "from disk to usr"
@@ -1011,7 +1011,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
                         err = fscrypt_fname_disk_to_usr(dir, hinfo->hash,
                                         hinfo->minor_hash, &de_name,
                                         &fname_crypto_str);
-                        if (err < 0) {
+                        if (err) {
                                 count = err;
                                 goto errout;
                         }
@@ -2044,33 +2044,31 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
         frame->entries = entries;
         frame->at = entries;
         frame->bh = bh;
-        bh = bh2;
 
         retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
         if (retval)
                 goto out_frames;
-        retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
+        retval = ext4_handle_dirty_dirent_node(handle, dir, bh2);
         if (retval)
                 goto out_frames;
 
-        de = do_split(handle,dir, &bh, frame, &fname->hinfo);
+        de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
         if (IS_ERR(de)) {
                 retval = PTR_ERR(de);
                 goto out_frames;
         }
-        dx_release(frames);
 
-        retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
-        brelse(bh);
-        return retval;
+        retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2);
 out_frames:
         /*
          * Even if the block split failed, we have to properly write
          * out all the changes we did so far. Otherwise we can end up
          * with corrupted filesystem.
          */
-        ext4_mark_inode_dirty(handle, dir);
+        if (retval)
+                ext4_mark_inode_dirty(handle, dir);
         dx_release(frames);
+        brelse(bh2);
         return retval;
 }
 
@@ -3144,7 +3142,7 @@ static int ext4_symlink(struct inode *dir,
                 istr.name = (const unsigned char *) symname;
                 istr.len = len;
                 err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
-                if (err < 0)
+                if (err)
                         goto err_drop_inode;
                 sd->len = cpu_to_le16(ostr.len);
                 disk_link.name = (char *) sd;
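
[Editor's note] The make_indexed_dir() fix above ("ext4: release bh in make_indexed_dir") converges all exits on the out_frames label so the split-block buffer and the dx frames are released on both success and error paths. A generic sketch of that single-exit cleanup shape is below; the resource names are illustrative only, not ext4 code.

/* Generic single-exit cleanup sketch mirroring the make_indexed_dir() fix. */
#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
        int retval = 0;
        char *bh2 = malloc(64);    /* stands in for the split block buffer */
        char *frames = malloc(64); /* stands in for the dx_frame array */

        if (!bh2 || !frames) {
                retval = -1;
                goto out_frames;
        }
        /* ... operate on bh2/frames; any failure sets retval and falls through ... */

out_frames:
        if (retval)
                fprintf(stderr, "writing back partial state before bailing out\n");
        free(frames); /* released on every path, like dx_release() */
        free(bh2);    /* released on every path, like brelse(bh2) */
        return retval;
}

int main(void)
{
        return do_work() ? 1 : 0;
}
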
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index a6132a730967..b4cbee936cf8 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -405,14 +405,12 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 {
         struct page *data_page = NULL;
         struct inode *inode = page->mapping->host;
-        unsigned block_start, blocksize;
+        unsigned block_start;
         struct buffer_head *bh, *head;
         int ret = 0;
         int nr_submitted = 0;
         int nr_to_submit = 0;
 
-        blocksize = 1 << inode->i_blkbits;
-
         BUG_ON(!PageLocked(page));
         BUG_ON(PageWriteback(page));
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3ec8708989ca..6db81fbcbaa6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -78,6 +78,8 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly);
78static void ext4_destroy_lazyinit_thread(void); 78static void ext4_destroy_lazyinit_thread(void);
79static void ext4_unregister_li_request(struct super_block *sb); 79static void ext4_unregister_li_request(struct super_block *sb);
80static void ext4_clear_request_list(void); 80static void ext4_clear_request_list(void);
81static struct inode *ext4_get_journal_inode(struct super_block *sb,
82 unsigned int journal_inum);
81 83
82/* 84/*
83 * Lock ordering 85 * Lock ordering
@@ -1267,7 +1269,7 @@ enum {
1267 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 1269 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
1268 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, 1270 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
1269 Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, 1271 Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
1270 Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax, 1272 Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
1271 Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, 1273 Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
1272 Opt_lazytime, Opt_nolazytime, 1274 Opt_lazytime, Opt_nolazytime,
1273 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, 1275 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
@@ -1327,6 +1329,7 @@ static const match_table_t tokens = {
1327 {Opt_noquota, "noquota"}, 1329 {Opt_noquota, "noquota"},
1328 {Opt_quota, "quota"}, 1330 {Opt_quota, "quota"},
1329 {Opt_usrquota, "usrquota"}, 1331 {Opt_usrquota, "usrquota"},
1332 {Opt_prjquota, "prjquota"},
1330 {Opt_barrier, "barrier=%u"}, 1333 {Opt_barrier, "barrier=%u"},
1331 {Opt_barrier, "barrier"}, 1334 {Opt_barrier, "barrier"},
1332 {Opt_nobarrier, "nobarrier"}, 1335 {Opt_nobarrier, "nobarrier"},
@@ -1546,8 +1549,11 @@ static const struct mount_opts {
1546 MOPT_SET | MOPT_Q}, 1549 MOPT_SET | MOPT_Q},
1547 {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, 1550 {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
1548 MOPT_SET | MOPT_Q}, 1551 MOPT_SET | MOPT_Q},
1552 {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
1553 MOPT_SET | MOPT_Q},
1549 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 1554 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
1550 EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q}, 1555 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
1556 MOPT_CLEAR | MOPT_Q},
1551 {Opt_usrjquota, 0, MOPT_Q}, 1557 {Opt_usrjquota, 0, MOPT_Q},
1552 {Opt_grpjquota, 0, MOPT_Q}, 1558 {Opt_grpjquota, 0, MOPT_Q},
1553 {Opt_offusrjquota, 0, MOPT_Q}, 1559 {Opt_offusrjquota, 0, MOPT_Q},
@@ -1836,13 +1842,17 @@ static int parse_options(char *options, struct super_block *sb,
1836 return 0; 1842 return 0;
1837 } 1843 }
1838#ifdef CONFIG_QUOTA 1844#ifdef CONFIG_QUOTA
1839 if (ext4_has_feature_quota(sb) && 1845 /*
1840 (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) { 1846 * We do the test below only for project quotas. 'usrquota' and
1841 ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota " 1847 * 'grpquota' mount options are allowed even without quota feature
1842 "mount options ignored."); 1848 * to support legacy quotas in quota files.
1843 clear_opt(sb, USRQUOTA); 1849 */
1844 clear_opt(sb, GRPQUOTA); 1850 if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
1845 } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { 1851 ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
1852 "Cannot enable project quota enforcement.");
1853 return 0;
1854 }
1855 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1846 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) 1856 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1847 clear_opt(sb, USRQUOTA); 1857 clear_opt(sb, USRQUOTA);
1848 1858
@@ -2741,7 +2751,6 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
2741 sb = elr->lr_super; 2751 sb = elr->lr_super;
2742 ngroups = EXT4_SB(sb)->s_groups_count; 2752 ngroups = EXT4_SB(sb)->s_groups_count;
2743 2753
2744 sb_start_write(sb);
2745 for (group = elr->lr_next_group; group < ngroups; group++) { 2754 for (group = elr->lr_next_group; group < ngroups; group++) {
2746 gdp = ext4_get_group_desc(sb, group, NULL); 2755 gdp = ext4_get_group_desc(sb, group, NULL);
2747 if (!gdp) { 2756 if (!gdp) {
@@ -2768,8 +2777,6 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
2768 elr->lr_next_sched = jiffies + elr->lr_timeout; 2777 elr->lr_next_sched = jiffies + elr->lr_timeout;
2769 elr->lr_next_group = group + 1; 2778 elr->lr_next_group = group + 1;
2770 } 2779 }
2771 sb_end_write(sb);
2772
2773 return ret; 2780 return ret;
2774} 2781}
2775 2782
@@ -2834,19 +2841,43 @@ cont_thread:
2834 mutex_unlock(&eli->li_list_mtx); 2841 mutex_unlock(&eli->li_list_mtx);
2835 goto exit_thread; 2842 goto exit_thread;
2836 } 2843 }
2837
2838 list_for_each_safe(pos, n, &eli->li_request_list) { 2844 list_for_each_safe(pos, n, &eli->li_request_list) {
2845 int err = 0;
2846 int progress = 0;
2839 elr = list_entry(pos, struct ext4_li_request, 2847 elr = list_entry(pos, struct ext4_li_request,
2840 lr_request); 2848 lr_request);
2841 2849
2842 if (time_after_eq(jiffies, elr->lr_next_sched)) { 2850 if (time_before(jiffies, elr->lr_next_sched)) {
2843 if (ext4_run_li_request(elr) != 0) { 2851 if (time_before(elr->lr_next_sched, next_wakeup))
2844 /* error, remove the lazy_init job */ 2852 next_wakeup = elr->lr_next_sched;
2845 ext4_remove_li_request(elr); 2853 continue;
2846 continue; 2854 }
2855 if (down_read_trylock(&elr->lr_super->s_umount)) {
2856 if (sb_start_write_trylock(elr->lr_super)) {
2857 progress = 1;
2858 /*
2859 * We hold sb->s_umount, sb can not
2860 * be removed from the list, it is
2861 * now safe to drop li_list_mtx
2862 */
2863 mutex_unlock(&eli->li_list_mtx);
2864 err = ext4_run_li_request(elr);
2865 sb_end_write(elr->lr_super);
2866 mutex_lock(&eli->li_list_mtx);
2867 n = pos->next;
2847 } 2868 }
2869 up_read((&elr->lr_super->s_umount));
2870 }
2871 /* error, remove the lazy_init job */
2872 if (err) {
2873 ext4_remove_li_request(elr);
2874 continue;
2875 }
2876 if (!progress) {
2877 elr->lr_next_sched = jiffies +
2878 (prandom_u32()
2879 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
2848 } 2880 }
2849
2850 if (time_before(elr->lr_next_sched, next_wakeup)) 2881 if (time_before(elr->lr_next_sched, next_wakeup))
2851 next_wakeup = elr->lr_next_sched; 2882 next_wakeup = elr->lr_next_sched;
2852 } 2883 }
@@ -3179,6 +3210,8 @@ int ext4_calculate_overhead(struct super_block *sb)
3179{ 3210{
3180 struct ext4_sb_info *sbi = EXT4_SB(sb); 3211 struct ext4_sb_info *sbi = EXT4_SB(sb);
3181 struct ext4_super_block *es = sbi->s_es; 3212 struct ext4_super_block *es = sbi->s_es;
3213 struct inode *j_inode;
3214 unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
3182 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 3215 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3183 ext4_fsblk_t overhead = 0; 3216 ext4_fsblk_t overhead = 0;
3184 char *buf = (char *) get_zeroed_page(GFP_NOFS); 3217 char *buf = (char *) get_zeroed_page(GFP_NOFS);
@@ -3209,10 +3242,23 @@ int ext4_calculate_overhead(struct super_block *sb)
3209 memset(buf, 0, PAGE_SIZE); 3242 memset(buf, 0, PAGE_SIZE);
3210 cond_resched(); 3243 cond_resched();
3211 } 3244 }
3212 /* Add the internal journal blocks as well */ 3245
3246 /*
3247 * Add the internal journal blocks whether the journal has been
3248 * loaded or not
3249 */
3213 if (sbi->s_journal && !sbi->journal_bdev) 3250 if (sbi->s_journal && !sbi->journal_bdev)
3214 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); 3251 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
3215 3252 else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
3253 j_inode = ext4_get_journal_inode(sb, j_inum);
3254 if (j_inode) {
3255 j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
3256 overhead += EXT4_NUM_B2C(sbi, j_blocks);
3257 iput(j_inode);
3258 } else {
3259 ext4_msg(sb, KERN_ERR, "can't get journal size");
3260 }
3261 }
3216 sbi->s_overhead = overhead; 3262 sbi->s_overhead = overhead;
3217 smp_wmb(); 3263 smp_wmb();
3218 free_page((unsigned long) buf); 3264 free_page((unsigned long) buf);
@@ -4208,18 +4254,16 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
4208 write_unlock(&journal->j_state_lock); 4254 write_unlock(&journal->j_state_lock);
4209} 4255}
4210 4256
4211static journal_t *ext4_get_journal(struct super_block *sb, 4257static struct inode *ext4_get_journal_inode(struct super_block *sb,
4212 unsigned int journal_inum) 4258 unsigned int journal_inum)
4213{ 4259{
4214 struct inode *journal_inode; 4260 struct inode *journal_inode;
4215 journal_t *journal;
4216
4217 BUG_ON(!ext4_has_feature_journal(sb));
4218
4219 /* First, test for the existence of a valid inode on disk. Bad
4220 * things happen if we iget() an unused inode, as the subsequent
4221 * iput() will try to delete it. */
4222 4261
4262 /*
4263 * Test for the existence of a valid inode on disk. Bad things
4264 * happen if we iget() an unused inode, as the subsequent iput()
4265 * will try to delete it.
4266 */
4223 journal_inode = ext4_iget(sb, journal_inum); 4267 journal_inode = ext4_iget(sb, journal_inum);
4224 if (IS_ERR(journal_inode)) { 4268 if (IS_ERR(journal_inode)) {
4225 ext4_msg(sb, KERN_ERR, "no journal found"); 4269 ext4_msg(sb, KERN_ERR, "no journal found");
@@ -4239,6 +4283,20 @@ static journal_t *ext4_get_journal(struct super_block *sb,
4239 iput(journal_inode); 4283 iput(journal_inode);
4240 return NULL; 4284 return NULL;
4241 } 4285 }
4286 return journal_inode;
4287}
4288
4289static journal_t *ext4_get_journal(struct super_block *sb,
4290 unsigned int journal_inum)
4291{
4292 struct inode *journal_inode;
4293 journal_t *journal;
4294
4295 BUG_ON(!ext4_has_feature_journal(sb));
4296
4297 journal_inode = ext4_get_journal_inode(sb, journal_inum);
4298 if (!journal_inode)
4299 return NULL;
4242 4300
4243 journal = jbd2_journal_init_inode(journal_inode); 4301 journal = jbd2_journal_init_inode(journal_inode);
4244 if (!journal) { 4302 if (!journal) {
@@ -5250,12 +5308,18 @@ static int ext4_enable_quotas(struct super_block *sb)
5250 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 5308 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
5251 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 5309 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
5252 }; 5310 };
5311 bool quota_mopt[EXT4_MAXQUOTAS] = {
5312 test_opt(sb, USRQUOTA),
5313 test_opt(sb, GRPQUOTA),
5314 test_opt(sb, PRJQUOTA),
5315 };
5253 5316
5254 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; 5317 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
5255 for (type = 0; type < EXT4_MAXQUOTAS; type++) { 5318 for (type = 0; type < EXT4_MAXQUOTAS; type++) {
5256 if (qf_inums[type]) { 5319 if (qf_inums[type]) {
5257 err = ext4_quota_enable(sb, type, QFMT_VFS_V1, 5320 err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
5258 DQUOT_USAGE_ENABLED); 5321 DQUOT_USAGE_ENABLED |
5322 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
5259 if (err) { 5323 if (err) {
5260 ext4_warning(sb, 5324 ext4_warning(sb,
5261 "Failed to enable quota tracking " 5325 "Failed to enable quota tracking "
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 4d83d9e05f2e..fdf1c6154745 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -30,7 +30,6 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
30 char *caddr, *paddr = NULL; 30 char *caddr, *paddr = NULL;
31 struct fscrypt_str cstr, pstr; 31 struct fscrypt_str cstr, pstr;
32 struct fscrypt_symlink_data *sd; 32 struct fscrypt_symlink_data *sd;
33 loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
34 int res; 33 int res;
35 u32 max_size = inode->i_sb->s_blocksize; 34 u32 max_size = inode->i_sb->s_blocksize;
36 35
@@ -49,7 +48,6 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
49 if (IS_ERR(cpage)) 48 if (IS_ERR(cpage))
50 return ERR_CAST(cpage); 49 return ERR_CAST(cpage);
51 caddr = page_address(cpage); 50 caddr = page_address(cpage);
52 caddr[size] = 0;
53 } 51 }
54 52
55 /* Symlink is encrypted */ 53 /* Symlink is encrypted */
@@ -65,16 +63,14 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
65 res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr); 63 res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
66 if (res) 64 if (res)
67 goto errout; 65 goto errout;
66 paddr = pstr.name;
68 67
69 res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr); 68 res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
70 if (res < 0) 69 if (res)
71 goto errout; 70 goto errout;
72 71
73 paddr = pstr.name;
74
75 /* Null-terminate the name */ 72 /* Null-terminate the name */
76 if (res <= pstr.len) 73 paddr[pstr.len] = '\0';
77 paddr[res] = '\0';
78 if (cpage) 74 if (cpage)
79 put_page(cpage); 75 put_page(cpage);
80 set_delayed_call(done, kfree_link, paddr); 76 set_delayed_call(done, kfree_link, paddr);
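
The symlink.c change follows the new fscrypto convention (also visible in the f2fs hunks further down): fscrypt_fname_disk_to_usr() now returns 0 on success instead of the output length, and the decrypted length is taken from pstr.len, so the terminating NUL goes at paddr[pstr.len]. A minimal sketch of a caller written against that "0 on success, length in out->len" contract; decrypt_name() and the locally declared string struct are stand-ins for the fscrypto API, not the real functions.

/*
 * Sketch of the return-value convention the callers above were
 * converted to: 0 on success, -errno on failure, produced length
 * reported through the output string's len field.
 */
#include <stdio.h>
#include <string.h>

struct fscrypt_str_sk { char *name; unsigned int len; };

static int decrypt_name(const struct fscrypt_str_sk *in,
			struct fscrypt_str_sk *out)
{
	/* pretend decryption: copy and record the produced length */
	memcpy(out->name, in->name, in->len);
	out->len = in->len;
	return 0;
}

int main(void)
{
	char cbuf[] = "ciphertext", pbuf[32];
	struct fscrypt_str_sk cstr = { cbuf, (unsigned int)strlen(cbuf) };
	struct fscrypt_str_sk pstr = { pbuf, sizeof(pbuf) };

	if (decrypt_name(&cstr, &pstr))
		return 1;
	pstr.name[pstr.len] = '\0';	/* NUL goes at the reported length */
	printf("%s\n", pstr.name);
	return 0;
}
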
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 2eb935ca5d9e..c15d63389957 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -199,6 +199,8 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
199 } 199 }
200 200
201 while (!IS_LAST_ENTRY(entry)) { 201 while (!IS_LAST_ENTRY(entry)) {
202 if (entry->e_value_block != 0)
203 return -EFSCORRUPTED;
202 if (entry->e_value_size != 0 && 204 if (entry->e_value_size != 0 &&
203 (value_start + le16_to_cpu(entry->e_value_offs) < 205 (value_start + le16_to_cpu(entry->e_value_offs) <
204 (void *)e + sizeof(__u32) || 206 (void *)e + sizeof(__u32) ||
@@ -641,7 +643,7 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
641 size_t *min_offs, void *base, int *total) 643 size_t *min_offs, void *base, int *total)
642{ 644{
643 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { 645 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
644 if (!last->e_value_block && last->e_value_size) { 646 if (last->e_value_size) {
645 size_t offs = le16_to_cpu(last->e_value_offs); 647 size_t offs = le16_to_cpu(last->e_value_offs);
646 if (offs < *min_offs) 648 if (offs < *min_offs)
647 *min_offs = offs; 649 *min_offs = offs;
@@ -661,7 +663,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
661 /* Compute min_offs and last. */ 663 /* Compute min_offs and last. */
662 last = s->first; 664 last = s->first;
663 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { 665 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
664 if (!last->e_value_block && last->e_value_size) { 666 if (last->e_value_size) {
665 size_t offs = le16_to_cpu(last->e_value_offs); 667 size_t offs = le16_to_cpu(last->e_value_offs);
666 if (offs < min_offs) 668 if (offs < min_offs)
667 min_offs = offs; 669 min_offs = offs;
@@ -669,7 +671,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
669 } 671 }
670 free = min_offs - ((void *)last - s->base) - sizeof(__u32); 672 free = min_offs - ((void *)last - s->base) - sizeof(__u32);
671 if (!s->not_found) { 673 if (!s->not_found) {
672 if (!s->here->e_value_block && s->here->e_value_size) { 674 if (s->here->e_value_size) {
673 size_t size = le32_to_cpu(s->here->e_value_size); 675 size_t size = le32_to_cpu(s->here->e_value_size);
674 free += EXT4_XATTR_SIZE(size); 676 free += EXT4_XATTR_SIZE(size);
675 } 677 }
@@ -691,7 +693,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
691 s->here->e_name_len = name_len; 693 s->here->e_name_len = name_len;
692 memcpy(s->here->e_name, i->name, name_len); 694 memcpy(s->here->e_name, i->name, name_len);
693 } else { 695 } else {
694 if (!s->here->e_value_block && s->here->e_value_size) { 696 if (s->here->e_value_size) {
695 void *first_val = s->base + min_offs; 697 void *first_val = s->base + min_offs;
696 size_t offs = le16_to_cpu(s->here->e_value_offs); 698 size_t offs = le16_to_cpu(s->here->e_value_offs);
697 void *val = s->base + offs; 699 void *val = s->base + offs;
@@ -725,8 +727,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
725 last = s->first; 727 last = s->first;
726 while (!IS_LAST_ENTRY(last)) { 728 while (!IS_LAST_ENTRY(last)) {
727 size_t o = le16_to_cpu(last->e_value_offs); 729 size_t o = le16_to_cpu(last->e_value_offs);
728 if (!last->e_value_block && 730 if (last->e_value_size && o < offs)
729 last->e_value_size && o < offs)
730 last->e_value_offs = 731 last->e_value_offs =
731 cpu_to_le16(o + size); 732 cpu_to_le16(o + size);
732 last = EXT4_XATTR_NEXT(last); 733 last = EXT4_XATTR_NEXT(last);
@@ -1318,18 +1319,19 @@ retry:
1318 */ 1319 */
1319static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry, 1320static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
1320 int value_offs_shift, void *to, 1321 int value_offs_shift, void *to,
1321 void *from, size_t n, int blocksize) 1322 void *from, size_t n)
1322{ 1323{
1323 struct ext4_xattr_entry *last = entry; 1324 struct ext4_xattr_entry *last = entry;
1324 int new_offs; 1325 int new_offs;
1325 1326
1327 /* We always shift xattr headers further thus offsets get lower */
1328 BUG_ON(value_offs_shift > 0);
1329
1326 /* Adjust the value offsets of the entries */ 1330 /* Adjust the value offsets of the entries */
1327 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { 1331 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1328 if (!last->e_value_block && last->e_value_size) { 1332 if (last->e_value_size) {
1329 new_offs = le16_to_cpu(last->e_value_offs) + 1333 new_offs = le16_to_cpu(last->e_value_offs) +
1330 value_offs_shift; 1334 value_offs_shift;
1331 BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
1332 > blocksize);
1333 last->e_value_offs = cpu_to_le16(new_offs); 1335 last->e_value_offs = cpu_to_le16(new_offs);
1334 } 1336 }
1335 } 1337 }
@@ -1338,6 +1340,141 @@ static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
1338} 1340}
1339 1341
1340/* 1342/*
1343 * Move xattr pointed to by 'entry' from inode into external xattr block
1344 */
1345static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
1346 struct ext4_inode *raw_inode,
1347 struct ext4_xattr_entry *entry)
1348{
1349 struct ext4_xattr_ibody_find *is = NULL;
1350 struct ext4_xattr_block_find *bs = NULL;
1351 char *buffer = NULL, *b_entry_name = NULL;
1352 size_t value_offs, value_size;
1353 struct ext4_xattr_info i = {
1354 .value = NULL,
1355 .value_len = 0,
1356 .name_index = entry->e_name_index,
1357 };
1358 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
1359 int error;
1360
1361 value_offs = le16_to_cpu(entry->e_value_offs);
1362 value_size = le32_to_cpu(entry->e_value_size);
1363
1364 is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
1365 bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
1366 buffer = kmalloc(value_size, GFP_NOFS);
1367 b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
1368 if (!is || !bs || !buffer || !b_entry_name) {
1369 error = -ENOMEM;
1370 goto out;
1371 }
1372
1373 is->s.not_found = -ENODATA;
1374 bs->s.not_found = -ENODATA;
1375 is->iloc.bh = NULL;
1376 bs->bh = NULL;
1377
1378 /* Save the entry name and the entry value */
1379 memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
1380 memcpy(b_entry_name, entry->e_name, entry->e_name_len);
1381 b_entry_name[entry->e_name_len] = '\0';
1382 i.name = b_entry_name;
1383
1384 error = ext4_get_inode_loc(inode, &is->iloc);
1385 if (error)
1386 goto out;
1387
1388 error = ext4_xattr_ibody_find(inode, &i, is);
1389 if (error)
1390 goto out;
1391
1392 /* Remove the chosen entry from the inode */
1393 error = ext4_xattr_ibody_set(handle, inode, &i, is);
1394 if (error)
1395 goto out;
1396
1397 i.name = b_entry_name;
1398 i.value = buffer;
1399 i.value_len = value_size;
1400 error = ext4_xattr_block_find(inode, &i, bs);
1401 if (error)
1402 goto out;
1403
1404 /* Add entry which was removed from the inode into the block */
1405 error = ext4_xattr_block_set(handle, inode, &i, bs);
1406 if (error)
1407 goto out;
1408 error = 0;
1409out:
1410 kfree(b_entry_name);
1411 kfree(buffer);
1412 if (is)
1413 brelse(is->iloc.bh);
1414 kfree(is);
1415 kfree(bs);
1416
1417 return error;
1418}
1419
1420static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
1421 struct ext4_inode *raw_inode,
1422 int isize_diff, size_t ifree,
1423 size_t bfree, int *total_ino)
1424{
1425 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
1426 struct ext4_xattr_entry *small_entry;
1427 struct ext4_xattr_entry *entry;
1428 struct ext4_xattr_entry *last;
1429 unsigned int entry_size; /* EA entry size */
1430 unsigned int total_size; /* EA entry size + value size */
1431 unsigned int min_total_size;
1432 int error;
1433
1434 while (isize_diff > ifree) {
1435 entry = NULL;
1436 small_entry = NULL;
1437 min_total_size = ~0U;
1438 last = IFIRST(header);
1439 /* Find the entry best suited to be pushed into EA block */
1440 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1441 total_size =
1442 EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
1443 EXT4_XATTR_LEN(last->e_name_len);
1444 if (total_size <= bfree &&
1445 total_size < min_total_size) {
1446 if (total_size + ifree < isize_diff) {
1447 small_entry = last;
1448 } else {
1449 entry = last;
1450 min_total_size = total_size;
1451 }
1452 }
1453 }
1454
1455 if (entry == NULL) {
1456 if (small_entry == NULL)
1457 return -ENOSPC;
1458 entry = small_entry;
1459 }
1460
1461 entry_size = EXT4_XATTR_LEN(entry->e_name_len);
1462 total_size = entry_size +
1463 EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
1464 error = ext4_xattr_move_to_block(handle, inode, raw_inode,
1465 entry);
1466 if (error)
1467 return error;
1468
1469 *total_ino -= entry_size;
1470 ifree += total_size;
1471 bfree -= total_size;
1472 }
1473
1474 return 0;
1475}
1476
1477/*
1341 * Expand an inode by new_extra_isize bytes when EAs are present. 1478 * Expand an inode by new_extra_isize bytes when EAs are present.
1342 * Returns 0 on success or negative error number on failure. 1479 * Returns 0 on success or negative error number on failure.
1343 */ 1480 */
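
The new ext4_xattr_make_inode_space() factors out the selection loop that used to live inline in ext4_expand_extra_isize_ea(): on each pass it picks the in-inode entry whose total size (name plus value) fits in the external block's free space, preferring the smallest entry that, together with the space already free in the inode, covers the remaining deficit; failing that it falls back to a smaller candidate and loops again. A compact userspace model of that selection policy, operating on a plain array of entry sizes instead of on-disk xattr entries.

/*
 * Userspace model of the entry-selection policy in
 * ext4_xattr_make_inode_space(): sizes[] stands for the per-entry
 * total_size values; bfree is the block's free space, needed is the
 * remaining isize_diff, ifree the space already free in the inode.
 */
#include <stdio.h>

static int pick_entry(const unsigned int *sizes, int n,
		      unsigned int bfree, unsigned int needed,
		      unsigned int ifree)
{
	int entry = -1, small_entry = -1, i;
	unsigned int min_total = ~0U;

	for (i = 0; i < n; i++) {
		if (sizes[i] <= bfree && sizes[i] < min_total) {
			if (sizes[i] + ifree < needed) {
				small_entry = i;
			} else {
				entry = i;
				min_total = sizes[i];
			}
		}
	}
	return entry >= 0 ? entry : small_entry;	/* -1 models -ENOSPC */
}

int main(void)
{
	unsigned int sizes[] = { 48, 200, 96 };

	/* picks index 2: the smallest entry that covers the deficit */
	printf("chosen index: %d\n", pick_entry(sizes, 3, 256, 80, 16));
	return 0;
}
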
@@ -1345,14 +1482,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
1345 struct ext4_inode *raw_inode, handle_t *handle) 1482 struct ext4_inode *raw_inode, handle_t *handle)
1346{ 1483{
1347 struct ext4_xattr_ibody_header *header; 1484 struct ext4_xattr_ibody_header *header;
1348 struct ext4_xattr_entry *entry, *last, *first;
1349 struct buffer_head *bh = NULL; 1485 struct buffer_head *bh = NULL;
1350 struct ext4_xattr_ibody_find *is = NULL; 1486 size_t min_offs;
1351 struct ext4_xattr_block_find *bs = NULL; 1487 size_t ifree, bfree;
1352 char *buffer = NULL, *b_entry_name = NULL;
1353 size_t min_offs, free;
1354 int total_ino; 1488 int total_ino;
1355 void *base, *start, *end; 1489 void *base, *end;
1356 int error = 0, tried_min_extra_isize = 0; 1490 int error = 0, tried_min_extra_isize = 0;
1357 int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize); 1491 int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
1358 int isize_diff; /* How much do we need to grow i_extra_isize */ 1492 int isize_diff; /* How much do we need to grow i_extra_isize */
@@ -1368,34 +1502,24 @@ retry:
1368 goto out; 1502 goto out;
1369 1503
1370 header = IHDR(inode, raw_inode); 1504 header = IHDR(inode, raw_inode);
1371 entry = IFIRST(header);
1372 1505
1373 /* 1506 /*
1374 * Check if enough free space is available in the inode to shift the 1507 * Check if enough free space is available in the inode to shift the
1375 * entries ahead by new_extra_isize. 1508 * entries ahead by new_extra_isize.
1376 */ 1509 */
1377 1510
1378 base = start = entry; 1511 base = IFIRST(header);
1379 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 1512 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
1380 min_offs = end - base; 1513 min_offs = end - base;
1381 last = entry;
1382 total_ino = sizeof(struct ext4_xattr_ibody_header); 1514 total_ino = sizeof(struct ext4_xattr_ibody_header);
1383 1515
1384 error = xattr_check_inode(inode, header, end); 1516 error = xattr_check_inode(inode, header, end);
1385 if (error) 1517 if (error)
1386 goto cleanup; 1518 goto cleanup;
1387 1519
1388 free = ext4_xattr_free_space(last, &min_offs, base, &total_ino); 1520 ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
1389 if (free >= isize_diff) { 1521 if (ifree >= isize_diff)
1390 entry = IFIRST(header); 1522 goto shift;
1391 ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
1392 - new_extra_isize, (void *)raw_inode +
1393 EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
1394 (void *)header, total_ino,
1395 inode->i_sb->s_blocksize);
1396 EXT4_I(inode)->i_extra_isize = new_extra_isize;
1397 goto out;
1398 }
1399 1523
1400 /* 1524 /*
1401 * Enough free space isn't available in the inode, check if 1525 * Enough free space isn't available in the inode, check if
@@ -1413,146 +1537,44 @@ retry:
1413 goto cleanup; 1537 goto cleanup;
1414 } 1538 }
1415 base = BHDR(bh); 1539 base = BHDR(bh);
1416 first = BFIRST(bh);
1417 end = bh->b_data + bh->b_size; 1540 end = bh->b_data + bh->b_size;
1418 min_offs = end - base; 1541 min_offs = end - base;
1419 free = ext4_xattr_free_space(first, &min_offs, base, NULL); 1542 bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
1420 if (free < isize_diff) { 1543 NULL);
1544 if (bfree + ifree < isize_diff) {
1421 if (!tried_min_extra_isize && s_min_extra_isize) { 1545 if (!tried_min_extra_isize && s_min_extra_isize) {
1422 tried_min_extra_isize++; 1546 tried_min_extra_isize++;
1423 new_extra_isize = s_min_extra_isize; 1547 new_extra_isize = s_min_extra_isize;
1424 brelse(bh); 1548 brelse(bh);
1425 goto retry; 1549 goto retry;
1426 } 1550 }
1427 error = -1; 1551 error = -ENOSPC;
1428 goto cleanup; 1552 goto cleanup;
1429 } 1553 }
1430 } else { 1554 } else {
1431 free = inode->i_sb->s_blocksize; 1555 bfree = inode->i_sb->s_blocksize;
1432 } 1556 }
1433 1557
1434 while (isize_diff > 0) { 1558 error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
1435 size_t offs, size, entry_size; 1559 isize_diff, ifree, bfree,
1436 struct ext4_xattr_entry *small_entry = NULL; 1560 &total_ino);
1437 struct ext4_xattr_info i = { 1561 if (error) {
1438 .value = NULL, 1562 if (error == -ENOSPC && !tried_min_extra_isize &&
1439 .value_len = 0, 1563 s_min_extra_isize) {
1440 }; 1564 tried_min_extra_isize++;
1441 unsigned int total_size; /* EA entry size + value size */ 1565 new_extra_isize = s_min_extra_isize;
1442 unsigned int shift_bytes; /* No. of bytes to shift EAs by? */ 1566 brelse(bh);
1443 unsigned int min_total_size = ~0U; 1567 goto retry;
1444
1445 is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
1446 bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
1447 if (!is || !bs) {
1448 error = -ENOMEM;
1449 goto cleanup;
1450 }
1451
1452 is->s.not_found = -ENODATA;
1453 bs->s.not_found = -ENODATA;
1454 is->iloc.bh = NULL;
1455 bs->bh = NULL;
1456
1457 last = IFIRST(header);
1458 /* Find the entry best suited to be pushed into EA block */
1459 entry = NULL;
1460 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1461 total_size =
1462 EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
1463 EXT4_XATTR_LEN(last->e_name_len);
1464 if (total_size <= free && total_size < min_total_size) {
1465 if (total_size < isize_diff) {
1466 small_entry = last;
1467 } else {
1468 entry = last;
1469 min_total_size = total_size;
1470 }
1471 }
1472 }
1473
1474 if (entry == NULL) {
1475 if (small_entry) {
1476 entry = small_entry;
1477 } else {
1478 if (!tried_min_extra_isize &&
1479 s_min_extra_isize) {
1480 tried_min_extra_isize++;
1481 new_extra_isize = s_min_extra_isize;
1482 kfree(is); is = NULL;
1483 kfree(bs); bs = NULL;
1484 brelse(bh);
1485 goto retry;
1486 }
1487 error = -1;
1488 goto cleanup;
1489 }
1490 }
1491 offs = le16_to_cpu(entry->e_value_offs);
1492 size = le32_to_cpu(entry->e_value_size);
1493 entry_size = EXT4_XATTR_LEN(entry->e_name_len);
1494 i.name_index = entry->e_name_index,
1495 buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
1496 b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
1497 if (!buffer || !b_entry_name) {
1498 error = -ENOMEM;
1499 goto cleanup;
1500 } 1568 }
1501 /* Save the entry name and the entry value */ 1569 goto cleanup;
1502 memcpy(buffer, (void *)IFIRST(header) + offs,
1503 EXT4_XATTR_SIZE(size));
1504 memcpy(b_entry_name, entry->e_name, entry->e_name_len);
1505 b_entry_name[entry->e_name_len] = '\0';
1506 i.name = b_entry_name;
1507
1508 error = ext4_get_inode_loc(inode, &is->iloc);
1509 if (error)
1510 goto cleanup;
1511
1512 error = ext4_xattr_ibody_find(inode, &i, is);
1513 if (error)
1514 goto cleanup;
1515
1516 /* Remove the chosen entry from the inode */
1517 error = ext4_xattr_ibody_set(handle, inode, &i, is);
1518 if (error)
1519 goto cleanup;
1520 total_ino -= entry_size;
1521
1522 entry = IFIRST(header);
1523 if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
1524 shift_bytes = isize_diff;
1525 else
1526 shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
1527 /* Adjust the offsets and shift the remaining entries ahead */
1528 ext4_xattr_shift_entries(entry, -shift_bytes,
1529 (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
1530 EXT4_I(inode)->i_extra_isize + shift_bytes,
1531 (void *)header, total_ino, inode->i_sb->s_blocksize);
1532
1533 isize_diff -= shift_bytes;
1534 EXT4_I(inode)->i_extra_isize += shift_bytes;
1535 header = IHDR(inode, raw_inode);
1536
1537 i.name = b_entry_name;
1538 i.value = buffer;
1539 i.value_len = size;
1540 error = ext4_xattr_block_find(inode, &i, bs);
1541 if (error)
1542 goto cleanup;
1543
1544 /* Add entry which was removed from the inode into the block */
1545 error = ext4_xattr_block_set(handle, inode, &i, bs);
1546 if (error)
1547 goto cleanup;
1548 kfree(b_entry_name);
1549 kfree(buffer);
1550 b_entry_name = NULL;
1551 buffer = NULL;
1552 brelse(is->iloc.bh);
1553 kfree(is);
1554 kfree(bs);
1555 } 1570 }
1571shift:
1572 /* Adjust the offsets and shift the remaining entries ahead */
1573 ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
1574 - new_extra_isize, (void *)raw_inode +
1575 EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
1576 (void *)header, total_ino);
1577 EXT4_I(inode)->i_extra_isize = new_extra_isize;
1556 brelse(bh); 1578 brelse(bh);
1557out: 1579out:
1558 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); 1580 ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
@@ -1560,12 +1582,6 @@ out:
1560 return 0; 1582 return 0;
1561 1583
1562cleanup: 1584cleanup:
1563 kfree(b_entry_name);
1564 kfree(buffer);
1565 if (is)
1566 brelse(is->iloc.bh);
1567 kfree(is);
1568 kfree(bs);
1569 brelse(bh); 1585 brelse(bh);
1570 /* 1586 /*
1571 * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode 1587 * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
@@ -1734,7 +1750,7 @@ static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
1734 *name++; 1750 *name++;
1735 } 1751 }
1736 1752
1737 if (entry->e_value_block == 0 && entry->e_value_size != 0) { 1753 if (entry->e_value_size != 0) {
1738 __le32 *value = (__le32 *)((char *)header + 1754 __le32 *value = (__le32 *)((char *)header +
1739 le16_to_cpu(entry->e_value_offs)); 1755 le16_to_cpu(entry->e_value_offs));
1740 for (n = (le32_to_cpu(entry->e_value_size) + 1756 for (n = (le32_to_cpu(entry->e_value_size) +
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index cbf85f65ba63..12b5836a1033 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -813,12 +813,12 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
813 813
814 if (f2fs_encrypted_inode(d->inode)) { 814 if (f2fs_encrypted_inode(d->inode)) {
815 int save_len = fstr->len; 815 int save_len = fstr->len;
816 int ret; 816 int err;
817 817
818 ret = fscrypt_fname_disk_to_usr(d->inode, 818 err = fscrypt_fname_disk_to_usr(d->inode,
819 (u32)de->hash_code, 0, 819 (u32)de->hash_code, 0,
820 &de_name, fstr); 820 &de_name, fstr);
821 if (ret < 0) 821 if (err)
822 return true; 822 return true;
823 823
824 de_name = *fstr; 824 de_name = *fstr;
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 300aef8a2d5f..5625b879c98a 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -454,7 +454,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
454 ostr.name = sd->encrypted_path; 454 ostr.name = sd->encrypted_path;
455 ostr.len = disk_link.len; 455 ostr.len = disk_link.len;
456 err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr); 456 err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
457 if (err < 0) 457 if (err)
458 goto err_out; 458 goto err_out;
459 459
460 sd->len = cpu_to_le16(ostr.len); 460 sd->len = cpu_to_le16(ostr.len);
@@ -1051,7 +1051,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
1051 goto errout; 1051 goto errout;
1052 1052
1053 res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr); 1053 res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
1054 if (res < 0) 1054 if (res)
1055 goto errout; 1055 goto errout;
1056 1056
1057 /* this is broken symlink case */ 1057 /* this is broken symlink case */
@@ -1063,7 +1063,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
1063 paddr = pstr.name; 1063 paddr = pstr.name;
1064 1064
1065 /* Null-terminate the name */ 1065 /* Null-terminate the name */
1066 paddr[res] = '\0'; 1066 paddr[pstr.len] = '\0';
1067 1067
1068 put_page(cpage); 1068 put_page(cpage);
1069 set_delayed_call(done, kfree_link, paddr); 1069 set_delayed_call(done, kfree_link, paddr);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 46261a6f902d..927da4956a89 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1090,11 +1090,15 @@ static void jbd2_stats_proc_exit(journal_t *journal)
1090 * very few fields yet: that has to wait until we have created the 1090 * very few fields yet: that has to wait until we have created the
1091 * journal structures from from scratch, or loaded them from disk. */ 1091 * journal structures from from scratch, or loaded them from disk. */
1092 1092
1093static journal_t * journal_init_common (void) 1093static journal_t *journal_init_common(struct block_device *bdev,
1094 struct block_device *fs_dev,
1095 unsigned long long start, int len, int blocksize)
1094{ 1096{
1095 static struct lock_class_key jbd2_trans_commit_key; 1097 static struct lock_class_key jbd2_trans_commit_key;
1096 journal_t *journal; 1098 journal_t *journal;
1097 int err; 1099 int err;
1100 struct buffer_head *bh;
1101 int n;
1098 1102
1099 journal = kzalloc(sizeof(*journal), GFP_KERNEL); 1103 journal = kzalloc(sizeof(*journal), GFP_KERNEL);
1100 if (!journal) 1104 if (!journal)
@@ -1131,6 +1135,32 @@ static journal_t * journal_init_common (void)
1131 lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", 1135 lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
1132 &jbd2_trans_commit_key, 0); 1136 &jbd2_trans_commit_key, 0);
1133 1137
1138 /* journal descriptor can store up to n blocks -bzzz */
1139 journal->j_blocksize = blocksize;
1140 journal->j_dev = bdev;
1141 journal->j_fs_dev = fs_dev;
1142 journal->j_blk_offset = start;
1143 journal->j_maxlen = len;
1144 n = journal->j_blocksize / sizeof(journal_block_tag_t);
1145 journal->j_wbufsize = n;
1146 journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
1147 GFP_KERNEL);
1148 if (!journal->j_wbuf) {
1149 kfree(journal);
1150 return NULL;
1151 }
1152
1153 bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
1154 if (!bh) {
1155 pr_err("%s: Cannot get buffer for journal superblock\n",
1156 __func__);
1157 kfree(journal->j_wbuf);
1158 kfree(journal);
1159 return NULL;
1160 }
1161 journal->j_sb_buffer = bh;
1162 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1163
1134 return journal; 1164 return journal;
1135} 1165}
1136 1166
@@ -1157,51 +1187,21 @@ static journal_t * journal_init_common (void)
1157 * range of blocks on an arbitrary block device. 1187 * range of blocks on an arbitrary block device.
1158 * 1188 *
1159 */ 1189 */
1160journal_t * jbd2_journal_init_dev(struct block_device *bdev, 1190journal_t *jbd2_journal_init_dev(struct block_device *bdev,
1161 struct block_device *fs_dev, 1191 struct block_device *fs_dev,
1162 unsigned long long start, int len, int blocksize) 1192 unsigned long long start, int len, int blocksize)
1163{ 1193{
1164 journal_t *journal = journal_init_common(); 1194 journal_t *journal;
1165 struct buffer_head *bh;
1166 int n;
1167 1195
1196 journal = journal_init_common(bdev, fs_dev, start, len, blocksize);
1168 if (!journal) 1197 if (!journal)
1169 return NULL; 1198 return NULL;
1170 1199
1171 /* journal descriptor can store up to n blocks -bzzz */
1172 journal->j_blocksize = blocksize;
1173 journal->j_dev = bdev;
1174 journal->j_fs_dev = fs_dev;
1175 journal->j_blk_offset = start;
1176 journal->j_maxlen = len;
1177 bdevname(journal->j_dev, journal->j_devname); 1200 bdevname(journal->j_dev, journal->j_devname);
1178 strreplace(journal->j_devname, '/', '!'); 1201 strreplace(journal->j_devname, '/', '!');
1179 jbd2_stats_proc_init(journal); 1202 jbd2_stats_proc_init(journal);
1180 n = journal->j_blocksize / sizeof(journal_block_tag_t);
1181 journal->j_wbufsize = n;
1182 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
1183 if (!journal->j_wbuf) {
1184 printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
1185 __func__);
1186 goto out_err;
1187 }
1188
1189 bh = __getblk(journal->j_dev, start, journal->j_blocksize);
1190 if (!bh) {
1191 printk(KERN_ERR
1192 "%s: Cannot get buffer for journal superblock\n",
1193 __func__);
1194 goto out_err;
1195 }
1196 journal->j_sb_buffer = bh;
1197 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1198 1203
1199 return journal; 1204 return journal;
1200out_err:
1201 kfree(journal->j_wbuf);
1202 jbd2_stats_proc_exit(journal);
1203 kfree(journal);
1204 return NULL;
1205} 1205}
1206 1206
1207/** 1207/**
@@ -1212,67 +1212,36 @@ out_err:
1212 * the journal. The inode must exist already, must support bmap() and 1212 * the journal. The inode must exist already, must support bmap() and
1213 * must have all data blocks preallocated. 1213 * must have all data blocks preallocated.
1214 */ 1214 */
1215journal_t * jbd2_journal_init_inode (struct inode *inode) 1215journal_t *jbd2_journal_init_inode(struct inode *inode)
1216{ 1216{
1217 struct buffer_head *bh; 1217 journal_t *journal;
1218 journal_t *journal = journal_init_common();
1219 char *p; 1218 char *p;
1220 int err;
1221 int n;
1222 unsigned long long blocknr; 1219 unsigned long long blocknr;
1223 1220
1221 blocknr = bmap(inode, 0);
1222 if (!blocknr) {
1223 pr_err("%s: Cannot locate journal superblock\n",
1224 __func__);
1225 return NULL;
1226 }
1227
1228 jbd_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
1229 inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size,
1230 inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
1231
1232 journal = journal_init_common(inode->i_sb->s_bdev, inode->i_sb->s_bdev,
1233 blocknr, inode->i_size >> inode->i_sb->s_blocksize_bits,
1234 inode->i_sb->s_blocksize);
1224 if (!journal) 1235 if (!journal)
1225 return NULL; 1236 return NULL;
1226 1237
1227 journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
1228 journal->j_inode = inode; 1238 journal->j_inode = inode;
1229 bdevname(journal->j_dev, journal->j_devname); 1239 bdevname(journal->j_dev, journal->j_devname);
1230 p = strreplace(journal->j_devname, '/', '!'); 1240 p = strreplace(journal->j_devname, '/', '!');
1231 sprintf(p, "-%lu", journal->j_inode->i_ino); 1241 sprintf(p, "-%lu", journal->j_inode->i_ino);
1232 jbd_debug(1,
1233 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
1234 journal, inode->i_sb->s_id, inode->i_ino,
1235 (long long) inode->i_size,
1236 inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
1237
1238 journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
1239 journal->j_blocksize = inode->i_sb->s_blocksize;
1240 jbd2_stats_proc_init(journal); 1242 jbd2_stats_proc_init(journal);
1241 1243
1242 /* journal descriptor can store up to n blocks -bzzz */
1243 n = journal->j_blocksize / sizeof(journal_block_tag_t);
1244 journal->j_wbufsize = n;
1245 journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
1246 if (!journal->j_wbuf) {
1247 printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
1248 __func__);
1249 goto out_err;
1250 }
1251
1252 err = jbd2_journal_bmap(journal, 0, &blocknr);
1253 /* If that failed, give up */
1254 if (err) {
1255 printk(KERN_ERR "%s: Cannot locate journal superblock\n",
1256 __func__);
1257 goto out_err;
1258 }
1259
1260 bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
1261 if (!bh) {
1262 printk(KERN_ERR
1263 "%s: Cannot get buffer for journal superblock\n",
1264 __func__);
1265 goto out_err;
1266 }
1267 journal->j_sb_buffer = bh;
1268 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1269
1270 return journal; 1244 return journal;
1271out_err:
1272 kfree(journal->j_wbuf);
1273 jbd2_stats_proc_exit(journal);
1274 kfree(journal);
1275 return NULL;
1276} 1245}
1277 1246
1278/* 1247/*
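
journal_init_common() now absorbs the setup that jbd2_journal_init_dev() and jbd2_journal_init_inode() used to duplicate: the geometry fields, the j_wbuf array sized to how many block tags fit in one descriptor block, and reading the journal superblock buffer. A small userspace sketch of the wbuf sizing arithmetic; the tag struct below is only a stand-in for journal_block_tag_t, whose exact on-disk size in JBD2 depends on feature flags, so the resulting numbers are illustrative.

/*
 * Sketch of the j_wbuf sizing moved into journal_init_common():
 *   n = blocksize / sizeof(journal_block_tag_t);
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct block_tag_sk {		/* stand-in for journal_block_tag_t */
	uint32_t t_blocknr;
	uint16_t t_checksum;
	uint16_t t_flags;
	uint32_t t_blocknr_high;
};

int main(void)
{
	int blocksize = 4096;
	int n = blocksize / (int)sizeof(struct block_tag_sk);
	void **wbuf = calloc(n, sizeof(*wbuf));	/* models kmalloc_array() */

	if (!wbuf)
		return 1;
	printf("one descriptor block holds %d tags, so j_wbufsize = %d\n", n, n);
	free(wbuf);
	return 0;
}
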
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index b5bc3e249163..3d8246a9faa4 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -159,6 +159,7 @@ static void wait_transaction_locked(journal_t *journal)
159 read_unlock(&journal->j_state_lock); 159 read_unlock(&journal->j_state_lock);
160 if (need_to_start) 160 if (need_to_start)
161 jbd2_log_start_commit(journal, tid); 161 jbd2_log_start_commit(journal, tid);
162 jbd2_might_wait_for_commit(journal);
162 schedule(); 163 schedule();
163 finish_wait(&journal->j_wait_transaction_locked, &wait); 164 finish_wait(&journal->j_wait_transaction_locked, &wait);
164} 165}
@@ -182,8 +183,6 @@ static int add_transaction_credits(journal_t *journal, int blocks,
182 int needed; 183 int needed;
183 int total = blocks + rsv_blocks; 184 int total = blocks + rsv_blocks;
184 185
185 jbd2_might_wait_for_commit(journal);
186
187 /* 186 /*
188 * If the current transaction is locked down for commit, wait 187 * If the current transaction is locked down for commit, wait
189 * for the lock to be released. 188 * for the lock to be released.
@@ -214,6 +213,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
214 if (atomic_read(&journal->j_reserved_credits) + total > 213 if (atomic_read(&journal->j_reserved_credits) + total >
215 journal->j_max_transaction_buffers) { 214 journal->j_max_transaction_buffers) {
216 read_unlock(&journal->j_state_lock); 215 read_unlock(&journal->j_state_lock);
216 jbd2_might_wait_for_commit(journal);
217 wait_event(journal->j_wait_reserved, 217 wait_event(journal->j_wait_reserved,
218 atomic_read(&journal->j_reserved_credits) + total <= 218 atomic_read(&journal->j_reserved_credits) + total <=
219 journal->j_max_transaction_buffers); 219 journal->j_max_transaction_buffers);
@@ -238,6 +238,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
238 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) { 238 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
239 atomic_sub(total, &t->t_outstanding_credits); 239 atomic_sub(total, &t->t_outstanding_credits);
240 read_unlock(&journal->j_state_lock); 240 read_unlock(&journal->j_state_lock);
241 jbd2_might_wait_for_commit(journal);
241 write_lock(&journal->j_state_lock); 242 write_lock(&journal->j_state_lock);
242 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) 243 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
243 __jbd2_log_wait_for_space(journal); 244 __jbd2_log_wait_for_space(journal);
@@ -255,6 +256,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
255 sub_reserved_credits(journal, rsv_blocks); 256 sub_reserved_credits(journal, rsv_blocks);
256 atomic_sub(total, &t->t_outstanding_credits); 257 atomic_sub(total, &t->t_outstanding_credits);
257 read_unlock(&journal->j_state_lock); 258 read_unlock(&journal->j_state_lock);
259 jbd2_might_wait_for_commit(journal);
258 wait_event(journal->j_wait_reserved, 260 wait_event(journal->j_wait_reserved,
259 atomic_read(&journal->j_reserved_credits) + rsv_blocks 261 atomic_read(&journal->j_reserved_credits) + rsv_blocks
260 <= journal->j_max_transaction_buffers / 2); 262 <= journal->j_max_transaction_buffers / 2);
diff --git a/fs/mbcache.c b/fs/mbcache.c
index eccda3a02de6..c5bd19ffa326 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -366,7 +366,11 @@ struct mb_cache *mb_cache_create(int bucket_bits)
366 cache->c_shrink.count_objects = mb_cache_count; 366 cache->c_shrink.count_objects = mb_cache_count;
367 cache->c_shrink.scan_objects = mb_cache_scan; 367 cache->c_shrink.scan_objects = mb_cache_scan;
368 cache->c_shrink.seeks = DEFAULT_SEEKS; 368 cache->c_shrink.seeks = DEFAULT_SEEKS;
369 register_shrinker(&cache->c_shrink); 369 if (register_shrinker(&cache->c_shrink)) {
370 kfree(cache->c_hash);
371 kfree(cache);
372 goto err_out;
373 }
370 374
371 INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker); 375 INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
372 376
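
The mbcache change stops ignoring register_shrinker()'s return value: if registration fails, the half-built cache (hash table and the cache struct itself) is freed before bailing out through the existing error path. A generic userspace sketch of that check-and-unwind pattern; the fake register step, the labels, and the allocation names only mirror the shape of mb_cache_create() and are not the real code.

/* Generic sketch of the register-and-unwind-on-failure pattern. */
#include <stdio.h>
#include <stdlib.h>

struct cache_sk { void *hash; };

static int register_shrinker_sk(struct cache_sk *c)
{
	(void)c;
	return -1;	/* pretend registration failed */
}

static struct cache_sk *cache_create_sk(void)
{
	struct cache_sk *cache = calloc(1, sizeof(*cache));

	if (!cache)
		goto err_out;
	cache->hash = calloc(64, sizeof(void *));
	if (!cache->hash)
		goto err_free_cache;
	if (register_shrinker_sk(cache)) {	/* check the result, don't ignore it */
		free(cache->hash);
		free(cache);
		goto err_out;
	}
	return cache;
err_free_cache:
	free(cache);
err_out:
	return NULL;
}

int main(void)
{
	printf("cache: %p\n", (void *)cache_create_sk());
	return 0;
}
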
diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h
index e44b88ba552b..225bdb7daec7 100644
--- a/include/linux/blockgroup_lock.h
+++ b/include/linux/blockgroup_lock.h
@@ -10,28 +10,10 @@
10#include <linux/cache.h> 10#include <linux/cache.h>
11 11
12#ifdef CONFIG_SMP 12#ifdef CONFIG_SMP
13 13#define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
14/*
15 * We want a power-of-two. Is there a better way than this?
16 */
17
18#if NR_CPUS >= 32
19#define NR_BG_LOCKS 128
20#elif NR_CPUS >= 16
21#define NR_BG_LOCKS 64
22#elif NR_CPUS >= 8
23#define NR_BG_LOCKS 32
24#elif NR_CPUS >= 4
25#define NR_BG_LOCKS 16
26#elif NR_CPUS >= 2
27#define NR_BG_LOCKS 8
28#else 14#else
29#define NR_BG_LOCKS 4
30#endif
31
32#else /* CONFIG_SMP */
33#define NR_BG_LOCKS 1 15#define NR_BG_LOCKS 1
34#endif /* CONFIG_SMP */ 16#endif
35 17
36struct bgl_lock { 18struct bgl_lock {
37 spinlock_t lock; 19 spinlock_t lock;
@@ -49,14 +31,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl)
49 spin_lock_init(&bgl->locks[i].lock); 31 spin_lock_init(&bgl->locks[i].lock);
50} 32}
51 33
52/*
53 * The accessor is a macro so we can embed a blockgroup_lock into different
54 * superblock types
55 */
56static inline spinlock_t * 34static inline spinlock_t *
57bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) 35bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
58{ 36{
59 return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock; 37 return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
60} 38}
61 39
62#endif 40#endif
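
The NR_BG_LOCKS rewrite replaces the hand-written #if ladder with a single expression, 4 << ilog2(min(NR_CPUS, 32)), which stays a power of two and reproduces the old table exactly (NR_CPUS 1 -> 4, 2-3 -> 8, 4-7 -> 16, 8-15 -> 32, 16-31 -> 64, >=32 -> 128). A quick userspace check of that equivalence; ilog2 is modelled with a small loop instead of the kernel helper.

/*
 * Verify that 4 << ilog2(min(ncpus, 32)) matches the old NR_BG_LOCKS
 * table for every CPU count up to 64.
 */
#include <stdio.h>

static int ilog2_sk(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static int old_table(unsigned int ncpus)
{
	if (ncpus >= 32) return 128;
	if (ncpus >= 16) return 64;
	if (ncpus >= 8)  return 32;
	if (ncpus >= 4)  return 16;
	if (ncpus >= 2)  return 8;
	return 4;
}

int main(void)
{
	unsigned int ncpus;

	for (ncpus = 1; ncpus <= 64; ncpus++) {
		int new = 4 << ilog2_sk(ncpus < 32 ? ncpus : 32);

		if (new != old_table(ncpus))
			printf("mismatch at %u: %d vs %d\n",
			       ncpus, new, old_table(ncpus));
	}
	printf("check done\n");
	return 0;
}
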
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index 76cff18bb032..ff8b11b26f31 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -111,23 +111,6 @@ struct fscrypt_completion_result {
111 struct fscrypt_completion_result ecr = { \ 111 struct fscrypt_completion_result ecr = { \
112 COMPLETION_INITIALIZER((ecr).completion), 0 } 112 COMPLETION_INITIALIZER((ecr).completion), 0 }
113 113
114static inline int fscrypt_key_size(int mode)
115{
116 switch (mode) {
117 case FS_ENCRYPTION_MODE_AES_256_XTS:
118 return FS_AES_256_XTS_KEY_SIZE;
119 case FS_ENCRYPTION_MODE_AES_256_GCM:
120 return FS_AES_256_GCM_KEY_SIZE;
121 case FS_ENCRYPTION_MODE_AES_256_CBC:
122 return FS_AES_256_CBC_KEY_SIZE;
123 case FS_ENCRYPTION_MODE_AES_256_CTS:
124 return FS_AES_256_CTS_KEY_SIZE;
125 default:
126 BUG();
127 }
128 return 0;
129}
130
131#define FS_FNAME_NUM_SCATTER_ENTRIES 4 114#define FS_FNAME_NUM_SCATTER_ENTRIES 4
132#define FS_CRYPTO_BLOCK_SIZE 16 115#define FS_CRYPTO_BLOCK_SIZE 16
133#define FS_FNAME_CRYPTO_DIGEST_SIZE 32 116#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
@@ -202,13 +185,6 @@ static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
202 return (mode == FS_ENCRYPTION_MODE_AES_256_CTS); 185 return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
203} 186}
204 187
205static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
206{
207 if (size == fscrypt_key_size(mode))
208 return size;
209 return 0;
210}
211
212static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) 188static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
213{ 189{
214 if (str->len == 1 && str->name[0] == '.') 190 if (str->len == 1 && str->name[0] == '.')