Diffstat (limited to 'fs')
40 files changed, 448 insertions, 208 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 6569fda5cfed..10179cfa1152 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -878,6 +878,7 @@ void invalidate_inode_buffers(struct inode *inode)
 		spin_unlock(&buffer_mapping->private_lock);
 	}
 }
+EXPORT_SYMBOL(invalidate_inode_buffers);
 
 /*
  * Remove any clean buffers from the inode's buffer list. This is called
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 8855331b2fba..e078b7aea143 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -8,7 +8,11 @@ handling fcntl(F_SETLEASE). Convert cifs to using blocking tcp
 sends, and also let tcp autotune the socket send and receive buffers.
 This reduces the number of EAGAIN errors returned by TCP/IP in
 high stress workloads (and the number of retries on socket writes
-when sending large SMBWriteX requests).
+when sending large SMBWriteX requests). Fix case in which a portion of
+data can in some cases not get written to the file on the server before the
+file is closed. Fix DFS parsing to properly handle path consumed field,
+and to handle certain codepage conversions better. Fix mount and
+umount race that can cause oops in mount or umount or reconnect.
 
 Version 1.54
 ------------
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f1ae1f57c30d..c57c0565547f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -606,7 +606,15 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
  * changes to the tcon->tidStatus should be done while holding this lock.
  */
 GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
-GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;  /* protects list inserts on 3 above */
+
+/*
+ * This lock protects the cifs_file->llist and cifs_file->flist
+ * list operations, and updates to some flags (cifs_file->invalidHandle)
+ * It will be moved to either use the tcon->stat_lock or equivalent later.
+ * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
+ * the cifs_tcp_ses_lock must be grabbed first and released last.
+ */
+GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
 
 GLOBAL_EXTERN struct list_head GlobalOplock_Q;
 
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index bdda46dd435a..2af8626ced43 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -295,7 +295,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
 	   check for tcp and smb session status done differently
 	   for those three - in the calling routine */
 	if (tcon) {
-		if (tcon->need_reconnect) {
+		if (tcon->tidStatus == CifsExiting) {
 			/* only tree disconnect, open, and write,
 			(and ulogoff which does not have tcon)
 			are allowed as we start force umount */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6449e1aae621..f0a81e631ae6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -488,12 +488,13 @@ int cifs_close(struct inode *inode, struct file *file)
 	pTcon = cifs_sb->tcon;
 	if (pSMBFile) {
 		struct cifsLockInfo *li, *tmp;
-
+		write_lock(&GlobalSMBSeslock);
 		pSMBFile->closePend = true;
 		if (pTcon) {
 			/* no sense reconnecting to close a file that is
 			   already closed */
 			if (!pTcon->need_reconnect) {
+				write_unlock(&GlobalSMBSeslock);
 				timeout = 2;
 				while ((atomic_read(&pSMBFile->wrtPending) != 0)
 					&& (timeout <= 2048)) {
@@ -510,12 +511,15 @@ int cifs_close(struct inode *inode, struct file *file)
 					timeout *= 4;
 				}
 				if (atomic_read(&pSMBFile->wrtPending))
-					cERROR(1,
-						("close with pending writes"));
-				rc = CIFSSMBClose(xid, pTcon,
+					cERROR(1, ("close with pending write"));
+				if (!pTcon->need_reconnect &&
+				    !pSMBFile->invalidHandle)
+					rc = CIFSSMBClose(xid, pTcon,
 						  pSMBFile->netfid);
-			}
-		}
+			} else
+				write_unlock(&GlobalSMBSeslock);
+		} else
+			write_unlock(&GlobalSMBSeslock);
 
 		/* Delete any outstanding lock records.
 		   We'll lose them when the file is closed anyway. */
@@ -587,15 +591,18 @@ int cifs_closedir(struct inode *inode, struct file *file)
 	pTcon = cifs_sb->tcon;
 
 	cFYI(1, ("Freeing private data in close dir"));
+	write_lock(&GlobalSMBSeslock);
 	if (!pCFileStruct->srch_inf.endOfSearch &&
 	    !pCFileStruct->invalidHandle) {
 		pCFileStruct->invalidHandle = true;
+		write_unlock(&GlobalSMBSeslock);
 		rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
 		cFYI(1, ("Closing uncompleted readdir with rc %d",
 			 rc));
 		/* not much we can do if it fails anyway, ignore rc */
 		rc = 0;
-	}
+	} else
+		write_unlock(&GlobalSMBSeslock);
 	ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
 	if (ptmp) {
 		cFYI(1, ("closedir free smb buf in srch struct"));
@@ -1468,7 +1475,11 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 	cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
 		 page, pos, copied));
 
-	if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
+	if (PageChecked(page)) {
+		if (copied == len)
+			SetPageUptodate(page);
+		ClearPageChecked(page);
+	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
 		SetPageUptodate(page);
 
 	if (!PageUptodate(page)) {
@@ -2055,39 +2066,70 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
 {
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
+	loff_t page_start = pos & PAGE_MASK;
+	loff_t i_size;
+	struct page *page;
+	int rc = 0;
 
 	cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));
 
-	*pagep = __grab_cache_page(mapping, index);
-	if (!*pagep)
-		return -ENOMEM;
-
-	if (PageUptodate(*pagep))
-		return 0;
+	page = __grab_cache_page(mapping, index);
+	if (!page) {
+		rc = -ENOMEM;
+		goto out;
+	}
 
-	/* If we are writing a full page it will be up to date,
-	   no need to read from the server */
-	if (len == PAGE_CACHE_SIZE && flags & AOP_FLAG_UNINTERRUPTIBLE)
-		return 0;
+	if (PageUptodate(page))
+		goto out;
 
-	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
-		int rc;
+	/*
+	 * If we write a full page it will be up to date, no need to read from
+	 * the server. If the write is short, we'll end up doing a sync write
+	 * instead.
+	 */
+	if (len == PAGE_CACHE_SIZE)
+		goto out;
 
-		/* might as well read a page, it is fast enough */
-		rc = cifs_readpage_worker(file, *pagep, &offset);
+	/*
+	 * optimize away the read when we have an oplock, and we're not
+	 * expecting to use any of the data we'd be reading in. That
+	 * is, when the page lies beyond the EOF, or straddles the EOF
+	 * and the write will cover all of the existing data.
+	 */
+	if (CIFS_I(mapping->host)->clientCanCacheRead) {
+		i_size = i_size_read(mapping->host);
+		if (page_start >= i_size ||
+		    (offset == 0 && (pos + len) >= i_size)) {
+			zero_user_segments(page, 0, offset,
+					   offset + len,
+					   PAGE_CACHE_SIZE);
+			/*
+			 * PageChecked means that the parts of the page
+			 * to which we're not writing are considered up
+			 * to date. Once the data is copied to the
+			 * page, it can be set uptodate.
+			 */
+			SetPageChecked(page);
+			goto out;
+		}
+	}
 
-		/* we do not need to pass errors back
-		   e.g. if we do not have read access to the file
-		   because cifs_write_end will attempt synchronous writes
-		   -- shaggy */
+	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+		/*
+		 * might as well read a page, it is fast enough. If we get
+		 * an error, we don't need to return it. cifs_write_end will
+		 * do a sync write instead since PG_uptodate isn't set.
+		 */
+		cifs_readpage_worker(file, page, &page_start);
 	} else {
 		/* we could try using another file handle if there is one -
 		   but how would we lock it to prevent close of that handle
 		   racing with this read? In any case
 		   this will be written out by write_end so is fine */
 	}
-
-	return 0;
+out:
+	*pagep = page;
+	return rc;
 }
 
 const struct address_space_operations cifs_addr_ops = {
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index f108040ca1bc..8a82d076450b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -555,12 +555,14 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 			continue;
 
 		cifs_stats_inc(&tcon->num_oplock_brks);
+		write_lock(&GlobalSMBSeslock);
 		list_for_each(tmp2, &tcon->openFileList) {
 			netfile = list_entry(tmp2, struct cifsFileInfo,
 					     tlist);
 			if (pSMB->Fid != netfile->netfid)
 				continue;
 
+			write_unlock(&GlobalSMBSeslock);
 			read_unlock(&cifs_tcp_ses_lock);
 			cFYI(1, ("file id match, oplock break"));
 			pCifsInode = CIFS_I(netfile->pInode);
@@ -576,6 +578,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 
 			return true;
 		}
+		write_unlock(&GlobalSMBSeslock);
 		read_unlock(&cifs_tcp_ses_lock);
 		cFYI(1, ("No matching file for oplock break"));
 		return true;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 58d57299f2a0..9f51f9bf0292 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -741,11 +741,14 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
 	    (index_to_find < first_entry_in_buffer)) {
 		/* close and restart search */
 		cFYI(1, ("search backing up - close and restart search"));
+		write_lock(&GlobalSMBSeslock);
 		if (!cifsFile->srch_inf.endOfSearch &&
 		    !cifsFile->invalidHandle) {
 			cifsFile->invalidHandle = true;
+			write_unlock(&GlobalSMBSeslock);
 			CIFSFindClose(xid, pTcon, cifsFile->netfid);
-		}
+		} else
+			write_unlock(&GlobalSMBSeslock);
 		if (cifsFile->srch_inf.ntwrk_buf_start) {
 			cFYI(1, ("freeing SMB ff cache buf on search rewind"));
 			if (cifsFile->srch_inf.smallBuf)
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index e22bc3961345..0d713b691941 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1037,17 +1037,14 @@ static int
 decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
 					 struct ecryptfs_crypt_stat *crypt_stat)
 {
-	struct scatterlist dst_sg;
-	struct scatterlist src_sg;
+	struct scatterlist dst_sg[2];
+	struct scatterlist src_sg[2];
 	struct mutex *tfm_mutex;
 	struct blkcipher_desc desc = {
 		.flags = CRYPTO_TFM_REQ_MAY_SLEEP
 	};
 	int rc = 0;
 
-	sg_init_table(&dst_sg, 1);
-	sg_init_table(&src_sg, 1);
-
 	if (unlikely(ecryptfs_verbosity > 0)) {
 		ecryptfs_printk(
 			KERN_DEBUG, "Session key encryption key (size [%d]):\n",
@@ -1066,8 +1063,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
 	}
 	rc = virt_to_scatterlist(auth_tok->session_key.encrypted_key,
 				 auth_tok->session_key.encrypted_key_size,
-				 &src_sg, 1);
-	if (rc != 1) {
+				 src_sg, 2);
+	if (rc < 1 || rc > 2) {
 		printk(KERN_ERR "Internal error whilst attempting to convert "
 			"auth_tok->session_key.encrypted_key to scatterlist; "
 			"expected rc = 1; got rc = [%d]. "
@@ -1079,8 +1076,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
 		auth_tok->session_key.encrypted_key_size;
 	rc = virt_to_scatterlist(auth_tok->session_key.decrypted_key,
 				 auth_tok->session_key.decrypted_key_size,
-				 &dst_sg, 1);
-	if (rc != 1) {
+				 dst_sg, 2);
+	if (rc < 1 || rc > 2) {
 		printk(KERN_ERR "Internal error whilst attempting to convert "
 			"auth_tok->session_key.decrypted_key to scatterlist; "
 			"expected rc = 1; got rc = [%d]\n", rc);
@@ -1096,7 +1093,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
 		rc = -EINVAL;
 		goto out;
 	}
-	rc = crypto_blkcipher_decrypt(&desc, &dst_sg, &src_sg,
+	rc = crypto_blkcipher_decrypt(&desc, dst_sg, src_sg,
 				      auth_tok->session_key.encrypted_key_size);
 	mutex_unlock(tfm_mutex);
 	if (unlikely(rc)) {
@@ -1539,8 +1536,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
 	size_t i;
 	size_t encrypted_session_key_valid = 0;
 	char session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES];
-	struct scatterlist dst_sg;
-	struct scatterlist src_sg;
+	struct scatterlist dst_sg[2];
+	struct scatterlist src_sg[2];
 	struct mutex *tfm_mutex = NULL;
 	u8 cipher_code;
 	size_t packet_size_length;
@@ -1619,8 +1616,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
 		ecryptfs_dump_hex(session_key_encryption_key, 16);
 	}
 	rc = virt_to_scatterlist(crypt_stat->key, key_rec->enc_key_size,
-				 &src_sg, 1);
-	if (rc != 1) {
+				 src_sg, 2);
+	if (rc < 1 || rc > 2) {
 		ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
 				"for crypt_stat session key; expected rc = 1; "
 				"got rc = [%d]. key_rec->enc_key_size = [%d]\n",
@@ -1629,8 +1626,8 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
 		goto out;
 	}
 	rc = virt_to_scatterlist(key_rec->enc_key, key_rec->enc_key_size,
-				 &dst_sg, 1);
-	if (rc != 1) {
+				 dst_sg, 2);
+	if (rc < 1 || rc > 2) {
 		ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
 				"for crypt_stat encrypted session key; "
 				"expected rc = 1; got rc = [%d]. "
@@ -1651,7 +1648,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
 	rc = 0;
 	ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes of the key\n",
 			crypt_stat->key_size);
-	rc = crypto_blkcipher_encrypt(&desc, &dst_sg, &src_sg,
+	rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg,
 				      (*key_rec).enc_key_size);
 	mutex_unlock(tfm_mutex);
 	if (rc) {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index aec5c13f6341..96355d505347 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -102,6 +102,8 @@
 
 #define EP_UNACTIVE_PTR ((void *) -1L)
 
+#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
+
 struct epoll_filefd {
 	struct file *file;
 	int fd;
@@ -200,6 +202,9 @@ struct eventpoll {
 	 * holding ->lock.
 	 */
 	struct epitem *ovflist;
+
+	/* The user that created the eventpoll descriptor */
+	struct user_struct *user;
 };
 
 /* Wait structure used by the poll hooks */
@@ -227,9 +232,17 @@ struct ep_pqueue {
 };
 
 /*
+ * Configuration options available inside /proc/sys/fs/epoll/
+ */
+/* Maximum number of epoll devices, per user */
+static int max_user_instances __read_mostly;
+/* Maximum number of epoll watched descriptors, per user */
+static int max_user_watches __read_mostly;
+
+/*
  * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
-static struct mutex epmutex;
+static DEFINE_MUTEX(epmutex);
 
 /* Safe wake up implementation */
 static struct poll_safewake psw;
@@ -240,6 +253,33 @@ static struct kmem_cache *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
 static struct kmem_cache *pwq_cache __read_mostly;
 
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int zero;
+
+ctl_table epoll_table[] = {
+	{
+		.procname	= "max_user_instances",
+		.data		= &max_user_instances,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "max_user_watches",
+		.data		= &max_user_watches,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{ .ctl_name = 0 }
+};
+#endif /* CONFIG_SYSCTL */
+
 
 /* Setup the structure that is used as key for the RB tree */
 static inline void ep_set_ffd(struct epoll_filefd *ffd,
@@ -402,6 +442,8 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	/* At this point it is safe to free the eventpoll item */
 	kmem_cache_free(epi_cache, epi);
 
+	atomic_dec(&ep->user->epoll_watches);
+
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
 		     current, ep, file));
 
@@ -449,6 +491,8 @@ static void ep_free(struct eventpoll *ep)
 
 	mutex_unlock(&epmutex);
 	mutex_destroy(&ep->mtx);
+	atomic_dec(&ep->user->epoll_devs);
+	free_uid(ep->user);
 	kfree(ep);
 }
 
@@ -532,10 +576,19 @@ void eventpoll_release_file(struct file *file)
 
 static int ep_alloc(struct eventpoll **pep)
 {
-	struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	int error;
+	struct user_struct *user;
+	struct eventpoll *ep;
 
-	if (!ep)
-		return -ENOMEM;
+	user = get_current_user();
+	error = -EMFILE;
+	if (unlikely(atomic_read(&user->epoll_devs) >=
+			max_user_instances))
+		goto free_uid;
+	error = -ENOMEM;
+	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	if (unlikely(!ep))
+		goto free_uid;
 
 	spin_lock_init(&ep->lock);
 	mutex_init(&ep->mtx);
@@ -544,12 +597,17 @@ static int ep_alloc(struct eventpoll **pep)
 	INIT_LIST_HEAD(&ep->rdllist);
 	ep->rbr = RB_ROOT;
 	ep->ovflist = EP_UNACTIVE_PTR;
+	ep->user = user;
 
 	*pep = ep;
 
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
 		     current, ep));
 	return 0;
+
+free_uid:
+	free_uid(user);
+	return error;
 }
 
 /*
@@ -703,9 +761,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
-	error = -ENOMEM;
+	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
+		     max_user_watches))
+		return -ENOSPC;
 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
-		goto error_return;
+		return -ENOMEM;
 
 	/* Item initialization follow here ... */
 	INIT_LIST_HEAD(&epi->rdllink);
@@ -735,6 +795,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	 * install process. Namely an allocation for a wait queue failed due
 	 * high memory pressure.
 	 */
+	error = -ENOMEM;
 	if (epi->nwait < 0)
 		goto error_unregister;
 
@@ -765,6 +826,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
+	atomic_inc(&ep->user->epoll_watches);
+
 	/* We have to call this outside the lock */
 	if (pwake)
 		ep_poll_safewake(&psw, &ep->poll_wait);
@@ -789,7 +852,7 @@ error_unregister:
 	spin_unlock_irqrestore(&ep->lock, flags);
 
 	kmem_cache_free(epi_cache, epi);
-error_return:
+
 	return error;
 }
 
@@ -1078,6 +1141,7 @@ asmlinkage long sys_epoll_create1(int flags)
 			      flags & O_CLOEXEC);
 	if (fd < 0)
 		ep_free(ep);
+	atomic_inc(&ep->user->epoll_devs);
 
 error_return:
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
@@ -1299,7 +1363,12 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
 
 static int __init eventpoll_init(void)
 {
-	mutex_init(&epmutex);
+	struct sysinfo si;
+
+	si_meminfo(&si);
+	max_user_instances = 128;
+	max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
+		EP_ITEM_COST;
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_poll_safewake_init(&psw);
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 6ae9011b95eb..2f34f8f2134b 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -81,7 +81,7 @@ extern int do_rmdir(const char *file);
 extern int do_mknod(const char *file, int mode, unsigned int major,
 		    unsigned int minor);
 extern int link_file(const char *from, const char *to);
-extern int do_readlink(char *file, char *buf, int size);
+extern int hostfs_do_readlink(char *file, char *buf, int size);
 extern int rename_file(char *from, char *to);
 extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
 		     long long *bfree_out, long long *bavail_out,
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 7f34f4385de0..3a31451ac170 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -168,7 +168,7 @@ static char *follow_link(char *link)
 		if (name == NULL)
 			goto out;
 
-		n = do_readlink(link, name, len);
+		n = hostfs_do_readlink(link, name, len);
 		if (n < len)
 			break;
 		len *= 2;
@@ -943,7 +943,7 @@ int hostfs_link_readpage(struct file *file, struct page *page)
 	name = inode_name(page->mapping->host, 0);
 	if (name == NULL)
 		return -ENOMEM;
-	err = do_readlink(name, buffer, PAGE_CACHE_SIZE);
+	err = hostfs_do_readlink(name, buffer, PAGE_CACHE_SIZE);
 	kfree(name);
 	if (err == PAGE_CACHE_SIZE)
 		err = -E2BIG;
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 53fd0a67c11a..b79424f93282 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -377,7 +377,7 @@ int link_file(const char *to, const char *from)
 	return 0;
 }
 
-int do_readlink(char *file, char *buf, int size)
+int hostfs_do_readlink(char *file, char *buf, int size)
 {
 	int n;
 
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 9fd8889097b7..70fc63a1727b 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -167,7 +167,8 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
 			continue;
 		if (host->h_server != ni->server)
 			continue;
-		if (!nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
+		if (ni->server &&
+		    !nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
 			continue;
 
 		/* Move to head of hash chain. */
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index c631a83931ce..56b076736b56 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -181,6 +181,7 @@ lockd(void *vrqstp)
 	}
 	flush_signals(current);
 	cancel_delayed_work_sync(&grace_period_end);
+	locks_end_grace(&lockd_manager);
 	if (nlmsvc_ops)
 		nlmsvc_invalidate_all();
 	nlm_shutdown_hosts();
diff --git a/fs/namei.c b/fs/namei.c
index 42d7b7606936..af3783fff1de 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1380,7 +1380,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
 	if (IS_APPEND(dir))
 		return -EPERM;
 	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
-	    IS_IMMUTABLE(victim->d_inode))
+	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
 		return -EPERM;
 	if (isdir) {
 		if (!S_ISDIR(victim->d_inode->i_mode))
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 9371ea12d7fa..0f9d6efaa62b 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -233,6 +233,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
 	status = nfs4_save_creds(&original_cred);
 	if (status < 0)
 		return status;
+	INIT_LIST_HEAD(dentries);
 
 	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY,
 			   current_cred());
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b0bebc552a11..1a052ac2bde9 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3261,6 +3261,7 @@ nfs4_state_shutdown(void)
 {
 	cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work);
 	destroy_workqueue(laundry_wq);
+	locks_end_grace(&nfsd4_manager);
 	nfs4_lock_state();
 	nfs4_release_reclaim();
 	__nfs4_state_shutdown();
diff --git a/fs/ntfs/debug.h b/fs/ntfs/debug.h
index 5e6724c1afd1..2142b1c68b61 100644
--- a/fs/ntfs/debug.h
+++ b/fs/ntfs/debug.h
@@ -30,7 +30,8 @@
 
 extern int debug_msgs;
 
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+extern void __ntfs_debug(const char *file, int line, const char *function,
+		const char *format, ...) __attribute__ ((format (printf, 4, 5)));
 /**
  * ntfs_debug - write a debug level message to syslog
  * @f: a printf format string containing the message
@@ -39,11 +40,6 @@ extern int debug_msgs;
  * ntfs_debug() writes a DEBUG level message to the syslog but only if the
  * driver was compiled with -DDEBUG. Otherwise, the call turns into a NOP.
  */
-static void ntfs_debug(const char *f, ...);
-#endif
-
-extern void __ntfs_debug (const char *file, int line, const char *function,
-		const char *format, ...) __attribute__ ((format (printf, 4, 5)));
 #define ntfs_debug(f, a...) \
 	__ntfs_debug(__FILE__, __LINE__, __func__, f, ##a)
 
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 7e947c672469..3a178ec48d7c 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -112,7 +112,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 		bh = bhs[i];
 
 		if (buffer_jbd(bh)) {
-			mlog(ML_ERROR,
+			mlog(ML_BH_IO,
 			     "trying to sync read a jbd "
 			     "managed bh (blocknr = %llu), skipping\n",
 			     (unsigned long long)bh->b_blocknr);
@@ -147,15 +147,10 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 	for (i = nr; i > 0; i--) {
 		bh = bhs[i - 1];
 
-		if (buffer_jbd(bh)) {
-			mlog(ML_ERROR,
-			     "the journal got the buffer while it was "
-			     "locked for io! (blocknr = %llu)\n",
-			     (unsigned long long)bh->b_blocknr);
-			BUG();
-		}
+		/* No need to wait on the buffer if it's managed by JBD. */
+		if (!buffer_jbd(bh))
+			wait_on_buffer(bh);
 
-		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			/* Status won't be cleared from here on out,
 			 * so we can safely record this and loop back
@@ -251,8 +246,6 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
 			ignore_cache = 1;
 		}
 
-		/* XXX: Can we ever get this and *not* have the cached
-		 * flag set? */
 		if (buffer_jbd(bh)) {
 			if (ignore_cache)
 				mlog(ML_BH_IO, "trying to sync read a jbd "
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 3516d8a4166b..6f7a77d54020 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -608,8 +608,10 @@ static int __init init_dlmfs_fs(void)
 				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
 					SLAB_MEM_SPREAD),
 				dlmfs_init_once);
-	if (!dlmfs_inode_cache)
+	if (!dlmfs_inode_cache) {
+		status = -ENOMEM;
 		goto bail;
+	}
 	cleanup_inode = 1;
 
 	user_dlm_worker = create_singlethread_workqueue("user_dlm");
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h
index 39ec27738499..0c3cc03c61fa 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlm/userdlm.h
@@ -33,7 +33,7 @@
 #include <linux/workqueue.h>
 
 /* user_lock_res->l_flags flags. */
-#define USER_LOCK_ATTACHED      (0x00000001) /* have we initialized
+#define USER_LOCK_ATTACHED      (0x00000001) /* we have initialized
 					       * the lvb */
 #define USER_LOCK_BUSY          (0x00000002) /* we are currently in
 					       * dlm_lock */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index ec684426034b..6e6cc0a2e5f7 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2841,9 +2841,8 @@ static void ocfs2_unlock_ast(void *opaque, int error)
 
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
-	spin_unlock_irqrestore(&lockres->l_lock, flags);
-
 	wake_up(&lockres->l_event);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	mlog_exit_void();
 }
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index fef7ece32376..3fed9e3d8992 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -85,7 +85,7 @@ enum ocfs2_unlock_action {
 };
 
 /* ocfs2_lock_res->l_flags flags. */
-#define OCFS2_LOCK_ATTACHED      (0x00000001) /* have we initialized
+#define OCFS2_LOCK_ATTACHED      (0x00000001) /* we have initialized
 					       * the lvb */
 #define OCFS2_LOCK_BUSY          (0x00000002) /* we are currently in
 					       * dlm_lock */
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index faec2d879357..9b76d41a8ac6 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -740,6 +740,9 @@ static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
 
 static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
 {
+	if (!lksb->lksb_fsdlm.sb_lvbptr)
+		lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
+					     sizeof(struct dlm_lksb);
 	return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
 }
 
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 633f7a0ebb2c..6d5b213b8a9b 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -348,8 +348,8 @@ static ssize_t whole_disk_show(struct device *dev,
 static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
 		   whole_disk_show, NULL);
 
-int add_partition(struct gendisk *disk, int partno,
-		  sector_t start, sector_t len, int flags)
+struct hd_struct *add_partition(struct gendisk *disk, int partno,
+				sector_t start, sector_t len, int flags)
 {
 	struct hd_struct *p;
 	dev_t devt = MKDEV(0, 0);
@@ -361,15 +361,15 @@ int add_partition(struct gendisk *disk, int partno,
 
 	err = disk_expand_part_tbl(disk, partno);
 	if (err)
-		return err;
+		return ERR_PTR(err);
 	ptbl = disk->part_tbl;
 
 	if (ptbl->part[partno])
-		return -EBUSY;
+		return ERR_PTR(-EBUSY);
 
 	p = kzalloc(sizeof(*p), GFP_KERNEL);
 	if (!p)
-		return -ENOMEM;
+		return ERR_PTR(-EBUSY);
 
 	if (!init_part_stats(p)) {
 		err = -ENOMEM;
@@ -395,7 +395,7 @@ int add_partition(struct gendisk *disk, int partno,
 
 	err = blk_alloc_devt(p, &devt);
 	if (err)
-		goto out_free;
+		goto out_free_stats;
 	pdev->devt = devt;
 
 	/* delay uevent until 'holders' subdir is created */
@@ -424,18 +424,20 @@ int add_partition(struct gendisk *disk, int partno,
 	if (!ddev->uevent_suppress)
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
 
-	return 0;
+	return p;
 
+out_free_stats:
+	free_part_stats(p);
 out_free:
 	kfree(p);
-	return err;
+	return ERR_PTR(err);
 out_del:
 	kobject_put(p->holder_dir);
 	device_del(pdev);
 out_put:
 	put_device(pdev);
 	blk_free_devt(devt);
-	return err;
+	return ERR_PTR(err);
 }
 
 /* Not exported, helper to add_disk(). */
@@ -566,15 +568,16 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 			       disk->disk_name, p, (unsigned long long) size);
 			size = get_capacity(disk) - from;
 		}
-		res = add_partition(disk, p, from, size, state->parts[p].flags);
-		if (res) {
-			printk(KERN_ERR " %s: p%d could not be added: %d\n",
-			       disk->disk_name, p, -res);
+		part = add_partition(disk, p, from, size,
+				     state->parts[p].flags);
+		if (IS_ERR(part)) {
+			printk(KERN_ERR " %s: p%d could not be added: %ld\n",
+			       disk->disk_name, p, -PTR_ERR(part));
 			continue;
 		}
 #ifdef CONFIG_BLK_DEV_MD
 		if (state->parts[p].flags & ADDPART_FLAG_RAID)
-			md_autodetect_dev(bdev->bd_dev+p);
+			md_autodetect_dev(part_to_dev(part)->devt);
 #endif
 	}
 	kfree(state);
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 0a6aa2cc78f0..b49884c8c10e 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -234,8 +234,8 @@ int ubifs_bg_thread(void *info)
 	int err;
 	struct ubifs_info *c = info;
 
-	ubifs_msg("background thread \"%s\" started, PID %d",
-		  c->bgt_name, current->pid);
+	dbg_msg("background thread \"%s\" started, PID %d",
+		c->bgt_name, current->pid);
 	set_freezable();
 
 	while (1) {
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7186400750e7..510ffa0bbda4 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -101,21 +101,24 @@ static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
 	if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
 		switch (type) {
 		case UBIFS_INO_KEY:
-			sprintf(p, "(%lu, %s)", key_inum(c, key),
+			sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
 				get_key_type(type));
 			break;
 		case UBIFS_DENT_KEY:
 		case UBIFS_XENT_KEY:
-			sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %#08x)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_hash(c, key));
 			break;
 		case UBIFS_DATA_KEY:
-			sprintf(p, "(%lu, %s, %u)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %u)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_block(c, key));
 			break;
 		case UBIFS_TRUN_KEY:
 			sprintf(p, "(%lu, %s)",
-				key_inum(c, key), get_key_type(type));
+				(unsigned long)key_inum(c, key),
+				get_key_type(type));
 			break;
 		default:
 			sprintf(p, "(bad key type: %#08x, %#08x)",
@@ -364,8 +367,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
 		       le32_to_cpu(mst->ihead_lnum));
 		printk(KERN_DEBUG "\tihead_offs %u\n",
 		       le32_to_cpu(mst->ihead_offs));
-		printk(KERN_DEBUG "\tindex_size %u\n",
-		       le32_to_cpu(mst->index_size));
+		printk(KERN_DEBUG "\tindex_size %llu\n",
+		       (unsigned long long)le64_to_cpu(mst->index_size));
 		printk(KERN_DEBUG "\tlpt_lnum %u\n",
 		       le32_to_cpu(mst->lpt_lnum));
 		printk(KERN_DEBUG "\tlpt_offs %u\n",
@@ -1589,7 +1592,7 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,
 
 	if (inum > c->highest_inum) {
 		ubifs_err("too high inode number, max. is %lu",
-			  c->highest_inum);
+			  (unsigned long)c->highest_inum);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1668,16 +1671,18 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	ino_key_init(c, &key, inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", inum);
+		ubifs_err("inode %lu not found in index", (unsigned long)inum);
 		return ERR_PTR(-ENOENT);
 	} else if (err < 0) {
-		ubifs_err("error %d while looking up inode %lu", err, inum);
+		ubifs_err("error %d while looking up inode %lu",
+			  err, (unsigned long)inum);
 		return ERR_PTR(err);
 	}
 
 	zbr = &znode->zbranch[n];
 	if (zbr->len < UBIFS_INO_NODE_SZ) {
-		ubifs_err("bad node %lu node length %d", inum, zbr->len);
+		ubifs_err("bad node %lu node length %d",
+			  (unsigned long)inum, zbr->len);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1697,7 +1702,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	kfree(ino);
 	if (IS_ERR(fscki)) {
 		ubifs_err("error %ld while adding inode %lu node",
-			  PTR_ERR(fscki), inum);
+			  PTR_ERR(fscki), (unsigned long)inum);
 		return fscki;
 	}
 
@@ -1786,7 +1791,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing data node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
 
@@ -1819,7 +1825,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
 
@@ -1832,7 +1839,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
 				  "trying to find parent inode node %lu",
-				  err, inum);
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
 
@@ -1923,7 +1930,8 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 1) {
 				ubifs_err("directory inode %lu has %d "
 					  "direntries which refer it, but "
-					  "should be 1", fscki->inum,
+					  "should be 1",
+					  (unsigned long)fscki->inum,
 					  fscki->references);
 				goto out_dump;
 			}
@@ -1931,27 +1939,29 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 0) {
 				ubifs_err("root inode %lu has non-zero (%d) "
 					  "direntries which refer it",
-					  fscki->inum, fscki->references);
+					  (unsigned long)fscki->inum,
+					  fscki->references);
 				goto out_dump;
 			}
 			if (fscki->calc_sz != fscki->size) {
 				ubifs_err("directory inode %lu size is %lld, "
 					  "but calculated size is %lld",
-					  fscki->inum, fscki->size,
-					  fscki->calc_sz);
+					  (unsigned long)fscki->inum,
+					  fscki->size, fscki->calc_sz);
 				goto out_dump;
 			}
 			if (fscki->calc_cnt != fscki->nlink) {
 				ubifs_err("directory inode %lu nlink is %d, "
 					  "but calculated nlink is %d",
-					  fscki->inum, fscki->nlink,
-					  fscki->calc_cnt);
+					  (unsigned long)fscki->inum,
+					  fscki->nlink, fscki->calc_cnt);
 				goto out_dump;
 			}
 		} else {
 			if (fscki->references != fscki->nlink) {
 				ubifs_err("inode %lu nlink is %d, but "
-					  "calculated nlink is %d", fscki->inum,
+					  "calculated nlink is %d",
+					  (unsigned long)fscki->inum,
 					  fscki->nlink, fscki->references);
 				goto out_dump;
 			}
@@ -1959,20 +1969,21 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 		if (fscki->xattr_sz != fscki->calc_xsz) {
 			ubifs_err("inode %lu has xattr size %u, but "
 				  "calculated size is %lld",
-				  fscki->inum, fscki->xattr_sz,
+				  (unsigned long)fscki->inum, fscki->xattr_sz,
 				  fscki->calc_xsz);
 			goto out_dump;
 		}
 		if (fscki->xattr_cnt != fscki->calc_xcnt) {
 			ubifs_err("inode %lu has %u xattrs, but "
-				  "calculated count is %lld", fscki->inum,
+				  "calculated count is %lld",
+				  (unsigned long)fscki->inum,
 				  fscki->xattr_cnt, fscki->calc_xcnt);
 			goto out_dump;
 		}
 		if (fscki->xattr_nms != fscki->calc_xnms) {
 			ubifs_err("inode %lu has xattr names' size %u, but "
 				  "calculated names' size is %lld",
-				  fscki->inum, fscki->xattr_nms,
+				  (unsigned long)fscki->inum, fscki->xattr_nms,
 				  fscki->calc_xnms);
 			goto out_dump;
 		}
@@ -1985,11 +1996,12 @@ out_dump:
 	ino_key_init(c, &key, fscki->inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", fscki->inum);
+		ubifs_err("inode %lu not found in index",
+			  (unsigned long)fscki->inum);
 		return -ENOENT;
 	} else if (err < 0) {
 		ubifs_err("error %d while looking up inode %lu",
-			  err, fscki->inum);
+			  err, (unsigned long)fscki->inum);
 		return err;
 	}
 
@@ -2007,7 +2019,7 @@ out_dump:
2007 | } | 2019 | } |
2008 | 2020 | ||
2009 | ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", | 2021 | ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", |
2010 | fscki->inum, zbr->lnum, zbr->offs); | 2022 | (unsigned long)fscki->inum, zbr->lnum, zbr->offs); |
2011 | dbg_dump_node(c, ino); | 2023 | dbg_dump_node(c, ino); |
2012 | kfree(ino); | 2024 | kfree(ino); |
2013 | return -EINVAL; | 2025 | return -EINVAL; |
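The debug.c hunks above (and many of the hunks that follow) make one and the same change: inode numbers are held in ino_t, whose underlying type varies by architecture, so printing one with %lu needs an explicit cast to unsigned long. Below is a minimal userspace sketch of that pattern; the my_ino_t typedef and the fs_err() helper are invented stand-ins, not kernel definitions.

#include <stdio.h>

/* Stand-in for the kernel's ino_t, which may be 32 or 64 bits wide
 * depending on the architecture and configuration. */
typedef unsigned int my_ino_t;

/* Hypothetical error helper, loosely modelled on ubifs_err(). */
#define fs_err(fmt, ...) fprintf(stderr, "error: " fmt "\n", __VA_ARGS__)

int main(void)
{
	my_ino_t inum = 42;

	/*
	 * Without the cast, "%lu" and a 32-bit ino_t disagree and the
	 * compiler warns (or prints garbage on some ABIs).  The cast
	 * keeps one format string valid on every architecture.
	 */
	fs_err("inode %lu not found in index", (unsigned long)inum);
	return 0;
}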
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 856189014a3a..f448ab1f9c38 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
@@ -161,7 +161,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, | |||
161 | return ERR_PTR(-EINVAL); | 161 | return ERR_PTR(-EINVAL); |
162 | } | 162 | } |
163 | ubifs_warn("running out of inode numbers (current %lu, max %d)", | 163 | ubifs_warn("running out of inode numbers (current %lu, max %d)", |
164 | c->highest_inum, INUM_WATERMARK); | 164 | (unsigned long)c->highest_inum, INUM_WATERMARK); |
165 | } | 165 | } |
166 | 166 | ||
167 | inode->i_ino = ++c->highest_inum; | 167 | inode->i_ino = ++c->highest_inum; |
@@ -428,7 +428,8 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) | |||
428 | dbg_gen("feed '%s', ino %llu, new f_pos %#x", | 428 | dbg_gen("feed '%s', ino %llu, new f_pos %#x", |
429 | dent->name, (unsigned long long)le64_to_cpu(dent->inum), | 429 | dent->name, (unsigned long long)le64_to_cpu(dent->inum), |
430 | key_hash_flash(c, &dent->key)); | 430 | key_hash_flash(c, &dent->key)); |
431 | ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum); | 431 | ubifs_assert(le64_to_cpu(dent->ch.sqnum) > |
432 | ubifs_inode(dir)->creat_sqnum); | ||
432 | 433 | ||
433 | nm.len = le16_to_cpu(dent->nlen); | 434 | nm.len = le16_to_cpu(dent->nlen); |
434 | over = filldir(dirent, dent->name, nm.len, file->f_pos, | 435 | over = filldir(dirent, dent->name, nm.len, file->f_pos, |
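The ubifs_readdir() assertion now converts the on-flash sequence number with le64_to_cpu() before comparing it with the in-memory creation sequence number, and the file.c hunks below make the same fix. On-media UBIFS fields are little-endian, so a raw comparison only happens to work on little-endian hosts. Here is a hedged userspace sketch of the convert-before-compare idea, using invented names (on_flash_hdr, le64_to_host) rather than the kernel helpers.

#include <assert.h>
#include <stdint.h>

/* Illustrative on-media header: fields are stored little-endian. */
struct on_flash_hdr {
	uint8_t sqnum_le[8];	/* little-endian 64-bit sequence number */
};

/* Portable little-endian to host conversion, analogous to le64_to_cpu(). */
static uint64_t le64_to_host(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	struct on_flash_hdr hdr;
	uint64_t creat_sqnum = 100;	/* in-memory, host byte order */
	uint64_t disk_sqnum = 101;

	/* Simulate how the value sits on flash: least significant byte first. */
	for (int i = 0; i < 8; i++)
		hdr.sqnum_le[i] = (disk_sqnum >> (8 * i)) & 0xff;

	/* Convert before comparing, as the patched assertions now do. */
	assert(le64_to_host(hdr.sqnum_le) > creat_sqnum);
	return 0;
}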
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 51cf511d44d9..2624411d9758 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
@@ -72,7 +72,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, | |||
72 | return err; | 72 | return err; |
73 | } | 73 | } |
74 | 74 | ||
75 | ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum); | 75 | ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); |
76 | 76 | ||
77 | len = le32_to_cpu(dn->size); | 77 | len = le32_to_cpu(dn->size); |
78 | if (len <= 0 || len > UBIFS_BLOCK_SIZE) | 78 | if (len <= 0 || len > UBIFS_BLOCK_SIZE) |
@@ -626,7 +626,7 @@ static int populate_page(struct ubifs_info *c, struct page *page, | |||
626 | 626 | ||
627 | dn = bu->buf + (bu->zbranch[nn].offs - offs); | 627 | dn = bu->buf + (bu->zbranch[nn].offs - offs); |
628 | 628 | ||
629 | ubifs_assert(dn->ch.sqnum > | 629 | ubifs_assert(le64_to_cpu(dn->ch.sqnum) > |
630 | ubifs_inode(inode)->creat_sqnum); | 630 | ubifs_inode(inode)->creat_sqnum); |
631 | 631 | ||
632 | len = le32_to_cpu(dn->size); | 632 | len = le32_to_cpu(dn->size); |
@@ -691,32 +691,22 @@ out_err: | |||
691 | /** | 691 | /** |
692 | * ubifs_do_bulk_read - do bulk-read. | 692 | * ubifs_do_bulk_read - do bulk-read. |
693 | * @c: UBIFS file-system description object | 693 | * @c: UBIFS file-system description object |
694 | * @page1: first page | 694 | * @bu: bulk-read information |
695 | * @page1: first page to read | ||
695 | * | 696 | * |
696 | * This function returns %1 if the bulk-read is done, otherwise %0 is returned. | 697 | * This function returns %1 if the bulk-read is done, otherwise %0 is returned. |
697 | */ | 698 | */ |
698 | static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1) | 699 | static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, |
700 | struct page *page1) | ||
699 | { | 701 | { |
700 | pgoff_t offset = page1->index, end_index; | 702 | pgoff_t offset = page1->index, end_index; |
701 | struct address_space *mapping = page1->mapping; | 703 | struct address_space *mapping = page1->mapping; |
702 | struct inode *inode = mapping->host; | 704 | struct inode *inode = mapping->host; |
703 | struct ubifs_inode *ui = ubifs_inode(inode); | 705 | struct ubifs_inode *ui = ubifs_inode(inode); |
704 | struct bu_info *bu; | ||
705 | int err, page_idx, page_cnt, ret = 0, n = 0; | 706 | int err, page_idx, page_cnt, ret = 0, n = 0; |
707 | int allocate = bu->buf ? 0 : 1; | ||
706 | loff_t isize; | 708 | loff_t isize; |
707 | 709 | ||
708 | bu = kmalloc(sizeof(struct bu_info), GFP_NOFS); | ||
709 | if (!bu) | ||
710 | return 0; | ||
711 | |||
712 | bu->buf_len = c->bulk_read_buf_size; | ||
713 | bu->buf = kmalloc(bu->buf_len, GFP_NOFS); | ||
714 | if (!bu->buf) | ||
715 | goto out_free; | ||
716 | |||
717 | data_key_init(c, &bu->key, inode->i_ino, | ||
718 | offset << UBIFS_BLOCKS_PER_PAGE_SHIFT); | ||
719 | |||
720 | err = ubifs_tnc_get_bu_keys(c, bu); | 710 | err = ubifs_tnc_get_bu_keys(c, bu); |
721 | if (err) | 711 | if (err) |
722 | goto out_warn; | 712 | goto out_warn; |
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1) | |||
735 | * together. If all the pages were like this, bulk-read would | 725 | * together. If all the pages were like this, bulk-read would |
736 | * reduce performance, so we turn it off for a while. | 726 | * reduce performance, so we turn it off for a while. |
737 | */ | 727 | */ |
738 | ui->read_in_a_row = 0; | 728 | goto out_bu_off; |
739 | ui->bulk_read = 0; | ||
740 | goto out_free; | ||
741 | } | 729 | } |
742 | 730 | ||
743 | if (bu->cnt) { | 731 | if (bu->cnt) { |
732 | if (allocate) { | ||
733 | /* | ||
734 | * Allocate bulk-read buffer depending on how many data | ||
735 | * nodes we are going to read. | ||
736 | */ | ||
737 | bu->buf_len = bu->zbranch[bu->cnt - 1].offs + | ||
738 | bu->zbranch[bu->cnt - 1].len - | ||
739 | bu->zbranch[0].offs; | ||
740 | ubifs_assert(bu->buf_len > 0); | ||
741 | ubifs_assert(bu->buf_len <= c->leb_size); | ||
742 | bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN); | ||
743 | if (!bu->buf) | ||
744 | goto out_bu_off; | ||
745 | } | ||
746 | |||
744 | err = ubifs_tnc_bulk_read(c, bu); | 747 | err = ubifs_tnc_bulk_read(c, bu); |
745 | if (err) | 748 | if (err) |
746 | goto out_warn; | 749 | goto out_warn; |
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1) | |||
779 | ui->last_page_read = offset + page_idx - 1; | 782 | ui->last_page_read = offset + page_idx - 1; |
780 | 783 | ||
781 | out_free: | 784 | out_free: |
782 | kfree(bu->buf); | 785 | if (allocate) |
783 | kfree(bu); | 786 | kfree(bu->buf); |
784 | return ret; | 787 | return ret; |
785 | 788 | ||
786 | out_warn: | 789 | out_warn: |
787 | ubifs_warn("ignoring error %d and skipping bulk-read", err); | 790 | ubifs_warn("ignoring error %d and skipping bulk-read", err); |
788 | goto out_free; | 791 | goto out_free; |
792 | |||
793 | out_bu_off: | ||
794 | ui->read_in_a_row = ui->bulk_read = 0; | ||
795 | goto out_free; | ||
789 | } | 796 | } |
790 | 797 | ||
791 | /** | 798 | /** |
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page) | |||
803 | struct ubifs_info *c = inode->i_sb->s_fs_info; | 810 | struct ubifs_info *c = inode->i_sb->s_fs_info; |
804 | struct ubifs_inode *ui = ubifs_inode(inode); | 811 | struct ubifs_inode *ui = ubifs_inode(inode); |
805 | pgoff_t index = page->index, last_page_read = ui->last_page_read; | 812 | pgoff_t index = page->index, last_page_read = ui->last_page_read; |
806 | int ret = 0; | 813 | struct bu_info *bu; |
814 | int err = 0, allocated = 0; | ||
807 | 815 | ||
808 | ui->last_page_read = index; | 816 | ui->last_page_read = index; |
809 | |||
810 | if (!c->bulk_read) | 817 | if (!c->bulk_read) |
811 | return 0; | 818 | return 0; |
819 | |||
812 | /* | 820 | /* |
813 | * Bulk-read is protected by ui_mutex, but it is an optimization, so | 821 | * Bulk-read is protected by @ui->ui_mutex, but it is an optimization, |
814 | * don't bother if we cannot lock the mutex. | 822 | * so don't bother if we cannot lock the mutex. |
815 | */ | 823 | */ |
816 | if (!mutex_trylock(&ui->ui_mutex)) | 824 | if (!mutex_trylock(&ui->ui_mutex)) |
817 | return 0; | 825 | return 0; |
826 | |||
818 | if (index != last_page_read + 1) { | 827 | if (index != last_page_read + 1) { |
819 | /* Turn off bulk-read if we stop reading sequentially */ | 828 | /* Turn off bulk-read if we stop reading sequentially */ |
820 | ui->read_in_a_row = 1; | 829 | ui->read_in_a_row = 1; |
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page) | |||
822 | ui->bulk_read = 0; | 831 | ui->bulk_read = 0; |
823 | goto out_unlock; | 832 | goto out_unlock; |
824 | } | 833 | } |
834 | |||
825 | if (!ui->bulk_read) { | 835 | if (!ui->bulk_read) { |
826 | ui->read_in_a_row += 1; | 836 | ui->read_in_a_row += 1; |
827 | if (ui->read_in_a_row < 3) | 837 | if (ui->read_in_a_row < 3) |
@@ -829,10 +839,35 @@ static int ubifs_bulk_read(struct page *page) | |||
829 | /* Three reads in a row, so switch on bulk-read */ | 839 | /* Three reads in a row, so switch on bulk-read */ |
830 | ui->bulk_read = 1; | 840 | ui->bulk_read = 1; |
831 | } | 841 | } |
832 | ret = ubifs_do_bulk_read(c, page); | 842 | |
843 | /* | ||
844 | * If possible, try to use pre-allocated bulk-read information, which | ||
845 | * is protected by @c->bu_mutex. | ||
846 | */ | ||
847 | if (mutex_trylock(&c->bu_mutex)) | ||
848 | bu = &c->bu; | ||
849 | else { | ||
850 | bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN); | ||
851 | if (!bu) | ||
852 | goto out_unlock; | ||
853 | |||
854 | bu->buf = NULL; | ||
855 | allocated = 1; | ||
856 | } | ||
857 | |||
858 | bu->buf_len = c->max_bu_buf_len; | ||
859 | data_key_init(c, &bu->key, inode->i_ino, | ||
860 | page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT); | ||
861 | err = ubifs_do_bulk_read(c, bu, page); | ||
862 | |||
863 | if (!allocated) | ||
864 | mutex_unlock(&c->bu_mutex); | ||
865 | else | ||
866 | kfree(bu); | ||
867 | |||
833 | out_unlock: | 868 | out_unlock: |
834 | mutex_unlock(&ui->ui_mutex); | 869 | mutex_unlock(&ui->ui_mutex); |
835 | return ret; | 870 | return err; |
836 | } | 871 | } |
837 | 872 | ||
838 | static int ubifs_readpage(struct file *file, struct page *page) | 873 | static int ubifs_readpage(struct file *file, struct page *page) |
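The reworked ubifs_bulk_read() above tries to take the pre-allocated bulk-read buffer under c->bu_mutex with a trylock and only falls back to a temporary allocation when the mutex is contended, freeing the temporary copy afterwards; bulk-read is an optimization, so contention is skipped rather than waited on. A rough pthread-based sketch of that trylock-or-allocate pattern follows, with shared_buf and do_work() as invented stand-ins for the kernel objects.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_LEN 4096

static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[BUF_LEN];	/* pre-allocated, protected by buf_mutex */

/* Placeholder for the real work (the bulk read) done with the buffer. */
static void do_work(char *buf, size_t len)
{
	memset(buf, 0, len);
}

static int bulk_read_like(void)
{
	char *buf;
	int allocated = 0;

	/* Prefer the pre-allocated buffer, but do not wait for it. */
	if (pthread_mutex_trylock(&buf_mutex) == 0) {
		buf = shared_buf;
	} else {
		buf = malloc(BUF_LEN);	/* contended: use a private buffer */
		if (!buf)
			return 0;	/* optimization only, so just skip */
		allocated = 1;
	}

	do_work(buf, BUF_LEN);

	if (allocated)
		free(buf);
	else
		pthread_mutex_unlock(&buf_mutex);
	return 1;
}

int main(void)
{
	printf("bulk read done: %d\n", bulk_read_like());
	return 0;
}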
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 22993f867d19..f91b745908ea 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c | |||
@@ -690,8 +690,9 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, | |||
690 | int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR; | 690 | int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR; |
691 | struct ubifs_inode *ui = ubifs_inode(inode); | 691 | struct ubifs_inode *ui = ubifs_inode(inode); |
692 | 692 | ||
693 | dbg_jnl("ino %lu, blk %u, len %d, key %s", key_inum(c, key), | 693 | dbg_jnl("ino %lu, blk %u, len %d, key %s", |
694 | key_block(c, key), len, DBGKEY(key)); | 694 | (unsigned long)key_inum(c, key), key_block(c, key), len, |
695 | DBGKEY(key)); | ||
695 | ubifs_assert(len <= UBIFS_BLOCK_SIZE); | 696 | ubifs_assert(len <= UBIFS_BLOCK_SIZE); |
696 | 697 | ||
697 | data = kmalloc(dlen, GFP_NOFS); | 698 | data = kmalloc(dlen, GFP_NOFS); |
@@ -1128,7 +1129,8 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, | |||
1128 | ino_t inum = inode->i_ino; | 1129 | ino_t inum = inode->i_ino; |
1129 | unsigned int blk; | 1130 | unsigned int blk; |
1130 | 1131 | ||
1131 | dbg_jnl("ino %lu, size %lld -> %lld", inum, old_size, new_size); | 1132 | dbg_jnl("ino %lu, size %lld -> %lld", |
1133 | (unsigned long)inum, old_size, new_size); | ||
1132 | ubifs_assert(!ui->data_len); | 1134 | ubifs_assert(!ui->data_len); |
1133 | ubifs_assert(S_ISREG(inode->i_mode)); | 1135 | ubifs_assert(S_ISREG(inode->i_mode)); |
1134 | ubifs_assert(mutex_is_locked(&ui->ui_mutex)); | 1136 | ubifs_assert(mutex_is_locked(&ui->ui_mutex)); |
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h index 9ee65086f627..3f1f16bc25c9 100644 --- a/fs/ubifs/key.h +++ b/fs/ubifs/key.h | |||
@@ -345,7 +345,7 @@ static inline int key_type_flash(const struct ubifs_info *c, const void *k) | |||
345 | { | 345 | { |
346 | const union ubifs_key *key = k; | 346 | const union ubifs_key *key = k; |
347 | 347 | ||
348 | return le32_to_cpu(key->u32[1]) >> UBIFS_S_KEY_BLOCK_BITS; | 348 | return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS; |
349 | } | 349 | } |
350 | 350 | ||
351 | /** | 351 | /** |
@@ -416,7 +416,7 @@ static inline unsigned int key_block_flash(const struct ubifs_info *c, | |||
416 | { | 416 | { |
417 | const union ubifs_key *key = k; | 417 | const union ubifs_key *key = k; |
418 | 418 | ||
419 | return le32_to_cpu(key->u32[1]) & UBIFS_S_KEY_BLOCK_MASK; | 419 | return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK; |
420 | } | 420 | } |
421 | 421 | ||
422 | /** | 422 | /** |
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index eed5a0025d63..a41434b42785 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c | |||
@@ -571,8 +571,6 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c, | |||
571 | /* We assume here that LEB zero is never an LPT LEB */ | 571 | /* We assume here that LEB zero is never an LPT LEB */ |
572 | if (nnode->nbranch[iip].lnum) | 572 | if (nnode->nbranch[iip].lnum) |
573 | return ubifs_get_pnode(c, nnode, iip); | 573 | return ubifs_get_pnode(c, nnode, iip); |
574 | else | ||
575 | return NULL; | ||
576 | } | 574 | } |
577 | 575 | ||
578 | /* Go up while can't go right */ | 576 | /* Go up while can't go right */ |
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 02d3462f4d3e..9bd5a43d4526 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c | |||
@@ -105,7 +105,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) | |||
105 | list_add_tail(&orphan->list, &c->orph_list); | 105 | list_add_tail(&orphan->list, &c->orph_list); |
106 | list_add_tail(&orphan->new_list, &c->orph_new); | 106 | list_add_tail(&orphan->new_list, &c->orph_new); |
107 | spin_unlock(&c->orphan_lock); | 107 | spin_unlock(&c->orphan_lock); |
108 | dbg_gen("ino %lu", inum); | 108 | dbg_gen("ino %lu", (unsigned long)inum); |
109 | return 0; | 109 | return 0; |
110 | } | 110 | } |
111 | 111 | ||
@@ -132,14 +132,16 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum) | |||
132 | else { | 132 | else { |
133 | if (o->dnext) { | 133 | if (o->dnext) { |
134 | spin_unlock(&c->orphan_lock); | 134 | spin_unlock(&c->orphan_lock); |
135 | dbg_gen("deleted twice ino %lu", inum); | 135 | dbg_gen("deleted twice ino %lu", |
136 | (unsigned long)inum); | ||
136 | return; | 137 | return; |
137 | } | 138 | } |
138 | if (o->cnext) { | 139 | if (o->cnext) { |
139 | o->dnext = c->orph_dnext; | 140 | o->dnext = c->orph_dnext; |
140 | c->orph_dnext = o; | 141 | c->orph_dnext = o; |
141 | spin_unlock(&c->orphan_lock); | 142 | spin_unlock(&c->orphan_lock); |
142 | dbg_gen("delete later ino %lu", inum); | 143 | dbg_gen("delete later ino %lu", |
144 | (unsigned long)inum); | ||
143 | return; | 145 | return; |
144 | } | 146 | } |
145 | rb_erase(p, &c->orph_tree); | 147 | rb_erase(p, &c->orph_tree); |
@@ -151,12 +153,12 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum) | |||
151 | } | 153 | } |
152 | spin_unlock(&c->orphan_lock); | 154 | spin_unlock(&c->orphan_lock); |
153 | kfree(o); | 155 | kfree(o); |
154 | dbg_gen("inum %lu", inum); | 156 | dbg_gen("inum %lu", (unsigned long)inum); |
155 | return; | 157 | return; |
156 | } | 158 | } |
157 | } | 159 | } |
158 | spin_unlock(&c->orphan_lock); | 160 | spin_unlock(&c->orphan_lock); |
159 | dbg_err("missing orphan ino %lu", inum); | 161 | dbg_err("missing orphan ino %lu", (unsigned long)inum); |
160 | dbg_dump_stack(); | 162 | dbg_dump_stack(); |
161 | } | 163 | } |
162 | 164 | ||
@@ -448,7 +450,7 @@ static void erase_deleted(struct ubifs_info *c) | |||
448 | rb_erase(&orphan->rb, &c->orph_tree); | 450 | rb_erase(&orphan->rb, &c->orph_tree); |
449 | list_del(&orphan->list); | 451 | list_del(&orphan->list); |
450 | c->tot_orphans -= 1; | 452 | c->tot_orphans -= 1; |
451 | dbg_gen("deleting orphan ino %lu", orphan->inum); | 453 | dbg_gen("deleting orphan ino %lu", (unsigned long)orphan->inum); |
452 | kfree(orphan); | 454 | kfree(orphan); |
453 | } | 455 | } |
454 | c->orph_dnext = NULL; | 456 | c->orph_dnext = NULL; |
@@ -536,8 +538,8 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum) | |||
536 | list_add_tail(&orphan->list, &c->orph_list); | 538 | list_add_tail(&orphan->list, &c->orph_list); |
537 | orphan->dnext = c->orph_dnext; | 539 | orphan->dnext = c->orph_dnext; |
538 | c->orph_dnext = orphan; | 540 | c->orph_dnext = orphan; |
539 | dbg_mnt("ino %lu, new %d, tot %d", | 541 | dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum, |
540 | inum, c->new_orphans, c->tot_orphans); | 542 | c->new_orphans, c->tot_orphans); |
541 | return 0; | 543 | return 0; |
542 | } | 544 | } |
543 | 545 | ||
@@ -609,7 +611,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, | |||
609 | n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; | 611 | n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; |
610 | for (i = 0; i < n; i++) { | 612 | for (i = 0; i < n; i++) { |
611 | inum = le64_to_cpu(orph->inos[i]); | 613 | inum = le64_to_cpu(orph->inos[i]); |
612 | dbg_rcvry("deleting orphaned inode %lu", inum); | 614 | dbg_rcvry("deleting orphaned inode %lu", |
615 | (unsigned long)inum); | ||
613 | err = ubifs_tnc_remove_ino(c, inum); | 616 | err = ubifs_tnc_remove_ino(c, inum); |
614 | if (err) | 617 | if (err) |
615 | return err; | 618 | return err; |
@@ -840,8 +843,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr, | |||
840 | if (inum != ci->last_ino) { | 843 | if (inum != ci->last_ino) { |
841 | /* Lowest node type is the inode node, so it comes first */ | 844 | /* Lowest node type is the inode node, so it comes first */ |
842 | if (key_type(c, &zbr->key) != UBIFS_INO_KEY) | 845 | if (key_type(c, &zbr->key) != UBIFS_INO_KEY) |
843 | ubifs_err("found orphan node ino %lu, type %d", inum, | 846 | ubifs_err("found orphan node ino %lu, type %d", |
844 | key_type(c, &zbr->key)); | 847 | (unsigned long)inum, key_type(c, &zbr->key)); |
845 | ci->last_ino = inum; | 848 | ci->last_ino = inum; |
846 | ci->tot_inos += 1; | 849 | ci->tot_inos += 1; |
847 | err = ubifs_tnc_read_node(c, zbr, ci->node); | 850 | err = ubifs_tnc_read_node(c, zbr, ci->node); |
@@ -853,7 +856,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr, | |||
853 | /* Must be recorded as an orphan */ | 856 | /* Must be recorded as an orphan */ |
854 | if (!dbg_find_check_orphan(&ci->root, inum) && | 857 | if (!dbg_find_check_orphan(&ci->root, inum) && |
855 | !dbg_find_orphan(c, inum)) { | 858 | !dbg_find_orphan(c, inum)) { |
856 | ubifs_err("missing orphan, ino %lu", inum); | 859 | ubifs_err("missing orphan, ino %lu", |
860 | (unsigned long)inum); | ||
857 | ci->missing += 1; | 861 | ci->missing += 1; |
858 | } | 862 | } |
859 | } | 863 | } |
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 77d26c141cf6..90acac603e63 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c | |||
@@ -168,12 +168,12 @@ static int write_rcvrd_mst_node(struct ubifs_info *c, | |||
168 | struct ubifs_mst_node *mst) | 168 | struct ubifs_mst_node *mst) |
169 | { | 169 | { |
170 | int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz; | 170 | int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz; |
171 | uint32_t save_flags; | 171 | __le32 save_flags; |
172 | 172 | ||
173 | dbg_rcvry("recovery"); | 173 | dbg_rcvry("recovery"); |
174 | 174 | ||
175 | save_flags = mst->flags; | 175 | save_flags = mst->flags; |
176 | mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY); | 176 | mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY); |
177 | 177 | ||
178 | ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); | 178 | ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); |
179 | err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM); | 179 | err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM); |
@@ -1435,13 +1435,13 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e) | |||
1435 | err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); | 1435 | err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); |
1436 | if (err) | 1436 | if (err) |
1437 | goto out; | 1437 | goto out; |
1438 | dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", e->inum, lnum, offs, | 1438 | dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", |
1439 | i_size, e->d_size); | 1439 | (unsigned long)e->inum, lnum, offs, i_size, e->d_size); |
1440 | return 0; | 1440 | return 0; |
1441 | 1441 | ||
1442 | out: | 1442 | out: |
1443 | ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d", | 1443 | ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d", |
1444 | e->inum, e->i_size, e->d_size, err); | 1444 | (unsigned long)e->inum, e->i_size, e->d_size, err); |
1445 | return err; | 1445 | return err; |
1446 | } | 1446 | } |
1447 | 1447 | ||
@@ -1472,7 +1472,8 @@ int ubifs_recover_size(struct ubifs_info *c) | |||
1472 | return err; | 1472 | return err; |
1473 | if (err == -ENOENT) { | 1473 | if (err == -ENOENT) { |
1474 | /* Remove data nodes that have no inode */ | 1474 | /* Remove data nodes that have no inode */ |
1475 | dbg_rcvry("removing ino %lu", e->inum); | 1475 | dbg_rcvry("removing ino %lu", |
1476 | (unsigned long)e->inum); | ||
1476 | err = ubifs_tnc_remove_ino(c, e->inum); | 1477 | err = ubifs_tnc_remove_ino(c, e->inum); |
1477 | if (err) | 1478 | if (err) |
1478 | return err; | 1479 | return err; |
@@ -1493,8 +1494,8 @@ int ubifs_recover_size(struct ubifs_info *c) | |||
1493 | return PTR_ERR(inode); | 1494 | return PTR_ERR(inode); |
1494 | if (inode->i_size < e->d_size) { | 1495 | if (inode->i_size < e->d_size) { |
1495 | dbg_rcvry("ino %lu size %lld -> %lld", | 1496 | dbg_rcvry("ino %lu size %lld -> %lld", |
1496 | e->inum, e->d_size, | 1497 | (unsigned long)e->inum, |
1497 | inode->i_size); | 1498 | e->d_size, inode->i_size); |
1498 | inode->i_size = e->d_size; | 1499 | inode->i_size = e->d_size; |
1499 | ubifs_inode(inode)->ui_size = e->d_size; | 1500 | ubifs_inode(inode)->ui_size = e->d_size; |
1500 | e->inode = inode; | 1501 | e->inode = inode; |
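The write_rcvrd_mst_node() hunk replaces the convert/OR/convert-back sequence with a direct OR of a pre-converted mask and types the saved flags as __le32 so sparse can track the endianness. The two forms are equivalent because bitwise OR operates byte by byte and therefore commutes with the byte swap. A small userspace sketch of that equivalence; the helpers and the flag value are illustrative, not the kernel's.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Host to little-endian, analogous to cpu_to_le32(). */
static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24 };
	uint32_t out;
	memcpy(&out, b, 4);
	return out;
}

/* Little-endian to host, analogous to le32_to_cpu(). */
static uint32_t from_le32(uint32_t v)
{
	uint8_t b[4];
	memcpy(b, &v, 4);
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

#define RCVRY_FLAG 0x00000200u	/* illustrative flag value, not UBIFS_MST_RCVRY */

int main(void)
{
	uint32_t flags_le = to_le32(0x00000001u);	/* stored little-endian */

	/*
	 * flags |= cpu_to_le32(FLAG) gives the same bytes as the old
	 * flags = cpu_to_le32(le32_to_cpu(flags) | FLAG) round trip.
	 */
	uint32_t a = flags_le | to_le32(RCVRY_FLAG);
	uint32_t b = to_le32(from_le32(flags_le) | RCVRY_FLAG);
	assert(a == b);
	return 0;
}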
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 7399692af859..21f7d047c306 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c | |||
@@ -1065,7 +1065,7 @@ int ubifs_replay_journal(struct ubifs_info *c) | |||
1065 | ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); | 1065 | ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); |
1066 | dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " | 1066 | dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " |
1067 | "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, | 1067 | "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, |
1068 | c->highest_inum); | 1068 | (unsigned long)c->highest_inum); |
1069 | out: | 1069 | out: |
1070 | destroy_replay_tree(c); | 1070 | destroy_replay_tree(c); |
1071 | destroy_bud_list(c); | 1071 | destroy_bud_list(c); |
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 2bf753b38889..0f392351dc5a 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c | |||
@@ -81,6 +81,7 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
81 | int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; | 81 | int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; |
82 | int min_leb_cnt = UBIFS_MIN_LEB_CNT; | 82 | int min_leb_cnt = UBIFS_MIN_LEB_CNT; |
83 | uint64_t tmp64, main_bytes; | 83 | uint64_t tmp64, main_bytes; |
84 | __le64 tmp_le64; | ||
84 | 85 | ||
85 | /* Some functions called from here depend on the @c->key_len field */ | 86 | /* Some functions called from here depend on the @c->key_len field */ |
86 | c->key_len = UBIFS_SK_LEN; | 87 | c->key_len = UBIFS_SK_LEN; |
@@ -295,10 +296,10 @@ static int create_default_filesystem(struct ubifs_info *c) | |||
295 | ino->ch.node_type = UBIFS_INO_NODE; | 296 | ino->ch.node_type = UBIFS_INO_NODE; |
296 | ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); | 297 | ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); |
297 | ino->nlink = cpu_to_le32(2); | 298 | ino->nlink = cpu_to_le32(2); |
298 | tmp = cpu_to_le64(CURRENT_TIME_SEC.tv_sec); | 299 | tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec); |
299 | ino->atime_sec = tmp; | 300 | ino->atime_sec = tmp_le64; |
300 | ino->ctime_sec = tmp; | 301 | ino->ctime_sec = tmp_le64; |
301 | ino->mtime_sec = tmp; | 302 | ino->mtime_sec = tmp_le64; |
302 | ino->atime_nsec = 0; | 303 | ino->atime_nsec = 0; |
303 | ino->ctime_nsec = 0; | 304 | ino->ctime_nsec = 0; |
304 | ino->mtime_nsec = 0; | 305 | ino->mtime_nsec = 0; |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 8780efbf40ac..d80b2aef42b6 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -36,6 +36,12 @@ | |||
36 | #include <linux/mount.h> | 36 | #include <linux/mount.h> |
37 | #include "ubifs.h" | 37 | #include "ubifs.h" |
38 | 38 | ||
39 | /* | ||
40 | * Maximum amount of memory we may 'kmalloc()' without worrying that we are | ||
41 | * allocating too much. | ||
42 | */ | ||
43 | #define UBIFS_KMALLOC_OK (128*1024) | ||
44 | |||
39 | /* Slab cache for UBIFS inodes */ | 45 | /* Slab cache for UBIFS inodes */ |
40 | struct kmem_cache *ubifs_inode_slab; | 46 | struct kmem_cache *ubifs_inode_slab; |
41 | 47 | ||
@@ -561,18 +567,11 @@ static int init_constants_early(struct ubifs_info *c) | |||
561 | * calculations when reporting free space. | 567 | * calculations when reporting free space. |
562 | */ | 568 | */ |
563 | c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; | 569 | c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; |
564 | /* Buffer size for bulk-reads */ | ||
565 | c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; | ||
566 | if (c->bulk_read_buf_size > c->leb_size) | ||
567 | c->bulk_read_buf_size = c->leb_size; | ||
568 | if (c->bulk_read_buf_size > 128 * 1024) { | ||
569 | /* Check if we can kmalloc more than 128KiB */ | ||
570 | void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL); | ||
571 | 570 | ||
572 | kfree(try); | 571 | /* Buffer size for bulk-reads */ |
573 | if (!try) | 572 | c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; |
574 | c->bulk_read_buf_size = 128 * 1024; | 573 | if (c->max_bu_buf_len > c->leb_size) |
575 | } | 574 | c->max_bu_buf_len = c->leb_size; |
576 | return 0; | 575 | return 0; |
577 | } | 576 | } |
578 | 577 | ||
@@ -992,6 +991,34 @@ static void destroy_journal(struct ubifs_info *c) | |||
992 | } | 991 | } |
993 | 992 | ||
994 | /** | 993 | /** |
994 | * bu_init - initialize bulk-read information. | ||
995 | * @c: UBIFS file-system description object | ||
996 | */ | ||
997 | static void bu_init(struct ubifs_info *c) | ||
998 | { | ||
999 | ubifs_assert(c->bulk_read == 1); | ||
1000 | |||
1001 | if (c->bu.buf) | ||
1002 | return; /* Already initialized */ | ||
1003 | |||
1004 | again: | ||
1005 | c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN); | ||
1006 | if (!c->bu.buf) { | ||
1007 | if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) { | ||
1008 | c->max_bu_buf_len = UBIFS_KMALLOC_OK; | ||
1009 | goto again; | ||
1010 | } | ||
1011 | |||
1012 | /* Just disable bulk-read */ | ||
1013 | ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, " | ||
1014 | "disabling it", c->max_bu_buf_len); | ||
1015 | c->mount_opts.bulk_read = 1; | ||
1016 | c->bulk_read = 0; | ||
1017 | return; | ||
1018 | } | ||
1019 | } | ||
1020 | |||
1021 | /** | ||
995 | * mount_ubifs - mount UBIFS file-system. | 1022 | * mount_ubifs - mount UBIFS file-system. |
996 | * @c: UBIFS file-system description object | 1023 | * @c: UBIFS file-system description object |
997 | * | 1024 | * |
@@ -1059,6 +1086,13 @@ static int mount_ubifs(struct ubifs_info *c) | |||
1059 | goto out_free; | 1086 | goto out_free; |
1060 | } | 1087 | } |
1061 | 1088 | ||
1089 | if (c->bulk_read == 1) | ||
1090 | bu_init(c); | ||
1091 | |||
1092 | /* | ||
1093 | * We have to check all CRCs, even for data nodes, when we mount the FS | ||
1094 | * (specifically, when we are replaying). | ||
1095 | */ | ||
1062 | c->always_chk_crc = 1; | 1096 | c->always_chk_crc = 1; |
1063 | 1097 | ||
1064 | err = ubifs_read_superblock(c); | 1098 | err = ubifs_read_superblock(c); |
@@ -1289,6 +1323,7 @@ out_cbuf: | |||
1289 | out_dereg: | 1323 | out_dereg: |
1290 | dbg_failure_mode_deregistration(c); | 1324 | dbg_failure_mode_deregistration(c); |
1291 | out_free: | 1325 | out_free: |
1326 | kfree(c->bu.buf); | ||
1292 | vfree(c->ileb_buf); | 1327 | vfree(c->ileb_buf); |
1293 | vfree(c->sbuf); | 1328 | vfree(c->sbuf); |
1294 | kfree(c->bottom_up_buf); | 1329 | kfree(c->bottom_up_buf); |
@@ -1325,10 +1360,11 @@ static void ubifs_umount(struct ubifs_info *c) | |||
1325 | kfree(c->cbuf); | 1360 | kfree(c->cbuf); |
1326 | kfree(c->rcvrd_mst_node); | 1361 | kfree(c->rcvrd_mst_node); |
1327 | kfree(c->mst_node); | 1362 | kfree(c->mst_node); |
1363 | kfree(c->bu.buf); | ||
1364 | vfree(c->ileb_buf); | ||
1328 | vfree(c->sbuf); | 1365 | vfree(c->sbuf); |
1329 | kfree(c->bottom_up_buf); | 1366 | kfree(c->bottom_up_buf); |
1330 | UBIFS_DBG(vfree(c->dbg_buf)); | 1367 | UBIFS_DBG(vfree(c->dbg_buf)); |
1331 | vfree(c->ileb_buf); | ||
1332 | dbg_failure_mode_deregistration(c); | 1368 | dbg_failure_mode_deregistration(c); |
1333 | } | 1369 | } |
1334 | 1370 | ||
@@ -1626,6 +1662,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) | |||
1626 | ubifs_err("invalid or unknown remount parameter"); | 1662 | ubifs_err("invalid or unknown remount parameter"); |
1627 | return err; | 1663 | return err; |
1628 | } | 1664 | } |
1665 | |||
1629 | if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { | 1666 | if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { |
1630 | err = ubifs_remount_rw(c); | 1667 | err = ubifs_remount_rw(c); |
1631 | if (err) | 1668 | if (err) |
@@ -1633,6 +1670,14 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) | |||
1633 | } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) | 1670 | } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) |
1634 | ubifs_remount_ro(c); | 1671 | ubifs_remount_ro(c); |
1635 | 1672 | ||
1673 | if (c->bulk_read == 1) | ||
1674 | bu_init(c); | ||
1675 | else { | ||
1676 | dbg_gen("disable bulk-read"); | ||
1677 | kfree(c->bu.buf); | ||
1678 | c->bu.buf = NULL; | ||
1679 | } | ||
1680 | |||
1636 | return 0; | 1681 | return 0; |
1637 | } | 1682 | } |
1638 | 1683 | ||
@@ -1723,6 +1768,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) | |||
1723 | mutex_init(&c->log_mutex); | 1768 | mutex_init(&c->log_mutex); |
1724 | mutex_init(&c->mst_mutex); | 1769 | mutex_init(&c->mst_mutex); |
1725 | mutex_init(&c->umount_mutex); | 1770 | mutex_init(&c->umount_mutex); |
1771 | mutex_init(&c->bu_mutex); | ||
1726 | init_waitqueue_head(&c->cmt_wq); | 1772 | init_waitqueue_head(&c->cmt_wq); |
1727 | c->buds = RB_ROOT; | 1773 | c->buds = RB_ROOT; |
1728 | c->old_idx = RB_ROOT; | 1774 | c->old_idx = RB_ROOT; |
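The new bu_init() above tries a large allocation first, retries with a 128 KiB cap (UBIFS_KMALLOC_OK) if that fails, and disables bulk-read altogether when even the capped allocation fails, since the feature is optional. A userspace sketch of that degrade-gracefully pattern follows, with buf_init() and struct ctx as invented stand-ins for the kernel code.

#include <stdio.h>
#include <stdlib.h>

#define KMALLOC_OK (128 * 1024)	/* cap mirroring UBIFS_KMALLOC_OK */

struct ctx {
	int bulk_read;		/* feature enabled? */
	int max_buf_len;	/* desired buffer length */
	void *buf;		/* pre-allocated buffer, or NULL */
};

/* Allocate the bulk-read buffer, shrinking or disabling on failure. */
static void buf_init(struct ctx *c)
{
	if (c->buf)
		return;		/* already initialized */
again:
	c->buf = malloc(c->max_buf_len);
	if (!c->buf) {
		if (c->max_buf_len > KMALLOC_OK) {
			c->max_buf_len = KMALLOC_OK;
			goto again;	/* retry with a smaller buffer */
		}
		fprintf(stderr, "cannot allocate %d bytes, disabling bulk-read\n",
			c->max_buf_len);
		c->bulk_read = 0;	/* fall back to plain reads */
	}
}

int main(void)
{
	struct ctx c = { .bulk_read = 1, .max_buf_len = 256 * 1024 };

	buf_init(&c);
	printf("bulk_read=%d buf=%p\n", c.bulk_read, c.buf);
	free(c.buf);
	return 0;
}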
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index d27fd918b9c9..6eef5344a145 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
@@ -1501,7 +1501,12 @@ out: | |||
1501 | * @bu: bulk-read parameters and results | 1501 | * @bu: bulk-read parameters and results |
1502 | * | 1502 | * |
1503 | * Lookup consecutive data node keys for the same inode that reside | 1503 | * Lookup consecutive data node keys for the same inode that reside |
1504 | * consecutively in the same LEB. | 1504 | * consecutively in the same LEB. This function returns zero in case of success |
1505 | * and a negative error code in case of failure. | ||
1506 | * | ||
1507 | * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function | ||
1508 | * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares | ||
1509 | * maximum possible number of nodes for bulk-read. | ||
1505 | */ | 1510 | */ |
1506 | int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) | 1511 | int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) |
1507 | { | 1512 | { |
@@ -2677,7 +2682,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) | |||
2677 | struct ubifs_dent_node *xent, *pxent = NULL; | 2682 | struct ubifs_dent_node *xent, *pxent = NULL; |
2678 | struct qstr nm = { .name = NULL }; | 2683 | struct qstr nm = { .name = NULL }; |
2679 | 2684 | ||
2680 | dbg_tnc("ino %lu", inum); | 2685 | dbg_tnc("ino %lu", (unsigned long)inum); |
2681 | 2686 | ||
2682 | /* | 2687 | /* |
2683 | * Walk all extended attribute entries and remove them together with | 2688 | * Walk all extended attribute entries and remove them together with |
@@ -2697,7 +2702,8 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) | |||
2697 | } | 2702 | } |
2698 | 2703 | ||
2699 | xattr_inum = le64_to_cpu(xent->inum); | 2704 | xattr_inum = le64_to_cpu(xent->inum); |
2700 | dbg_tnc("xent '%s', ino %lu", xent->name, xattr_inum); | 2705 | dbg_tnc("xent '%s', ino %lu", xent->name, |
2706 | (unsigned long)xattr_inum); | ||
2701 | 2707 | ||
2702 | nm.name = xent->name; | 2708 | nm.name = xent->name; |
2703 | nm.len = le16_to_cpu(xent->nlen); | 2709 | nm.len = le16_to_cpu(xent->nlen); |
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index a7bd32fa15b9..46b172560a06 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
@@ -753,7 +753,7 @@ struct ubifs_znode { | |||
753 | }; | 753 | }; |
754 | 754 | ||
755 | /** | 755 | /** |
756 | * struct bu_info - bulk-read information | 756 | * struct bu_info - bulk-read information. |
757 | * @key: first data node key | 757 | * @key: first data node key |
758 | * @zbranch: zbranches of data nodes to bulk read | 758 | * @zbranch: zbranches of data nodes to bulk read |
759 | * @buf: buffer to read into | 759 | * @buf: buffer to read into |
@@ -969,7 +969,10 @@ struct ubifs_mount_opts { | |||
969 | * @mst_node: master node | 969 | * @mst_node: master node |
970 | * @mst_offs: offset of valid master node | 970 | * @mst_offs: offset of valid master node |
971 | * @mst_mutex: protects the master node area, @mst_node, and @mst_offs | 971 | * @mst_mutex: protects the master node area, @mst_node, and @mst_offs |
972 | * @bulk_read_buf_size: buffer size for bulk-reads | 972 | * |
973 | * @max_bu_buf_len: maximum bulk-read buffer length | ||
974 | * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu | ||
975 | * @bu: pre-allocated bulk-read information | ||
973 | * | 976 | * |
974 | * @log_lebs: number of logical eraseblocks in the log | 977 | * @log_lebs: number of logical eraseblocks in the log |
975 | * @log_bytes: log size in bytes | 978 | * @log_bytes: log size in bytes |
@@ -1217,7 +1220,10 @@ struct ubifs_info { | |||
1217 | struct ubifs_mst_node *mst_node; | 1220 | struct ubifs_mst_node *mst_node; |
1218 | int mst_offs; | 1221 | int mst_offs; |
1219 | struct mutex mst_mutex; | 1222 | struct mutex mst_mutex; |
1220 | int bulk_read_buf_size; | 1223 | |
1224 | int max_bu_buf_len; | ||
1225 | struct mutex bu_mutex; | ||
1226 | struct bu_info bu; | ||
1221 | 1227 | ||
1222 | int log_lebs; | 1228 | int log_lebs; |
1223 | long long log_bytes; | 1229 | long long log_bytes; |
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 6e74b117aaf0..30ebde490f7f 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
@@ -106,6 +106,7 @@ void udf_clear_inode(struct inode *inode) | |||
106 | udf_truncate_tail_extent(inode); | 106 | udf_truncate_tail_extent(inode); |
107 | unlock_kernel(); | 107 | unlock_kernel(); |
108 | write_inode_now(inode, 0); | 108 | write_inode_now(inode, 0); |
109 | invalidate_inode_buffers(inode); | ||
109 | } | 110 | } |
110 | iinfo = UDF_I(inode); | 111 | iinfo = UDF_I(inode); |
111 | kfree(iinfo->i_ext.i_data); | 112 | kfree(iinfo->i_ext.i_data); |