-rw-r--r--  arch/x86/xen/enlighten.c         |   5
-rw-r--r--  arch/x86/xen/mmu.c               |   9
-rw-r--r--  drivers/firmware/dmi_scan.c      |   6
-rw-r--r--  drivers/message/fusion/mptlan.c  | 108
-rw-r--r--  fs/ext4/ialloc.c                 |   2
-rw-r--r--  fs/ext4/inode.c                  |   7
-rw-r--r--  fs/ext4/mballoc.c                |   1
-rw-r--r--  fs/ext4/super.c                  |  24
-rw-r--r--  fs/jbd/checkpoint.c              |  31
-rw-r--r--  fs/jbd2/checkpoint.c             |  32
-rw-r--r--  fs/jbd2/journal.c                |   2
-rw-r--r--  mm/vmalloc.c                     |   7

12 files changed, 88 insertions(+), 146 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c4..5e4686d70f62 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
         if (PagePinned(virt_to_page(mm->pgd))) {
                 SetPagePinned(page);
 
+                vm_unmap_aliases();
                 if (!PageHighMem(page)) {
                         make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
                         if (level == PT_PTE && USE_SPLIT_PTLOCKS)
                                 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-                } else
+                } else {
                         /* make sure there are no stray mappings of
                            this page */
                         kmap_flush_unused();
-                        vm_unmap_aliases();
+                }
         }
 }
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aba77b2b7d18..89f3b6edc65a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+        vm_unmap_aliases();
+
         xen_mc_batch();
 
         if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
-                /* re-enable interrupts for kmap_flush_unused */
+                /* re-enable interrupts for flushing */
                 xen_mc_issue(0);
+
                 kmap_flush_unused();
-                vm_unmap_aliases();
+
                 xen_mc_batch();
         }
 
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 3e526b6d00cb..8daf4793ac32 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -81,9 +81,9 @@ static void dmi_table(u8 *buf, int len, int num,
                 const struct dmi_header *dm = (const struct dmi_header *)data;
 
                 /*
-                 *  We want to know the total length (formated area and strings)
-                 *  before decoding to make sure we won't run off the table in
-                 *  dmi_decode or dmi_string
+                 *  We want to know the total length (formatted area and
+                 *  strings) before decoding to make sure we won't run off the
+                 *  table in dmi_decode or dmi_string
                  */
                 data += dm->length;
                 while ((data - buf < len - 1) && (data[0] || data[1]))
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index a1abf95cf751..603ffd008c73 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -77,12 +77,6 @@ MODULE_VERSION(my_VERSION);
  * Fusion MPT LAN private structures
  */
 
-struct NAA_Hosed {
-        u16 NAA;
-        u8 ieee[FC_ALEN];
-        struct NAA_Hosed *next;
-};
-
 struct BufferControl {
         struct sk_buff  *skb;
         dma_addr_t      dma;
@@ -159,11 +153,6 @@ static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
 static u32 max_buckets_out = 127;
 static u32 tx_max_out_p = 127 - 16;
 
-#ifdef QLOGIC_NAA_WORKAROUND
-static struct NAA_Hosed *mpt_bad_naa = NULL;
-DEFINE_RWLOCK(bad_naa_lock);
-#endif
-
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *      lan_reply - Handle all data sent from the hardware.
@@ -780,30 +769,6 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 //                      ctx, skb, skb->data));
 
         mac = skb_mac_header(skb);
-#ifdef QLOGIC_NAA_WORKAROUND
-{
-        struct NAA_Hosed *nh;
-
-        /* Munge the NAA for Tx packets to QLogic boards, which don't follow
-           RFC 2625. The longer I look at this, the more my opinion of Qlogic
-           drops. */
-        read_lock_irq(&bad_naa_lock);
-        for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
-                if ((nh->ieee[0] == mac[0]) &&
-                    (nh->ieee[1] == mac[1]) &&
-                    (nh->ieee[2] == mac[2]) &&
-                    (nh->ieee[3] == mac[3]) &&
-                    (nh->ieee[4] == mac[4]) &&
-                    (nh->ieee[5] == mac[5])) {
-                        cur_naa = nh->NAA;
-                        dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
-                                  "= %04x.\n", cur_naa));
-                        break;
-                }
-        }
-        read_unlock_irq(&bad_naa_lock);
-}
-#endif
 
         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
                                                     (mac[0] <<  8) |
@@ -1572,79 +1537,6 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
 
         fcllc = (struct fcllc *)skb->data;
 
-#ifdef QLOGIC_NAA_WORKAROUND
-{
-        u16 source_naa = fch->stype, found = 0;
-
-        /* Workaround for QLogic not following RFC 2625 in regards to the NAA
-           value. */
-
-        if ((source_naa & 0xF000) == 0)
-                source_naa = swab16(source_naa);
-
-        if (fcllc->ethertype == htons(ETH_P_ARP))
-            dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
-                      "%04x.\n", source_naa));
-
-        if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
-            ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
-                struct NAA_Hosed *nh, *prevnh;
-                int i;
-
-                dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
-                          "system with non-RFC 2625 NAA value (%04x).\n",
-                          source_naa));
-
-                write_lock_irq(&bad_naa_lock);
-                for (prevnh = nh = mpt_bad_naa; nh != NULL;
-                     prevnh=nh, nh=nh->next) {
-                        if ((nh->ieee[0] == fch->saddr[0]) &&
-                            (nh->ieee[1] == fch->saddr[1]) &&
-                            (nh->ieee[2] == fch->saddr[2]) &&
-                            (nh->ieee[3] == fch->saddr[3]) &&
-                            (nh->ieee[4] == fch->saddr[4]) &&
-                            (nh->ieee[5] == fch->saddr[5])) {
-                                found = 1;
-                                dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
-                                          "q/Rep w/ bad NAA from system already"
-                                          " in DB.\n"));
-                                break;
-                        }
-                }
-
-                if ((!found) && (nh == NULL)) {
-
-                        nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
-                        dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
-                                  " bad NAA from system not yet in DB.\n"));
-
-                        if (nh != NULL) {
-                                nh->next = NULL;
-                                if (!mpt_bad_naa)
-                                        mpt_bad_naa = nh;
-                                if (prevnh)
-                                        prevnh->next = nh;
-
-                                nh->NAA = source_naa; /* Set the S_NAA value. */
-                                for (i = 0; i < FC_ALEN; i++)
-                                        nh->ieee[i] = fch->saddr[i];
-                                dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
-                                          "%02x:%02x with non-compliant S_NAA value.\n",
-                                          fch->saddr[0], fch->saddr[1], fch->saddr[2],
-                                          fch->saddr[3], fch->saddr[4],fch->saddr[5]));
-                        } else {
-                                printk (KERN_ERR "mptlan/type_trans: Unable to"
-                                        " kmalloc a NAA_Hosed struct.\n");
-                        }
-                } else if (!found) {
-                        printk (KERN_ERR "mptlan/type_trans: found not"
-                                " set, but nh isn't null. Evil "
-                                "funkiness abounds.\n");
-                }
-                write_unlock_irq(&bad_naa_lock);
-        }
-}
-#endif
 
         /* Strip the SNAP header from ARP packets since we don't
          * pass them through to the 802.2/SNAP layers.
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index fe34d74cfb19..2a117e286e54 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -718,6 +718,8 @@ got:
                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                 free = ext4_free_blocks_after_init(sb, group, gdp);
                 gdp->bg_free_blocks_count = cpu_to_le16(free);
+                gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
+                                                        gdp);
         }
         spin_unlock(sb_bgl_lock(sbi, group));
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8dbf6953845b..be21a5ae33cb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2329,6 +2329,8 @@ static int ext4_da_writepage(struct page *page,
                         unlock_page(page);
                         return 0;
                 }
+                /* now mark the buffer_heads as dirty and uptodate */
+                block_commit_write(page, 0, PAGE_CACHE_SIZE);
         }
 
         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -4580,9 +4582,10 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
 {
         if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
-                return ext4_indirect_trans_blocks(inode, nrblocks, 0);
-        return ext4_ext_index_trans_blocks(inode, nrblocks, 0);
+                return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
+        return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
 }
+
 /*
  * Account for index blocks, block groups bitmaps and block group
  * descriptor blocks if modify datablocks and index blocks
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index dfe17a134052..444ad998f72e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4441,6 +4441,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
                 else if (block >= (entry->start_blk + entry->count))
                         n = &(*n)->rb_right;
                 else {
+                        ext4_unlock_group(sb, group);
                         ext4_error(sb, __func__,
                                 "Double free of blocks %d (%d %d)\n",
                                 block, entry->start_blk, entry->count);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 994859df010e..e4a241c65dbe 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1458,9 +1458,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
 
         /* We allocate both existing and potentially added groups */
         flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
-                           ((sbi->s_es->s_reserved_gdt_blocks +1 ) <<
-                              EXT4_DESC_PER_BLOCK_BITS(sb))) /
-                           groups_per_flex;
+                        ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
+                              EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
         sbi->s_flex_groups = kzalloc(flex_group_count *
                                      sizeof(struct flex_groups), GFP_KERNEL);
         if (sbi->s_flex_groups == NULL) {
@@ -2885,12 +2884,9 @@ int ext4_force_commit(struct super_block *sb)
 /*
  * Ext4 always journals updates to the superblock itself, so we don't
  * have to propagate any other updates to the superblock on disk at this
- * point.  Just start an async writeback to get the buffers on their way
- * to the disk.
- *
- * This implicitly triggers the writebehind on sync().
+ * point.  (We can probably nuke this function altogether, and remove
+ * any mention to sb->s_dirt in all of fs/ext4; eventual cleanup...)
  */
-
 static void ext4_write_super(struct super_block *sb)
 {
         if (mutex_trylock(&sb->s_lock) != 0)
@@ -2900,15 +2896,15 @@ static void ext4_write_super(struct super_block *sb)
 
 static int ext4_sync_fs(struct super_block *sb, int wait)
 {
-        tid_t target;
+        int ret = 0;
 
         trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
         sb->s_dirt = 0;
-        if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) {
-                if (wait)
-                        jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target);
-        }
-        return 0;
+        if (wait)
+                ret = ext4_force_commit(sb);
+        else
+                jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
+        return ret;
 }
 
 /*
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 1bd8d4acc6f2..61f32f3868cd 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -115,7 +115,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
  */
 void __log_wait_for_space(journal_t *journal)
 {
-        int nblocks;
+        int nblocks, space_left;
         assert_spin_locked(&journal->j_state_lock);
 
         nblocks = jbd_space_needed(journal);
@@ -128,25 +128,42 @@ void __log_wait_for_space(journal_t *journal)
                 /*
                  * Test again, another process may have checkpointed while we
                  * were waiting for the checkpoint lock. If there are no
-                 * outstanding transactions there is nothing to checkpoint and
-                 * we can't make progress. Abort the journal in this case.
+                 * transactions ready to be checkpointed, try to recover
+                 * journal space by calling cleanup_journal_tail(), and if
+                 * that doesn't work, by waiting for the currently committing
+                 * transaction to complete.  If there is absolutely no way
+                 * to make progress, this is either a BUG or corrupted
+                 * filesystem, so abort the journal and leave a stack
+                 * trace for forensic evidence.
                  */
                 spin_lock(&journal->j_state_lock);
                 spin_lock(&journal->j_list_lock);
                 nblocks = jbd_space_needed(journal);
-                if (__log_space_left(journal) < nblocks) {
+                space_left = __log_space_left(journal);
+                if (space_left < nblocks) {
                         int chkpt = journal->j_checkpoint_transactions != NULL;
+                        tid_t tid = 0;
 
+                        if (journal->j_committing_transaction)
+                                tid = journal->j_committing_transaction->t_tid;
                         spin_unlock(&journal->j_list_lock);
                         spin_unlock(&journal->j_state_lock);
                         if (chkpt) {
                                 log_do_checkpoint(journal);
+                        } else if (cleanup_journal_tail(journal) == 0) {
+                                /* We were able to recover space; yay! */
+                                ;
+                        } else if (tid) {
+                                log_wait_commit(journal, tid);
                         } else {
-                                printk(KERN_ERR "%s: no transactions\n",
-                                       __func__);
+                                printk(KERN_ERR "%s: needed %d blocks and "
+                                       "only had %d space available\n",
+                                       __func__, nblocks, space_left);
+                                printk(KERN_ERR "%s: no way to get more "
+                                       "journal space\n", __func__);
+                                WARN_ON(1);
                                 journal_abort(journal, 0);
                         }
-
                         spin_lock(&journal->j_state_lock);
                 } else {
                         spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 9203c3332f17..9497718fe920 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -116,7 +116,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
  */
 void __jbd2_log_wait_for_space(journal_t *journal)
 {
-        int nblocks;
+        int nblocks, space_left;
         assert_spin_locked(&journal->j_state_lock);
 
         nblocks = jbd_space_needed(journal);
@@ -129,25 +129,43 @@ void __jbd2_log_wait_for_space(journal_t *journal)
                 /*
                  * Test again, another process may have checkpointed while we
                  * were waiting for the checkpoint lock. If there are no
-                 * outstanding transactions there is nothing to checkpoint and
-                 * we can't make progress. Abort the journal in this case.
+                 * transactions ready to be checkpointed, try to recover
+                 * journal space by calling cleanup_journal_tail(), and if
+                 * that doesn't work, by waiting for the currently committing
+                 * transaction to complete.  If there is absolutely no way
+                 * to make progress, this is either a BUG or corrupted
+                 * filesystem, so abort the journal and leave a stack
+                 * trace for forensic evidence.
                  */
                 spin_lock(&journal->j_state_lock);
                 spin_lock(&journal->j_list_lock);
                 nblocks = jbd_space_needed(journal);
-                if (__jbd2_log_space_left(journal) < nblocks) {
+                space_left = __jbd2_log_space_left(journal);
+                if (space_left < nblocks) {
                         int chkpt = journal->j_checkpoint_transactions != NULL;
+                        tid_t tid = 0;
 
+                        if (journal->j_committing_transaction)
+                                tid = journal->j_committing_transaction->t_tid;
                         spin_unlock(&journal->j_list_lock);
                         spin_unlock(&journal->j_state_lock);
                         if (chkpt) {
                                 jbd2_log_do_checkpoint(journal);
+                        } else if (jbd2_cleanup_journal_tail(journal) == 0) {
+                                /* We were able to recover space; yay! */
+                                ;
+                        } else if (tid) {
+                                jbd2_log_wait_commit(journal, tid);
                         } else {
-                                printk(KERN_ERR "%s: no transactions\n",
-                                       __func__);
+                                printk(KERN_ERR "%s: needed %d blocks and "
+                                       "only had %d space available\n",
+                                       __func__, nblocks, space_left);
+                                printk(KERN_ERR "%s: no way to get more "
+                                       "journal space in %s\n", __func__,
+                                       journal->j_devname);
+                                WARN_ON(1);
                                 jbd2_journal_abort(journal, 0);
                         }
-
                         spin_lock(&journal->j_state_lock);
                 } else {
                         spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 783de118de92..e70d657a19f8 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1089,6 +1089,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
         if (!journal->j_wbuf) {
                 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
                         __func__);
+                jbd2_stats_proc_exit(journal);
                 kfree(journal);
                 return NULL;
         }
@@ -1098,6 +1099,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
         if (err) {
                 printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
                         __func__);
+                jbd2_stats_proc_exit(journal);
                 kfree(journal);
                 return NULL;
         }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 66fad3fc02b1..ba6b0f5f7fac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE         (VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
         spinlock_t lock;
         struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
         int cpu;
         int flush = 0;
 
+        if (unlikely(!vmap_initialized))
+                return;
+
         for_each_possible_cpu(cpu) {
                 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
                 struct vmap_block *vb;
@@ -942,6 +947,8 @@ void __init vmalloc_init(void)
                 INIT_LIST_HEAD(&vbq->dirty);
                 vbq->nr_dirty = 0;
         }
+
+        vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)