-rw-r--r--  Documentation/filesystems/Locking | 43
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 6
-rw-r--r--  drivers/char/mxser.c | 2
-rw-r--r--  drivers/char/nozomi.c | 2
-rw-r--r--  drivers/char/synclink_gt.c | 72
-rw-r--r--  drivers/char/tty_port.c | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 9
-rw-r--r--  drivers/i2c/busses/Makefile | 1
-rw-r--r--  drivers/i2c/busses/i2c-designware.c | 624
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/bnx2.c | 10
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 37
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 7
-rw-r--r--  drivers/net/qla3xxx.c | 6
-rw-r--r--  drivers/pci/intel-iommu.c | 314
-rw-r--r--  drivers/pci/intr_remapping.c | 160
-rw-r--r--  drivers/pci/intr_remapping.h | 2
-rw-r--r--  drivers/serial/8250_pci.c | 6
-rw-r--r--  drivers/serial/icom.c | 3
-rw-r--r--  drivers/serial/jsm/jsm_tty.c | 4
-rw-r--r--  drivers/serial/serial_txx9.c | 113
-rw-r--r--  fs/btrfs/acl.c | 44
-rw-r--r--  fs/btrfs/btrfs_inode.h | 4
-rw-r--r--  fs/btrfs/ctree.h | 2
-rw-r--r--  fs/btrfs/inode.c | 16
-rw-r--r--  fs/compat_ioctl.c | 48
-rw-r--r--  fs/devpts/inode.c | 10
-rw-r--r--  fs/ext2/acl.c | 81
-rw-r--r--  fs/ext2/acl.h | 4
-rw-r--r--  fs/ext2/ext2.h | 4
-rw-r--r--  fs/ext2/inode.c | 4
-rw-r--r--  fs/ext2/super.c | 16
-rw-r--r--  fs/ext3/acl.c | 85
-rw-r--r--  fs/ext3/acl.h | 4
-rw-r--r--  fs/ext3/inode.c | 4
-rw-r--r--  fs/ext3/super.c | 16
-rw-r--r--  fs/ext4/acl.c | 67
-rw-r--r--  fs/ext4/acl.h | 4
-rw-r--r--  fs/ext4/ext4.h | 4
-rw-r--r--  fs/ext4/inode.c | 4
-rw-r--r--  fs/ext4/super.c | 16
-rw-r--r--  fs/fs-writeback.c | 100
-rw-r--r--  fs/inode.c | 27
-rw-r--r--  fs/ioctl.c | 35
-rw-r--r--  fs/jffs2/acl.c | 88
-rw-r--r--  fs/jffs2/acl.h | 4
-rw-r--r--  fs/jffs2/jffs2_fs_i.h | 4
-rw-r--r--  fs/jffs2/os-linux.h | 4
-rw-r--r--  fs/jffs2/readinode.c | 1
-rw-r--r--  fs/jfs/acl.c | 42
-rw-r--r--  fs/jfs/jfs_incore.h | 6
-rw-r--r--  fs/jfs/super.c | 16
-rw-r--r--  fs/jfs/xattr.c | 10
-rw-r--r--  fs/namei.c | 11
-rw-r--r--  fs/namespace.c | 37
-rw-r--r--  fs/nilfs2/inode.c | 8
-rw-r--r--  fs/nilfs2/nilfs.h | 4
-rw-r--r--  fs/nilfs2/super.c | 10
-rw-r--r--  fs/ocfs2/dlmglue.c | 123
-rw-r--r--  fs/ocfs2/dlmglue.h | 24
-rw-r--r--  fs/ocfs2/file.c | 6
-rw-r--r--  fs/ocfs2/inode.c | 11
-rw-r--r--  fs/ocfs2/journal.c | 43
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  fs/ocfs2/namei.c | 15
-rw-r--r--  fs/ocfs2/ocfs2.h | 10
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 11
-rw-r--r--  fs/ocfs2/stack_user.c | 8
-rw-r--r--  fs/ocfs2/stackglue.c | 13
-rw-r--r--  fs/ocfs2/stackglue.h | 6
-rw-r--r--  fs/ocfs2/suballoc.c | 28
-rw-r--r--  fs/ocfs2/super.c | 69
-rw-r--r--  fs/ocfs2/sysfile.c | 19
-rw-r--r--  fs/open.c | 58
-rw-r--r--  fs/reiserfs/inode.c | 4
-rw-r--r--  fs/reiserfs/resize.c | 1
-rw-r--r--  fs/reiserfs/super.c | 24
-rw-r--r--  fs/reiserfs/xattr_acl.c | 58
-rw-r--r--  fs/super.c | 9
-rw-r--r--  fs/ubifs/xattr.c | 2
-rw-r--r--  fs/udf/balloc.c | 9
-rw-r--r--  fs/udf/lowlevel.c | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_acl.c | 73
-rw-r--r--  fs/xfs/xfs_acl.h | 4
-rw-r--r--  fs/xfs/xfs_iget.c | 2
-rw-r--r--  fs/xfs/xfs_inode.h | 5
-rw-r--r--  include/linux/dmar.h | 11
-rw-r--r--  include/linux/ext3_fs_i.h | 4
-rw-r--r--  include/linux/falloc.h | 21
-rw-r--r--  include/linux/fs.h | 13
-rw-r--r--  include/linux/icmpv6.h | 6
-rw-r--r--  include/linux/lockdep.h | 15
-rw-r--r--  include/linux/posix_acl.h | 64
-rw-r--r--  include/linux/reiserfs_acl.h | 17
-rw-r--r--  include/linux/reiserfs_fs_i.h | 4
-rw-r--r--  include/linux/shmem_fs.h | 8
-rw-r--r--  include/net/protocol.h | 2
-rw-r--r--  include/net/rawv6.h | 2
-rw-r--r--  include/net/sctp/sctp.h | 1
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/net/xfrm.h | 2
-rw-r--r--  mm/shmem.c | 9
-rw-r--r--  mm/shmem_acl.c | 29
-rw-r--r--  net/ax25/ax25_in.c | 3
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/dccp/ipv6.c | 2
-rw-r--r--  net/ipv4/route.c | 26
-rw-r--r--  net/ipv6/ah6.c | 2
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/icmp.c | 12
-rw-r--r--  net/ipv6/ip6_tunnel.c | 18
-rw-r--r--  net/ipv6/ipcomp6.c | 2
-rw-r--r--  net/ipv6/mip6.c | 2
-rw-r--r--  net/ipv6/raw.c | 4
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/tunnel6.c | 2
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/ipv6/udp_impl.h | 2
-rw-r--r--  net/ipv6/udplite.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/irda/af_irda.c | 3
-rw-r--r--  net/irda/ircomm/ircomm_lmp.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 25
-rw-r--r--  net/netfilter/nf_log.c | 16
-rw-r--r--  net/netfilter/xt_NFQUEUE.c | 8
-rw-r--r--  net/netfilter/xt_cluster.c | 8
-rw-r--r--  net/netfilter/xt_quota.c | 1
-rw-r--r--  net/netfilter/xt_rateest.c | 2
-rw-r--r--  net/sctp/ipv6.c | 2
131 files changed, 2102 insertions(+), 1181 deletions(-)
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 229d7b7c50a3..18b9d0ca0630 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -109,27 +109,28 @@ prototypes:
 
 locking rules:
 	All may block.
-	BKL	s_lock	s_umount
-alloc_inode:	no	no	no
-destroy_inode:	no
-dirty_inode:	no				(must not sleep)
-write_inode:	no
-drop_inode:	no				!!!inode_lock!!!
-delete_inode:	no
-put_super:	yes	yes	no
-write_super:	no	yes	read
-sync_fs:	no	no	read
-freeze_fs:	?
-unfreeze_fs:	?
-statfs:	no	no	no
-remount_fs:	yes	yes	maybe		(see below)
-clear_inode:	no
-umount_begin:	yes	no	no
-show_options:	no		(vfsmount->sem)
-quota_read:	no	no	no		(see below)
-quota_write:	no	no	no		(see below)
+	None have BKL
+			s_umount
+alloc_inode:
+destroy_inode:
+dirty_inode:				(must not sleep)
+write_inode:
+drop_inode:				!!!inode_lock!!!
+delete_inode:
+put_super:	write
+write_super:	read
+sync_fs:	read
+freeze_fs:	read
+unfreeze_fs:	read
+statfs:		no
+remount_fs:	maybe		(see below)
+clear_inode:
+umount_begin:	no
+show_options:	no		(namespace_sem)
+quota_read:	no		(see below)
+quota_write:	no		(see below)
 
-->remount_fs() will have the s_umount lock if it's already mounted.
+->remount_fs() will have the s_umount exclusive lock if it's already mounted.
 When called from get_sb_single, it does NOT have the s_umount lock.
 ->quota_read() and ->quota_write() functions are both guaranteed to
 be the only ones operating on the quota file by the quota code (via
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b7a79207295e..4d0216fcb36c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1414,6 +1414,9 @@ int setup_ioapic_entry(int apic_id, int irq,
 		irte.vector = vector;
 		irte.dest_id = IRTE_DEST(destination);
 
+		/* Set source-id of interrupt request */
+		set_ioapic_sid(&irte, apic_id);
+
 		modify_irte(irq, &irte);
 
 		ir_entry->index2 = (index >> 15) & 0x1;
@@ -3290,6 +3293,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 	irte.vector = cfg->vector;
 	irte.dest_id = IRTE_DEST(dest);
 
+	/* Set source-id of interrupt request */
+	set_msi_sid(&irte, pdev);
+
 	modify_irte(irq, &irte);
 
 	msg->address_hi = MSI_ADDR_BASE_HI;
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 9533f43a30bb..52d953eb30c3 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1048,8 +1048,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
 	if (retval)
 		return retval;
 
-	/* unmark here for very high baud rate (ex. 921600 bps) used */
-	tty->low_latency = 1;
 	return 0;
 }
 
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index d6102b644b55..574f1c79b6e6 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1591,8 +1591,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file)
 
 	/* Enable interrupt downlink for channel */
 	if (port->port.count == 1) {
-		/* FIXME: is this needed now ? */
-		tty->low_latency = 1;
 		tty->driver_data = port;
 		tty_port_tty_set(&port->port, tty);
 		DBG1("open: %d", port->token_dl);
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 1386625fc4ca..a2e67e6df3a1 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -467,7 +467,6 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
 static unsigned int tbuf_bytes(struct slgt_info *info);
 static void reset_tbufs(struct slgt_info *info);
 static void tdma_reset(struct slgt_info *info);
-static void tdma_start(struct slgt_info *info);
 static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
 
 static void get_signals(struct slgt_info *info);
@@ -795,6 +794,18 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 	}
 }
 
+static void update_tx_timer(struct slgt_info *info)
+{
+	/*
+	 * use worst case speed of 1200bps to calculate transmit timeout
+	 * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
+	 */
+	if (info->params.mode == MGSL_MODE_HDLC) {
+		int timeout = (tbuf_bytes(info) * 7) + 1000;
+		mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
+	}
+}
+
 static int write(struct tty_struct *tty,
 		 const unsigned char *buf, int count)
 {
@@ -838,8 +849,18 @@ start:
 		spin_lock_irqsave(&info->lock,flags);
 		if (!info->tx_active)
 			tx_start(info);
-		else
-			tdma_start(info);
+		else if (!(rd_reg32(info, TDCSR) & BIT0)) {
+			/* transmit still active but transmit DMA stopped */
+			unsigned int i = info->tbuf_current;
+			if (!i)
+				i = info->tbuf_count;
+			i--;
+			/* if DMA buf unsent must try later after tx idle */
+			if (desc_count(info->tbufs[i]))
+				ret = 0;
+		}
+		if (ret > 0)
+			update_tx_timer(info);
 		spin_unlock_irqrestore(&info->lock,flags);
 	}
 
@@ -1502,10 +1523,9 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* save start time for transmit timeout detection */
 	dev->trans_start = jiffies;
 
-	/* start hardware transmitter if necessary */
 	spin_lock_irqsave(&info->lock,flags);
-	if (!info->tx_active)
-		tx_start(info);
+	tx_start(info);
+	update_tx_timer(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 
 	return 0;
@@ -3946,50 +3966,19 @@ static void tx_start(struct slgt_info *info)
 			slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
 			/* clear tx idle and underrun status bits */
 			wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
-			if (info->params.mode == MGSL_MODE_HDLC)
-				mod_timer(&info->tx_timer, jiffies +
-						msecs_to_jiffies(5000));
 		} else {
 			slgt_irq_off(info, IRQ_TXDATA);
 			slgt_irq_on(info, IRQ_TXIDLE);
 			/* clear tx idle status bit */
 			wr_reg16(info, SSR, IRQ_TXIDLE);
 		}
-		tdma_start(info);
+		/* set 1st descriptor address and start DMA */
+		wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
+		wr_reg32(info, TDCSR, BIT2 + BIT0);
 		info->tx_active = true;
 	}
 }
 
-/*
- * start transmit DMA if inactive and there are unsent buffers
- */
-static void tdma_start(struct slgt_info *info)
-{
-	unsigned int i;
-
-	if (rd_reg32(info, TDCSR) & BIT0)
-		return;
-
-	/* transmit DMA inactive, check for unsent buffers */
-	i = info->tbuf_start;
-	while (!desc_count(info->tbufs[i])) {
-		if (++i == info->tbuf_count)
-			i = 0;
-		if (i == info->tbuf_current)
-			return;
-	}
-	info->tbuf_start = i;
-
-	/* there are unsent buffers, start transmit DMA */
-
-	/* reset needed if previous error condition */
-	tdma_reset(info);
-
-	/* set 1st descriptor address */
-	wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
-	wr_reg32(info, TDCSR, BIT2 + BIT0);	/* IRQ + DMA enable */
-}
-
 static void tx_stop(struct slgt_info *info)
 {
 	unsigned short val;
@@ -5004,8 +4993,7 @@ static void tx_timeout(unsigned long context)
 		info->icount.txtimeout++;
 	}
 	spin_lock_irqsave(&info->lock,flags);
-	info->tx_active = false;
-	info->tx_count = 0;
+	tx_stop(info);
 	spin_unlock_irqrestore(&info->lock,flags);
 
 #if SYNCLINK_GENERIC_HDLC
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 62dadfc95e34..4e862a75f7ff 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -193,7 +193,7 @@ int tty_port_block_til_ready(struct tty_port *port,
 {
 	int do_clocal = 0, retval;
 	unsigned long flags;
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 	int cd;
 
 	/* block if port is in the process of being closed */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3c259ee7ddda..aa87b6a3bbef 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -326,6 +326,15 @@ config I2C_DAVINCI
 	  devices such as DaVinci NIC.
 	  For details please see http://www.ti.com/davinci
 
+config I2C_DESIGNWARE
+	tristate "Synopsys DesignWare"
+	help
+	  If you say yes to this option, support will be included for the
+	  Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-designware.
+
 config I2C_GPIO
 	tristate "GPIO-based bitbanging I2C"
 	depends on GENERIC_GPIO
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index edeabf003106..e654263bfc01 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI)	+= i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CPM)		+= i2c-cpm.o
 obj-$(CONFIG_I2C_DAVINCI)	+= i2c-davinci.o
+obj-$(CONFIG_I2C_DESIGNWARE)	+= i2c-designware.o
 obj-$(CONFIG_I2C_GPIO)		+= i2c-gpio.o
 obj-$(CONFIG_I2C_HIGHLANDER)	+= i2c-highlander.o
 obj-$(CONFIG_I2C_IBM_IIC)	+= i2c-ibm_iic.o
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c
new file mode 100644
index 000000000000..b444762e9b9f
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware.c
@@ -0,0 +1,624 @@
1/*
2 * Synopsys Designware I2C adapter driver (master only).
3 *
4 * Based on the TI DAVINCI I2C adapter driver.
5 *
6 * Copyright (C) 2006 Texas Instruments.
7 * Copyright (C) 2007 MontaVista Software Inc.
8 * Copyright (C) 2009 Provigent Ltd.
9 *
10 * ----------------------------------------------------------------------------
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ----------------------------------------------------------------------------
26 *
27 */
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/delay.h>
31#include <linux/i2c.h>
32#include <linux/clk.h>
33#include <linux/errno.h>
34#include <linux/sched.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/platform_device.h>
38#include <linux/io.h>
39
40/*
41 * Registers offset
42 */
43#define DW_IC_CON 0x0
44#define DW_IC_TAR 0x4
45#define DW_IC_DATA_CMD 0x10
46#define DW_IC_SS_SCL_HCNT 0x14
47#define DW_IC_SS_SCL_LCNT 0x18
48#define DW_IC_FS_SCL_HCNT 0x1c
49#define DW_IC_FS_SCL_LCNT 0x20
50#define DW_IC_INTR_STAT 0x2c
51#define DW_IC_INTR_MASK 0x30
52#define DW_IC_CLR_INTR 0x40
53#define DW_IC_ENABLE 0x6c
54#define DW_IC_STATUS 0x70
55#define DW_IC_TXFLR 0x74
56#define DW_IC_RXFLR 0x78
57#define DW_IC_COMP_PARAM_1 0xf4
58#define DW_IC_TX_ABRT_SOURCE 0x80
59
60#define DW_IC_CON_MASTER 0x1
61#define DW_IC_CON_SPEED_STD 0x2
62#define DW_IC_CON_SPEED_FAST 0x4
63#define DW_IC_CON_10BITADDR_MASTER 0x10
64#define DW_IC_CON_RESTART_EN 0x20
65#define DW_IC_CON_SLAVE_DISABLE 0x40
66
67#define DW_IC_INTR_TX_EMPTY 0x10
68#define DW_IC_INTR_TX_ABRT 0x40
69#define DW_IC_INTR_STOP_DET 0x200
70
71#define DW_IC_STATUS_ACTIVITY 0x1
72
73#define DW_IC_ERR_TX_ABRT 0x1
74
75/*
76 * status codes
77 */
78#define STATUS_IDLE 0x0
79#define STATUS_WRITE_IN_PROGRESS 0x1
80#define STATUS_READ_IN_PROGRESS 0x2
81
82#define TIMEOUT 20 /* ms */
83
84/*
85 * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
86 *
87 * only expected abort codes are listed here
88 * refer to the datasheet for the full list
89 */
90#define ABRT_7B_ADDR_NOACK 0
91#define ABRT_10ADDR1_NOACK 1
92#define ABRT_10ADDR2_NOACK 2
93#define ABRT_TXDATA_NOACK 3
94#define ABRT_GCALL_NOACK 4
95#define ABRT_GCALL_READ 5
96#define ABRT_SBYTE_ACKDET 7
97#define ABRT_SBYTE_NORSTRT 9
98#define ABRT_10B_RD_NORSTRT 10
99#define ARB_MASTER_DIS 11
100#define ARB_LOST 12
101
102static char *abort_sources[] = {
103 [ABRT_7B_ADDR_NOACK] =
104 "slave address not acknowledged (7bit mode)",
105 [ABRT_10ADDR1_NOACK] =
106 "first address byte not acknowledged (10bit mode)",
107 [ABRT_10ADDR2_NOACK] =
108 "second address byte not acknowledged (10bit mode)",
109 [ABRT_TXDATA_NOACK] =
110 "data not acknowledged",
111 [ABRT_GCALL_NOACK] =
112 "no acknowledgement for a general call",
113 [ABRT_GCALL_READ] =
114 "read after general call",
115 [ABRT_SBYTE_ACKDET] =
116 "start byte acknowledged",
117 [ABRT_SBYTE_NORSTRT] =
118 "trying to send start byte when restart is disabled",
119 [ABRT_10B_RD_NORSTRT] =
120 "trying to read when restart is disabled (10bit mode)",
121 [ARB_MASTER_DIS] =
122 "trying to use disabled adapter",
123 [ARB_LOST] =
124 "lost arbitration",
125};
126
127/**
128 * struct dw_i2c_dev - private i2c-designware data
129 * @dev: driver model device node
130 * @base: IO registers pointer
131 * @cmd_complete: tx completion indicator
132 * @pump_msg: continue in progress transfers
133 * @lock: protect this struct and IO registers
134 * @clk: input reference clock
135 * @cmd_err: run time hadware error code
136 * @msgs: points to an array of messages currently being transfered
137 * @msgs_num: the number of elements in msgs
138 * @msg_write_idx: the element index of the current tx message in the msgs
139 * array
140 * @tx_buf_len: the length of the current tx buffer
141 * @tx_buf: the current tx buffer
142 * @msg_read_idx: the element index of the current rx message in the msgs
143 * array
144 * @rx_buf_len: the length of the current rx buffer
145 * @rx_buf: the current rx buffer
146 * @msg_err: error status of the current transfer
147 * @status: i2c master status, one of STATUS_*
148 * @abort_source: copy of the TX_ABRT_SOURCE register
149 * @irq: interrupt number for the i2c master
150 * @adapter: i2c subsystem adapter node
151 * @tx_fifo_depth: depth of the hardware tx fifo
152 * @rx_fifo_depth: depth of the hardware rx fifo
153 */
154struct dw_i2c_dev {
155 struct device *dev;
156 void __iomem *base;
157 struct completion cmd_complete;
158 struct tasklet_struct pump_msg;
159 struct mutex lock;
160 struct clk *clk;
161 int cmd_err;
162 struct i2c_msg *msgs;
163 int msgs_num;
164 int msg_write_idx;
165 u16 tx_buf_len;
166 u8 *tx_buf;
167 int msg_read_idx;
168 u16 rx_buf_len;
169 u8 *rx_buf;
170 int msg_err;
171 unsigned int status;
172 u16 abort_source;
173 int irq;
174 struct i2c_adapter adapter;
175 unsigned int tx_fifo_depth;
176 unsigned int rx_fifo_depth;
177};
178
179/**
180 * i2c_dw_init() - initialize the designware i2c master hardware
181 * @dev: device private data
182 *
183 * This functions configures and enables the I2C master.
184 * This function is called during I2C init function, and in case of timeout at
185 * run time.
186 */
187static void i2c_dw_init(struct dw_i2c_dev *dev)
188{
189 u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
190 u16 ic_con;
191
192 /* Disable the adapter */
193 writeb(0, dev->base + DW_IC_ENABLE);
194
195 /* set standard and fast speed deviders for high/low periods */
196 writew((input_clock_khz * 40 / 10000)+1, /* std speed high, 4us */
197 dev->base + DW_IC_SS_SCL_HCNT);
198 writew((input_clock_khz * 47 / 10000)+1, /* std speed low, 4.7us */
199 dev->base + DW_IC_SS_SCL_LCNT);
200 writew((input_clock_khz * 6 / 10000)+1, /* fast speed high, 0.6us */
201 dev->base + DW_IC_FS_SCL_HCNT);
202 writew((input_clock_khz * 13 / 10000)+1, /* fast speed low, 1.3us */
203 dev->base + DW_IC_FS_SCL_LCNT);
204
205 /* configure the i2c master */
206 ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
207 DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
208 writew(ic_con, dev->base + DW_IC_CON);
209}
210
211/*
212 * Waiting for bus not busy
213 */
214static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
215{
216 int timeout = TIMEOUT;
217
218 while (readb(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
219 if (timeout <= 0) {
220 dev_warn(dev->dev, "timeout waiting for bus ready\n");
221 return -ETIMEDOUT;
222 }
223 timeout--;
224 mdelay(1);
225 }
226
227 return 0;
228}
229
230/*
231 * Initiate low level master read/write transaction.
232 * This function is called from i2c_dw_xfer when starting a transfer.
233 * This function is also called from dw_i2c_pump_msg to continue a transfer
234 * that is longer than the size of the TX FIFO.
235 */
236static void
237i2c_dw_xfer_msg(struct i2c_adapter *adap)
238{
239 struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
240 struct i2c_msg *msgs = dev->msgs;
241 int num = dev->msgs_num;
242 u16 ic_con, intr_mask;
243 int tx_limit = dev->tx_fifo_depth - readb(dev->base + DW_IC_TXFLR);
244 int rx_limit = dev->rx_fifo_depth - readb(dev->base + DW_IC_RXFLR);
245 u16 addr = msgs[dev->msg_write_idx].addr;
246 u16 buf_len = dev->tx_buf_len;
247
248 if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
249 /* Disable the adapter */
250 writeb(0, dev->base + DW_IC_ENABLE);
251
252 /* set the slave (target) address */
253 writew(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR);
254
255 /* if the slave address is ten bit address, enable 10BITADDR */
256 ic_con = readw(dev->base + DW_IC_CON);
257 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
258 ic_con |= DW_IC_CON_10BITADDR_MASTER;
259 else
260 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
261 writew(ic_con, dev->base + DW_IC_CON);
262
263 /* Enable the adapter */
264 writeb(1, dev->base + DW_IC_ENABLE);
265 }
266
267 for (; dev->msg_write_idx < num; dev->msg_write_idx++) {
268 /* if target address has changed, we need to
269 * reprogram the target address in the i2c
270 * adapter when we are done with this transfer
271 */
272 if (msgs[dev->msg_write_idx].addr != addr)
273 return;
274
275 if (msgs[dev->msg_write_idx].len == 0) {
276 dev_err(dev->dev,
277 "%s: invalid message length\n", __func__);
278 dev->msg_err = -EINVAL;
279 return;
280 }
281
282 if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
283 /* new i2c_msg */
284 dev->tx_buf = msgs[dev->msg_write_idx].buf;
285 buf_len = msgs[dev->msg_write_idx].len;
286 }
287
288 while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
289 if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
290 writew(0x100, dev->base + DW_IC_DATA_CMD);
291 rx_limit--;
292 } else
293 writew(*(dev->tx_buf++),
294 dev->base + DW_IC_DATA_CMD);
295 tx_limit--; buf_len--;
296 }
297 }
298
299 intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
300 if (buf_len > 0) { /* more bytes to be written */
301 intr_mask |= DW_IC_INTR_TX_EMPTY;
302 dev->status |= STATUS_WRITE_IN_PROGRESS;
303 } else
304 dev->status &= ~STATUS_WRITE_IN_PROGRESS;
305 writew(intr_mask, dev->base + DW_IC_INTR_MASK);
306
307 dev->tx_buf_len = buf_len;
308}
309
310static void
311i2c_dw_read(struct i2c_adapter *adap)
312{
313 struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
314 struct i2c_msg *msgs = dev->msgs;
315 int num = dev->msgs_num;
316 u16 addr = msgs[dev->msg_read_idx].addr;
317 int rx_valid = readw(dev->base + DW_IC_RXFLR);
318
319 for (; dev->msg_read_idx < num; dev->msg_read_idx++) {
320 u16 len;
321 u8 *buf;
322
323 if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
324 continue;
325
326 /* different i2c client, reprogram the i2c adapter */
327 if (msgs[dev->msg_read_idx].addr != addr)
328 return;
329
330 if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
331 len = msgs[dev->msg_read_idx].len;
332 buf = msgs[dev->msg_read_idx].buf;
333 } else {
334 len = dev->rx_buf_len;
335 buf = dev->rx_buf;
336 }
337
338 for (; len > 0 && rx_valid > 0; len--, rx_valid--)
339 *buf++ = readb(dev->base + DW_IC_DATA_CMD);
340
341 if (len > 0) {
342 dev->status |= STATUS_READ_IN_PROGRESS;
343 dev->rx_buf_len = len;
344 dev->rx_buf = buf;
345 return;
346 } else
347 dev->status &= ~STATUS_READ_IN_PROGRESS;
348 }
349}
350
351/*
352 * Prepare controller for a transaction and call i2c_dw_xfer_msg
353 */
354static int
355i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
356{
357 struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
358 int ret;
359
360 dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
361
362 mutex_lock(&dev->lock);
363
364 INIT_COMPLETION(dev->cmd_complete);
365 dev->msgs = msgs;
366 dev->msgs_num = num;
367 dev->cmd_err = 0;
368 dev->msg_write_idx = 0;
369 dev->msg_read_idx = 0;
370 dev->msg_err = 0;
371 dev->status = STATUS_IDLE;
372
373 ret = i2c_dw_wait_bus_not_busy(dev);
374 if (ret < 0)
375 goto done;
376
377 /* start the transfers */
378 i2c_dw_xfer_msg(adap);
379
380 /* wait for tx to complete */
381 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
382 if (ret == 0) {
383 dev_err(dev->dev, "controller timed out\n");
384 i2c_dw_init(dev);
385 ret = -ETIMEDOUT;
386 goto done;
387 } else if (ret < 0)
388 goto done;
389
390 if (dev->msg_err) {
391 ret = dev->msg_err;
392 goto done;
393 }
394
395 /* no error */
396 if (likely(!dev->cmd_err)) {
397 /* read rx fifo, and disable the adapter */
398 do {
399 i2c_dw_read(adap);
400 } while (dev->status & STATUS_READ_IN_PROGRESS);
401 writeb(0, dev->base + DW_IC_ENABLE);
402 ret = num;
403 goto done;
404 }
405
406 /* We have an error */
407 if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
408 unsigned long abort_source = dev->abort_source;
409 int i;
410
411 for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) {
412 dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
413 }
414 }
415 ret = -EIO;
416
417done:
418 mutex_unlock(&dev->lock);
419
420 return ret;
421}
422
423static u32 i2c_dw_func(struct i2c_adapter *adap)
424{
425 return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
426}
427
428static void dw_i2c_pump_msg(unsigned long data)
429{
430 struct dw_i2c_dev *dev = (struct dw_i2c_dev *) data;
431 u16 intr_mask;
432
433 i2c_dw_read(&dev->adapter);
434 i2c_dw_xfer_msg(&dev->adapter);
435
436 intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
437 if (dev->status & STATUS_WRITE_IN_PROGRESS)
438 intr_mask |= DW_IC_INTR_TX_EMPTY;
439 writew(intr_mask, dev->base + DW_IC_INTR_MASK);
440}
441
442/*
443 * Interrupt service routine. This gets called whenever an I2C interrupt
444 * occurs.
445 */
446static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
447{
448 struct dw_i2c_dev *dev = dev_id;
449 u16 stat;
450
451 stat = readw(dev->base + DW_IC_INTR_STAT);
452 dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
453 if (stat & DW_IC_INTR_TX_ABRT) {
454 dev->abort_source = readw(dev->base + DW_IC_TX_ABRT_SOURCE);
455 dev->cmd_err |= DW_IC_ERR_TX_ABRT;
456 dev->status = STATUS_IDLE;
457 } else if (stat & DW_IC_INTR_TX_EMPTY)
458 tasklet_schedule(&dev->pump_msg);
459
460 readb(dev->base + DW_IC_CLR_INTR); /* clear interrupts */
461 writew(0, dev->base + DW_IC_INTR_MASK); /* disable interrupts */
462 if (stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
463 complete(&dev->cmd_complete);
464
465 return IRQ_HANDLED;
466}
467
468static struct i2c_algorithm i2c_dw_algo = {
469 .master_xfer = i2c_dw_xfer,
470 .functionality = i2c_dw_func,
471};
472
473static int __devinit dw_i2c_probe(struct platform_device *pdev)
474{
475 struct dw_i2c_dev *dev;
476 struct i2c_adapter *adap;
477 struct resource *mem, *irq, *ioarea;
478 int r;
479
480 /* NOTE: driver uses the static register mapping */
481 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
482 if (!mem) {
483 dev_err(&pdev->dev, "no mem resource?\n");
484 return -EINVAL;
485 }
486
487 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
488 if (!irq) {
489 dev_err(&pdev->dev, "no irq resource?\n");
490 return -EINVAL;
491 }
492
493 ioarea = request_mem_region(mem->start, resource_size(mem),
494 pdev->name);
495 if (!ioarea) {
496 dev_err(&pdev->dev, "I2C region already claimed\n");
497 return -EBUSY;
498 }
499
500 dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
501 if (!dev) {
502 r = -ENOMEM;
503 goto err_release_region;
504 }
505
506 init_completion(&dev->cmd_complete);
507 tasklet_init(&dev->pump_msg, dw_i2c_pump_msg, (unsigned long) dev);
508 mutex_init(&dev->lock);
509 dev->dev = get_device(&pdev->dev);
510 dev->irq = irq->start;
511 platform_set_drvdata(pdev, dev);
512
513 dev->clk = clk_get(&pdev->dev, NULL);
514 if (IS_ERR(dev->clk)) {
515 r = -ENODEV;
516 goto err_free_mem;
517 }
518 clk_enable(dev->clk);
519
520 dev->base = ioremap(mem->start, resource_size(mem));
521 if (dev->base == NULL) {
522 dev_err(&pdev->dev, "failure mapping io resources\n");
523 r = -EBUSY;
524 goto err_unuse_clocks;
525 }
526 {
527 u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1);
528
529 dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
530 dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
531 }
532 i2c_dw_init(dev);
533
534 writew(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */
535 r = request_irq(dev->irq, i2c_dw_isr, 0, pdev->name, dev);
536 if (r) {
537 dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
538 goto err_iounmap;
539 }
540
541 adap = &dev->adapter;
542 i2c_set_adapdata(adap, dev);
543 adap->owner = THIS_MODULE;
544 adap->class = I2C_CLASS_HWMON;
545 strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
546 sizeof(adap->name));
547 adap->algo = &i2c_dw_algo;
548 adap->dev.parent = &pdev->dev;
549
550 adap->nr = pdev->id;
551 r = i2c_add_numbered_adapter(adap);
552 if (r) {
553 dev_err(&pdev->dev, "failure adding adapter\n");
554 goto err_free_irq;
555 }
556
557 return 0;
558
559err_free_irq:
560 free_irq(dev->irq, dev);
561err_iounmap:
562 iounmap(dev->base);
563err_unuse_clocks:
564 clk_disable(dev->clk);
565 clk_put(dev->clk);
566 dev->clk = NULL;
567err_free_mem:
568 platform_set_drvdata(pdev, NULL);
569 put_device(&pdev->dev);
570 kfree(dev);
571err_release_region:
572 release_mem_region(mem->start, resource_size(mem));
573
574 return r;
575}
576
577static int __devexit dw_i2c_remove(struct platform_device *pdev)
578{
579 struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
580 struct resource *mem;
581
582 platform_set_drvdata(pdev, NULL);
583 i2c_del_adapter(&dev->adapter);
584 put_device(&pdev->dev);
585
586 clk_disable(dev->clk);
587 clk_put(dev->clk);
588 dev->clk = NULL;
589
590 writeb(0, dev->base + DW_IC_ENABLE);
591 free_irq(dev->irq, dev);
592 kfree(dev);
593
594 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
595 release_mem_region(mem->start, resource_size(mem));
596 return 0;
597}
598
599/* work with hotplug and coldplug */
600MODULE_ALIAS("platform:i2c_designware");
601
602static struct platform_driver dw_i2c_driver = {
603 .remove = __devexit_p(dw_i2c_remove),
604 .driver = {
605 .name = "i2c_designware",
606 .owner = THIS_MODULE,
607 },
608};
609
610static int __init dw_i2c_init_driver(void)
611{
612 return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
613}
614module_init(dw_i2c_init_driver);
615
616static void __exit dw_i2c_exit_driver(void)
617{
618 platform_driver_unregister(&dw_i2c_driver);
619}
620module_exit(dw_i2c_exit_driver);
621
622MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
623MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
624MODULE_LICENSE("GPL");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1dc721517e4c..c155bd3ec9f1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1725,6 +1725,7 @@ config TLAN
 
 config KS8842
 	tristate "Micrel KSZ8842"
+	depends on HAS_IOMEM
 	help
 	  This platform driver is for Micrel KSZ8842 chip.
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 38f1c3375d7f..b70cc99962fc 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev)
 	return 0;
 }
 
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	return bp->link_up;
+}
+
 static int
 bnx2_get_eeprom_len(struct net_device *dev)
 {
@@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
 	.get_wol		= bnx2_get_wol,
 	.set_wol		= bnx2_set_wol,
 	.nway_reset		= bnx2_nway_reset,
-	.get_link		= ethtool_op_get_link,
+	.get_link		= bnx2_get_link,
 	.get_eeprom_len		= bnx2_get_eeprom_len,
 	.get_eeprom		= bnx2_get_eeprom,
 	.set_eeprom		= bnx2_set_eeprom,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5e18812bf49..33821a81cbf8 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING
 	  If unsure, say Y.
 
 config CAN_SJA1000
-	depends on CAN_DEV
+	depends on CAN_DEV && HAS_IOMEM
 	tristate "Philips SJA1000"
 	---help---
 	  Driver for the SJA1000 CAN controllers from Philips or NXP
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index bdb143d2b5c7..055bb61d6e77 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -944,28 +944,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
 	u32 val = 0;
 	int retries = 60;
 
-	if (!pegtune_val) {
-		do {
-			val = NXRD32(adapter, CRB_CMDPEG_STATE);
+	if (pegtune_val)
+		return 0;
 
-			if (val == PHAN_INITIALIZE_COMPLETE ||
-			    val == PHAN_INITIALIZE_ACK)
-				return 0;
+	do {
+		val = NXRD32(adapter, CRB_CMDPEG_STATE);
 
-			msleep(500);
+		switch (val) {
+		case PHAN_INITIALIZE_COMPLETE:
+		case PHAN_INITIALIZE_ACK:
+			return 0;
+		case PHAN_INITIALIZE_FAILED:
+			goto out_err;
+		default:
+			break;
+		}
 
-		} while (--retries);
+		msleep(500);
 
-		if (!retries) {
-			pegtune_val = NXRD32(adapter,
-					NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
-			printk(KERN_WARNING "netxen_phantom_init: init failed, "
-					"pegtune_val=%x\n", pegtune_val);
-			return -1;
-		}
-	}
+	} while (--retries);
 
-	return 0;
+	NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+	dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+	return -EIO;
 }
 
 static int
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 71daa3d5f114..2919a2d12bf4 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
 	first_driver = (adapter->ahw.pci_func == 0);
 
 	if (!first_driver)
-		return 0;
+		goto wait_init;
 
 	first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
 
@@ -752,6 +752,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
 		| (_NETXEN_NIC_LINUX_SUBVERSION);
 	NXWR32(adapter, CRB_DRIVER_VERSION, val);
 
+wait_init:
 	/* Handshake with the card before we register the devices. */
 	err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
 	if (err) {
@@ -1178,6 +1179,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 	free_netdev(netdev);
 }
 
+#ifdef CONFIG_PM
 static int
 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
 {
@@ -1242,6 +1244,7 @@ netxen_nic_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+#endif
 
 static int netxen_nic_open(struct net_device *netdev)
 {
@@ -1771,8 +1774,10 @@ static struct pci_driver netxen_driver = {
 	.id_table = netxen_pci_tbl,
 	.probe = netxen_nic_probe,
 	.remove = __devexit_p(netxen_nic_remove),
+#ifdef CONFIG_PM
 	.suspend = netxen_nic_suspend,
 	.resume = netxen_nic_resume
+#endif
 };
 
 /* Driver Registration on NetXen card */
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index bbc6d4d3cc94..3e4b67aaa6ea 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 		(void __iomem *)port_regs;
 	u32 delay = 10;
 	int status = 0;
+	unsigned long hw_flags = 0;
 
 	if(ql_mii_setup(qdev))
 		return -1;
@@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    (ISP_SERIAL_PORT_IF_WE |
 			     (ISP_SERIAL_PORT_IF_WE << 16)));
-
+	/* Give the PHY time to come out of reset. */
+	mdelay(100);
 	qdev->port_link_state = LS_DOWN;
 	netif_carrier_off(qdev->ndev);
 
@@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
 		if (value & PORT_STATUS_IC)
 			break;
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		msleep(500);
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	} while (--delay);
 
 	if (delay == 0) {
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 178853a07440..e53eacd75c8d 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,6 +39,7 @@
39#include <linux/sysdev.h> 39#include <linux/sysdev.h>
40#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
41#include <asm/iommu.h> 41#include <asm/iommu.h>
42#include <asm/e820.h>
42#include "pci.h" 43#include "pci.h"
43 44
44#define ROOT_SIZE VTD_PAGE_SIZE 45#define ROOT_SIZE VTD_PAGE_SIZE
@@ -217,6 +218,14 @@ static inline bool dma_pte_present(struct dma_pte *pte)
217 return (pte->val & 3) != 0; 218 return (pte->val & 3) != 0;
218} 219}
219 220
221/*
222 * This domain is a statically identity mapping domain.
223 * 1. This domain creats a static 1:1 mapping to all usable memory.
224 * 2. It maps to each iommu if successful.
225 * 3. Each iommu mapps to this domain if successful.
226 */
227struct dmar_domain *si_domain;
228
220/* devices under the same p2p bridge are owned in one domain */ 229/* devices under the same p2p bridge are owned in one domain */
221#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) 230#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
222 231
@@ -225,6 +234,9 @@ static inline bool dma_pte_present(struct dma_pte *pte)
225 */ 234 */
226#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) 235#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
227 236
237/* si_domain contains mulitple devices */
238#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
239
228struct dmar_domain { 240struct dmar_domain {
229 int id; /* domain id */ 241 int id; /* domain id */
230 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ 242 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
@@ -435,12 +447,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
435 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); 447 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
436} 448}
437 449
438/* in native case, each domain is related to only one iommu */ 450/* This functionin only returns single iommu in a domain */
439static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) 451static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
440{ 452{
441 int iommu_id; 453 int iommu_id;
442 454
455 /* si_domain and vm domain should not get here. */
443 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); 456 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
457 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
444 458
445 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); 459 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
446 if (iommu_id < 0 || iommu_id >= g_num_of_iommus) 460 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
@@ -1189,48 +1203,71 @@ void free_dmar_iommu(struct intel_iommu *iommu)
1189 free_context_table(iommu); 1203 free_context_table(iommu);
1190} 1204}
1191 1205
1192static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) 1206static struct dmar_domain *alloc_domain(void)
1193{ 1207{
1194 unsigned long num;
1195 unsigned long ndomains;
1196 struct dmar_domain *domain; 1208 struct dmar_domain *domain;
1197 unsigned long flags;
1198 1209
1199 domain = alloc_domain_mem(); 1210 domain = alloc_domain_mem();
1200 if (!domain) 1211 if (!domain)
1201 return NULL; 1212 return NULL;
1202 1213
1214 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1215 domain->flags = 0;
1216
1217 return domain;
1218}
1219
1220static int iommu_attach_domain(struct dmar_domain *domain,
1221 struct intel_iommu *iommu)
1222{
1223 int num;
1224 unsigned long ndomains;
1225 unsigned long flags;
1226
1203 ndomains = cap_ndoms(iommu->cap); 1227 ndomains = cap_ndoms(iommu->cap);
1204 1228
1205 spin_lock_irqsave(&iommu->lock, flags); 1229 spin_lock_irqsave(&iommu->lock, flags);
1230
1206 num = find_first_zero_bit(iommu->domain_ids, ndomains); 1231 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1207 if (num >= ndomains) { 1232 if (num >= ndomains) {
1208 spin_unlock_irqrestore(&iommu->lock, flags); 1233 spin_unlock_irqrestore(&iommu->lock, flags);
1209 free_domain_mem(domain);
1210 printk(KERN_ERR "IOMMU: no free domain ids\n"); 1234 printk(KERN_ERR "IOMMU: no free domain ids\n");
1211 return NULL; 1235 return -ENOMEM;
1212 } 1236 }
1213 1237
1214 set_bit(num, iommu->domain_ids);
1215 domain->id = num; 1238 domain->id = num;
1216 memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); 1239 set_bit(num, iommu->domain_ids);
1217 set_bit(iommu->seq_id, &domain->iommu_bmp); 1240 set_bit(iommu->seq_id, &domain->iommu_bmp);
1218 domain->flags = 0;
1219 iommu->domains[num] = domain; 1241 iommu->domains[num] = domain;
1220 spin_unlock_irqrestore(&iommu->lock, flags); 1242 spin_unlock_irqrestore(&iommu->lock, flags);
1221 1243
1222 return domain; 1244 return 0;
1223} 1245}
1224 1246
1225static void iommu_free_domain(struct dmar_domain *domain) 1247static void iommu_detach_domain(struct dmar_domain *domain,
1248 struct intel_iommu *iommu)
1226{ 1249{
1227 unsigned long flags; 1250 unsigned long flags;
1228 struct intel_iommu *iommu; 1251 int num, ndomains;
1229 1252 int found = 0;
1230 iommu = domain_get_iommu(domain);
1231 1253
1232 spin_lock_irqsave(&iommu->lock, flags); 1254 spin_lock_irqsave(&iommu->lock, flags);
1233 clear_bit(domain->id, iommu->domain_ids); 1255 ndomains = cap_ndoms(iommu->cap);
1256 num = find_first_bit(iommu->domain_ids, ndomains);
1257 for (; num < ndomains; ) {
1258 if (iommu->domains[num] == domain) {
1259 found = 1;
1260 break;
1261 }
1262 num = find_next_bit(iommu->domain_ids,
1263 cap_ndoms(iommu->cap), num+1);
1264 }
1265
1266 if (found) {
1267 clear_bit(num, iommu->domain_ids);
1268 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1269 iommu->domains[num] = NULL;
1270 }
1234 spin_unlock_irqrestore(&iommu->lock, flags); 1271 spin_unlock_irqrestore(&iommu->lock, flags);
1235} 1272}
1236 1273
@@ -1350,6 +1387,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1350 1387
1351static void domain_exit(struct dmar_domain *domain) 1388static void domain_exit(struct dmar_domain *domain)
1352{ 1389{
1390 struct dmar_drhd_unit *drhd;
1391 struct intel_iommu *iommu;
1353 u64 end; 1392 u64 end;
1354 1393
1355 /* Domain 0 is reserved, so dont process it */ 1394 /* Domain 0 is reserved, so dont process it */
@@ -1368,7 +1407,10 @@ static void domain_exit(struct dmar_domain *domain)
1368 /* free page tables */ 1407 /* free page tables */
1369 dma_pte_free_pagetable(domain, 0, end); 1408 dma_pte_free_pagetable(domain, 0, end);
1370 1409
1371 iommu_free_domain(domain); 1410 for_each_active_iommu(iommu, drhd)
1411 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1412 iommu_detach_domain(domain, iommu);
1413
1372 free_domain_mem(domain); 1414 free_domain_mem(domain);
1373} 1415}
1374 1416
@@ -1408,7 +1450,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1408 id = domain->id; 1450 id = domain->id;
1409 pgd = domain->pgd; 1451 pgd = domain->pgd;
1410 1452
1411 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { 1453 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1454 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1412 int found = 0; 1455 int found = 0;
1413 1456
1414 /* find an available domain id for this device in iommu */ 1457 /* find an available domain id for this device in iommu */
@@ -1433,6 +1476,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1433 } 1476 }
1434 1477
1435 set_bit(num, iommu->domain_ids); 1478 set_bit(num, iommu->domain_ids);
1479 set_bit(iommu->seq_id, &domain->iommu_bmp);
1436 iommu->domains[num] = domain; 1480 iommu->domains[num] = domain;
1437 id = num; 1481 id = num;
1438 } 1482 }
@@ -1675,6 +1719,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1675 unsigned long flags; 1719 unsigned long flags;
1676 int bus = 0, devfn = 0; 1720 int bus = 0, devfn = 0;
1677 int segment; 1721 int segment;
1722 int ret;
1678 1723
1679 domain = find_domain(pdev); 1724 domain = find_domain(pdev);
1680 if (domain) 1725 if (domain)
@@ -1707,6 +1752,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1707 } 1752 }
1708 } 1753 }
1709 1754
1755 domain = alloc_domain();
1756 if (!domain)
1757 goto error;
1758
1710 /* Allocate new domain for the device */ 1759 /* Allocate new domain for the device */
1711 drhd = dmar_find_matched_drhd_unit(pdev); 1760 drhd = dmar_find_matched_drhd_unit(pdev);
1712 if (!drhd) { 1761 if (!drhd) {
@@ -1716,9 +1765,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1716 } 1765 }
1717 iommu = drhd->iommu; 1766 iommu = drhd->iommu;
1718 1767
1719 domain = iommu_alloc_domain(iommu); 1768 ret = iommu_attach_domain(domain, iommu);
1720 if (!domain) 1769 if (ret) {
1770 domain_exit(domain);
1721 goto error; 1771 goto error;
1772 }
1722 1773
1723 if (domain_init(domain, gaw)) { 1774 if (domain_init(domain, gaw)) {
1724 domain_exit(domain); 1775 domain_exit(domain);
@@ -1792,6 +1843,8 @@ error:
1792 return find_domain(pdev); 1843 return find_domain(pdev);
1793} 1844}
1794 1845
1846static int iommu_identity_mapping;
1847
1795static int iommu_prepare_identity_map(struct pci_dev *pdev, 1848static int iommu_prepare_identity_map(struct pci_dev *pdev,
1796 unsigned long long start, 1849 unsigned long long start,
1797 unsigned long long end) 1850 unsigned long long end)
@@ -1804,8 +1857,11 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
1804 printk(KERN_INFO 1857 printk(KERN_INFO
1805 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 1858 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1806 pci_name(pdev), start, end); 1859 pci_name(pdev), start, end);
1807 /* page table init */ 1860 if (iommu_identity_mapping)
1808 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); 1861 domain = si_domain;
1862 else
1863 /* page table init */
1864 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1809 if (!domain) 1865 if (!domain)
1810 return -ENOMEM; 1866 return -ENOMEM;
1811 1867
@@ -1952,7 +2008,110 @@ static int __init init_context_pass_through(void)
1952 return 0; 2008 return 0;
1953} 2009}
1954 2010
1955static int __init init_dmars(void) 2011static int md_domain_init(struct dmar_domain *domain, int guest_width);
2012static int si_domain_init(void)
2013{
2014 struct dmar_drhd_unit *drhd;
2015 struct intel_iommu *iommu;
2016 int ret = 0;
2017
2018 si_domain = alloc_domain();
2019 if (!si_domain)
2020 return -EFAULT;
2021
2022
2023 for_each_active_iommu(iommu, drhd) {
2024 ret = iommu_attach_domain(si_domain, iommu);
2025 if (ret) {
2026 domain_exit(si_domain);
2027 return -EFAULT;
2028 }
2029 }
2030
2031 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2032 domain_exit(si_domain);
2033 return -EFAULT;
2034 }
2035
2036 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2037
2038 return 0;
2039}
2040
2041static void domain_remove_one_dev_info(struct dmar_domain *domain,
2042 struct pci_dev *pdev);
2043static int identity_mapping(struct pci_dev *pdev)
2044{
2045 struct device_domain_info *info;
2046
2047 if (likely(!iommu_identity_mapping))
2048 return 0;
2049
2050
2051 list_for_each_entry(info, &si_domain->devices, link)
2052 if (info->dev == pdev)
2053 return 1;
2054 return 0;
2055}
2056
2057static int domain_add_dev_info(struct dmar_domain *domain,
2058 struct pci_dev *pdev)
2059{
2060 struct device_domain_info *info;
2061 unsigned long flags;
2062
2063 info = alloc_devinfo_mem();
2064 if (!info)
2065 return -ENOMEM;
2066
2067 info->segment = pci_domain_nr(pdev->bus);
2068 info->bus = pdev->bus->number;
2069 info->devfn = pdev->devfn;
2070 info->dev = pdev;
2071 info->domain = domain;
2072
2073 spin_lock_irqsave(&device_domain_lock, flags);
2074 list_add(&info->link, &domain->devices);
2075 list_add(&info->global, &device_domain_list);
2076 pdev->dev.archdata.iommu = info;
2077 spin_unlock_irqrestore(&device_domain_lock, flags);
2078
2079 return 0;
2080}
2081
2082static int iommu_prepare_static_identity_mapping(void)
2083{
2084 int i;
2085 struct pci_dev *pdev = NULL;
2086 int ret;
2087
2088 ret = si_domain_init();
2089 if (ret)
2090 return -EFAULT;
2091
2092 printk(KERN_INFO "IOMMU: Setting identity map:\n");
2093 for_each_pci_dev(pdev) {
2094 for (i = 0; i < e820.nr_map; i++) {
2095 struct e820entry *ei = &e820.map[i];
2096
2097 if (ei->type == E820_RAM) {
2098 ret = iommu_prepare_identity_map(pdev,
2099 ei->addr, ei->addr + ei->size);
2100 if (ret) {
2101 printk(KERN_INFO "1:1 mapping to one domain failed.\n");
2102 return -EFAULT;
2103 }
2104 }
2105 }
2106 ret = domain_add_dev_info(si_domain, pdev);
2107 if (ret)
2108 return ret;
2109 }
2110
2111 return 0;
2112}
2113
2114int __init init_dmars(void)
1956{ 2115{
1957 struct dmar_drhd_unit *drhd; 2116 struct dmar_drhd_unit *drhd;
1958 struct dmar_rmrr_unit *rmrr; 2117 struct dmar_rmrr_unit *rmrr;
@@ -1962,6 +2121,13 @@ static int __init init_dmars(void)
1962 int pass_through = 1; 2121 int pass_through = 1;
1963 2122
1964 /* 2123 /*
2124 * In case pass through can not be enabled, iommu tries to use identity
2125 * mapping.
2126 */
2127 if (iommu_pass_through)
2128 iommu_identity_mapping = 1;
2129
2130 /*
1965 * for each drhd 2131 * for each drhd
1966 * allocate root 2132 * allocate root
1967 * initialize and program root entry to not present 2133 * initialize and program root entry to not present
@@ -2090,9 +2256,12 @@ static int __init init_dmars(void)
2090 2256
2091 /* 2257 /*
2092 * If pass through is not set or not enabled, setup context entries for 2258 * If pass through is not set or not enabled, setup context entries for
2093 * identity mappings for rmrr, gfx, and isa. 2259 * identity mappings for rmrr, gfx, and isa and may fall back to static
2260 * identity mapping if iommu_identity_mapping is set.
2094 */ 2261 */
2095 if (!iommu_pass_through) { 2262 if (!iommu_pass_through) {
2263 if (iommu_identity_mapping)
2264 iommu_prepare_static_identity_mapping();
2096 /* 2265 /*
2097 * For each rmrr 2266 * For each rmrr
2098 * for each dev attached to rmrr 2267 * for each dev attached to rmrr
@@ -2107,6 +2276,7 @@ static int __init init_dmars(void)
2107 * endfor 2276 * endfor
2108 * endfor 2277 * endfor
2109 */ 2278 */
2279 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2110 for_each_rmrr_units(rmrr) { 2280 for_each_rmrr_units(rmrr) {
2111 for (i = 0; i < rmrr->devices_cnt; i++) { 2281 for (i = 0; i < rmrr->devices_cnt; i++) {
2112 pdev = rmrr->devices[i]; 2282 pdev = rmrr->devices[i];
@@ -2248,6 +2418,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
2248 return domain; 2418 return domain;
2249} 2419}
2250 2420
2421static int iommu_dummy(struct pci_dev *pdev)
2422{
2423 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2424}
2425
2426/* Check if the pdev needs to go through non-identity map and unmap process.*/
2427static int iommu_no_mapping(struct pci_dev *pdev)
2428{
2429 int found;
2430
2431 if (!iommu_identity_mapping)
2432 return iommu_dummy(pdev);
2433
2434 found = identity_mapping(pdev);
2435 if (found) {
2436 if (pdev->dma_mask > DMA_BIT_MASK(32))
2437 return 1;
2438 else {
2439 /*
 2440 * The 32 bit DMA device is removed from si_domain and falls
 2441 * back to non-identity mapping.
2442 */
2443 domain_remove_one_dev_info(si_domain, pdev);
2444 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2445 pci_name(pdev));
2446 return 0;
2447 }
2448 } else {
2449 /*
 2450 * A 64 bit DMA device detached from a VM is put back
 2451 * into si_domain for identity mapping.
2452 */
2453 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2454 int ret;
2455 ret = domain_add_dev_info(si_domain, pdev);
2456 if (!ret) {
2457 printk(KERN_INFO "64bit %s uses identity mapping\n",
2458 pci_name(pdev));
2459 return 1;
2460 }
2461 }
2462 }
2463
2464 return iommu_dummy(pdev);
2465}
2466
2251static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, 2467static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2252 size_t size, int dir, u64 dma_mask) 2468 size_t size, int dir, u64 dma_mask)
2253{ 2469{
@@ -2260,7 +2476,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2260 struct intel_iommu *iommu; 2476 struct intel_iommu *iommu;
2261 2477
2262 BUG_ON(dir == DMA_NONE); 2478 BUG_ON(dir == DMA_NONE);
2263 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2479
2480 if (iommu_no_mapping(pdev))
2264 return paddr; 2481 return paddr;
2265 2482
2266 domain = get_valid_domain_for_dev(pdev); 2483 domain = get_valid_domain_for_dev(pdev);
@@ -2401,8 +2618,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2401 struct iova *iova; 2618 struct iova *iova;
2402 struct intel_iommu *iommu; 2619 struct intel_iommu *iommu;
2403 2620
2404 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2621 if (iommu_no_mapping(pdev))
2405 return; 2622 return;
2623
2406 domain = find_domain(pdev); 2624 domain = find_domain(pdev);
2407 BUG_ON(!domain); 2625 BUG_ON(!domain);
2408 2626
@@ -2492,7 +2710,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2492 struct scatterlist *sg; 2710 struct scatterlist *sg;
2493 struct intel_iommu *iommu; 2711 struct intel_iommu *iommu;
2494 2712
2495 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2713 if (iommu_no_mapping(pdev))
2496 return; 2714 return;
2497 2715
2498 domain = find_domain(pdev); 2716 domain = find_domain(pdev);
@@ -2553,7 +2771,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2553 struct intel_iommu *iommu; 2771 struct intel_iommu *iommu;
2554 2772
2555 BUG_ON(dir == DMA_NONE); 2773 BUG_ON(dir == DMA_NONE);
2556 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2774 if (iommu_no_mapping(pdev))
2557 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); 2775 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2558 2776
2559 domain = get_valid_domain_for_dev(pdev); 2777 domain = get_valid_domain_for_dev(pdev);
@@ -2951,31 +3169,6 @@ int __init intel_iommu_init(void)
2951 return 0; 3169 return 0;
2952} 3170}
2953 3171
2954static int vm_domain_add_dev_info(struct dmar_domain *domain,
2955 struct pci_dev *pdev)
2956{
2957 struct device_domain_info *info;
2958 unsigned long flags;
2959
2960 info = alloc_devinfo_mem();
2961 if (!info)
2962 return -ENOMEM;
2963
2964 info->segment = pci_domain_nr(pdev->bus);
2965 info->bus = pdev->bus->number;
2966 info->devfn = pdev->devfn;
2967 info->dev = pdev;
2968 info->domain = domain;
2969
2970 spin_lock_irqsave(&device_domain_lock, flags);
2971 list_add(&info->link, &domain->devices);
2972 list_add(&info->global, &device_domain_list);
2973 pdev->dev.archdata.iommu = info;
2974 spin_unlock_irqrestore(&device_domain_lock, flags);
2975
2976 return 0;
2977}
2978
2979static void iommu_detach_dependent_devices(struct intel_iommu *iommu, 3172static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2980 struct pci_dev *pdev) 3173 struct pci_dev *pdev)
2981{ 3174{
@@ -3003,7 +3196,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3003 } 3196 }
3004} 3197}
3005 3198
3006static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, 3199static void domain_remove_one_dev_info(struct dmar_domain *domain,
3007 struct pci_dev *pdev) 3200 struct pci_dev *pdev)
3008{ 3201{
3009 struct device_domain_info *info; 3202 struct device_domain_info *info;
@@ -3136,7 +3329,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
3136 return domain; 3329 return domain;
3137} 3330}
3138 3331
3139static int vm_domain_init(struct dmar_domain *domain, int guest_width) 3332static int md_domain_init(struct dmar_domain *domain, int guest_width)
3140{ 3333{
3141 int adjust_width; 3334 int adjust_width;
3142 3335
@@ -3227,7 +3420,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
3227 "intel_iommu_domain_init: dmar_domain == NULL\n"); 3420 "intel_iommu_domain_init: dmar_domain == NULL\n");
3228 return -ENOMEM; 3421 return -ENOMEM;
3229 } 3422 }
3230 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 3423 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3231 printk(KERN_ERR 3424 printk(KERN_ERR
3232 "intel_iommu_domain_init() failed\n"); 3425 "intel_iommu_domain_init() failed\n");
3233 vm_domain_exit(dmar_domain); 3426 vm_domain_exit(dmar_domain);
@@ -3262,8 +3455,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3262 3455
3263 old_domain = find_domain(pdev); 3456 old_domain = find_domain(pdev);
3264 if (old_domain) { 3457 if (old_domain) {
3265 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) 3458 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3266 vm_domain_remove_one_dev_info(old_domain, pdev); 3459 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3460 domain_remove_one_dev_info(old_domain, pdev);
3267 else 3461 else
3268 domain_remove_dev_info(old_domain); 3462 domain_remove_dev_info(old_domain);
3269 } 3463 }
@@ -3285,7 +3479,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3285 return -EFAULT; 3479 return -EFAULT;
3286 } 3480 }
3287 3481
3288 ret = vm_domain_add_dev_info(dmar_domain, pdev); 3482 ret = domain_add_dev_info(dmar_domain, pdev);
3289 if (ret) 3483 if (ret)
3290 return ret; 3484 return ret;
3291 3485
@@ -3299,7 +3493,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
3299 struct dmar_domain *dmar_domain = domain->priv; 3493 struct dmar_domain *dmar_domain = domain->priv;
3300 struct pci_dev *pdev = to_pci_dev(dev); 3494 struct pci_dev *pdev = to_pci_dev(dev);
3301 3495
3302 vm_domain_remove_one_dev_info(dmar_domain, pdev); 3496 domain_remove_one_dev_info(dmar_domain, pdev);
3303} 3497}
3304 3498
3305static int intel_iommu_map_range(struct iommu_domain *domain, 3499static int intel_iommu_map_range(struct iommu_domain *domain,
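Taken together, the intel-iommu.c hunks above change the per-device DMA-API fast path: instead of only honouring the DUMMY_DEVICE_DOMAIN_INFO pass-through marker, iommu_no_mapping() now also keeps 64-bit-capable devices in the static identity (1:1) domain and evicts 32-bit-only ones from it. A compressed model of that policy, as a standalone sketch (the helper name and the userspace framing are illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Model of the decision iommu_no_mapping() converges on once the
 * si_domain bookkeeping (adding/removing the device) is factored out:
 * only devices that can address more than 32 bits of DMA stay 1:1 mapped.
 */
static bool uses_identity_mapping(uint64_t dma_mask)
{
        return dma_mask > 0xffffffffULL;
}

int main(void)
{
        printf("32-bit-only device -> identity map? %d\n",
               uses_identity_mapping(0xffffffffULL));   /* 0: remapped */
        printf("64-bit device      -> identity map? %d\n",
               uses_identity_mapping(~0ULL));            /* 1: stays 1:1 */
        return 0;
}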
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 1e83c8c5f985..4f5b8712931f 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -10,6 +10,8 @@
10#include <linux/intel-iommu.h> 10#include <linux/intel-iommu.h>
11#include "intr_remapping.h" 11#include "intr_remapping.h"
12#include <acpi/acpi.h> 12#include <acpi/acpi.h>
13#include <asm/pci-direct.h>
14#include "pci.h"
13 15
14static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; 16static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
15static int ir_ioapic_num; 17static int ir_ioapic_num;
@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified)
314 index = irq_iommu->irte_index + irq_iommu->sub_handle; 316 index = irq_iommu->irte_index + irq_iommu->sub_handle;
315 irte = &iommu->ir_table->base[index]; 317 irte = &iommu->ir_table->base[index];
316 318
317 set_64bit((unsigned long *)irte, irte_modified->low); 319 set_64bit((unsigned long *)&irte->low, irte_modified->low);
320 set_64bit((unsigned long *)&irte->high, irte_modified->high);
318 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 321 __iommu_flush_cache(iommu, irte, sizeof(*irte));
319 322
320 rc = qi_flush_iec(iommu, index, 0); 323 rc = qi_flush_iec(iommu, index, 0);
@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
369 return drhd->iommu; 372 return drhd->iommu;
370} 373}
371 374
375static int clear_entries(struct irq_2_iommu *irq_iommu)
376{
377 struct irte *start, *entry, *end;
378 struct intel_iommu *iommu;
379 int index;
380
381 if (irq_iommu->sub_handle)
382 return 0;
383
384 iommu = irq_iommu->iommu;
385 index = irq_iommu->irte_index + irq_iommu->sub_handle;
386
387 start = iommu->ir_table->base + index;
388 end = start + (1 << irq_iommu->irte_mask);
389
390 for (entry = start; entry < end; entry++) {
391 set_64bit((unsigned long *)&entry->low, 0);
392 set_64bit((unsigned long *)&entry->high, 0);
393 }
394
395 return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
396}
397
372int free_irte(int irq) 398int free_irte(int irq)
373{ 399{
374 int rc = 0; 400 int rc = 0;
375 int index, i;
376 struct irte *irte;
377 struct intel_iommu *iommu;
378 struct irq_2_iommu *irq_iommu; 401 struct irq_2_iommu *irq_iommu;
379 unsigned long flags; 402 unsigned long flags;
380 403
@@ -385,16 +408,7 @@ int free_irte(int irq)
385 return -1; 408 return -1;
386 } 409 }
387 410
388 iommu = irq_iommu->iommu; 411 rc = clear_entries(irq_iommu);
389
390 index = irq_iommu->irte_index + irq_iommu->sub_handle;
391 irte = &iommu->ir_table->base[index];
392
393 if (!irq_iommu->sub_handle) {
394 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
395 set_64bit((unsigned long *)(irte + i), 0);
396 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
397 }
398 412
399 irq_iommu->iommu = NULL; 413 irq_iommu->iommu = NULL;
400 irq_iommu->irte_index = 0; 414 irq_iommu->irte_index = 0;
@@ -406,6 +420,91 @@ int free_irte(int irq)
406 return rc; 420 return rc;
407} 421}
408 422
423/*
424 * source validation type
425 */
426#define SVT_NO_VERIFY 0x0 /* no verification is required */
 427#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
428#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
429
430/*
431 * source-id qualifier
432 */
433#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
434#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
435 * the third least significant bit
436 */
437#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
438 * the second and third least significant bits
439 */
440#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
 441 * the three least significant bits
442 */
443
444/*
445 * set SVT, SQ and SID fields of irte to verify
446 * source ids of interrupt requests
447 */
448static void set_irte_sid(struct irte *irte, unsigned int svt,
449 unsigned int sq, unsigned int sid)
450{
451 irte->svt = svt;
452 irte->sq = sq;
453 irte->sid = sid;
454}
455
456int set_ioapic_sid(struct irte *irte, int apic)
457{
458 int i;
459 u16 sid = 0;
460
461 if (!irte)
462 return -1;
463
464 for (i = 0; i < MAX_IO_APICS; i++) {
465 if (ir_ioapic[i].id == apic) {
466 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
467 break;
468 }
469 }
470
471 if (sid == 0) {
472 pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
473 return -1;
474 }
475
 476 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
477
478 return 0;
479}
480
481int set_msi_sid(struct irte *irte, struct pci_dev *dev)
482{
483 struct pci_dev *bridge;
484
485 if (!irte || !dev)
486 return -1;
487
488 /* PCIe device or Root Complex integrated PCI device */
489 if (dev->is_pcie || !dev->bus->parent) {
490 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
491 (dev->bus->number << 8) | dev->devfn);
492 return 0;
493 }
494
495 bridge = pci_find_upstream_pcie_bridge(dev);
496 if (bridge) {
497 if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
498 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
499 (bridge->bus->number << 8) | dev->bus->number);
500 else /* this is a legacy PCI bridge */
501 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
502 (bridge->bus->number << 8) | bridge->devfn);
503 }
504
505 return 0;
506}
507
409static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) 508static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
410{ 509{
411 u64 addr; 510 u64 addr;
@@ -612,6 +711,35 @@ error:
612 return -1; 711 return -1;
613} 712}
614 713
714static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
715 struct intel_iommu *iommu)
716{
717 struct acpi_dmar_pci_path *path;
718 u8 bus;
719 int count;
720
721 bus = scope->bus;
722 path = (struct acpi_dmar_pci_path *)(scope + 1);
723 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
724 / sizeof(struct acpi_dmar_pci_path);
725
726 while (--count > 0) {
727 /*
 728 * Access PCI config space directly because the PCI
 729 * subsystem isn't initialized yet.
730 */
731 bus = read_pci_config_byte(bus, path->dev, path->fn,
732 PCI_SECONDARY_BUS);
733 path++;
734 }
735
736 ir_ioapic[ir_ioapic_num].bus = bus;
737 ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
738 ir_ioapic[ir_ioapic_num].iommu = iommu;
739 ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
740 ir_ioapic_num++;
741}
742
615static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, 743static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
616 struct intel_iommu *iommu) 744 struct intel_iommu *iommu)
617{ 745{
@@ -636,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
636 " 0x%Lx\n", scope->enumeration_id, 764 " 0x%Lx\n", scope->enumeration_id,
637 drhd->address); 765 drhd->address);
638 766
639 ir_ioapic[ir_ioapic_num].iommu = iommu; 767 ir_parse_one_ioapic_scope(scope, iommu);
640 ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
641 ir_ioapic_num++;
642 } 768 }
643 start += scope->length; 769 start += scope->length;
644 } 770 }
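The source-id written into an IRTE by set_ioapic_sid()/set_msi_sid() is simply the 16-bit PCI requester id, bus number in the high byte and devfn in the low byte. A minimal sketch of that packing (userspace framing and the helper name are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* 16-bit IRTE source-id: bus in the high byte, devfn in the low byte. */
static uint16_t irte_source_id(uint8_t bus, uint8_t devfn)
{
        return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
        /* e.g. an MSI source at 00:1f.3: devfn = (slot << 3) | function */
        uint8_t devfn = (0x1f << 3) | 0x3;

        printf("sid = 0x%04x\n", irte_source_id(0x00, devfn));
        return 0;
}

For devices behind a PCIe-to-PCI(-X) bridge the requester id seen by the IOMMU is not the device's own, which is why set_msi_sid() falls back to SVT_VERIFY_BUS there and only pins the bus numbers.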
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
index ca48f0df8ac9..63a263c18415 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/pci/intr_remapping.h
@@ -3,6 +3,8 @@
3struct ioapic_scope { 3struct ioapic_scope {
4 struct intel_iommu *iommu; 4 struct intel_iommu *iommu;
5 unsigned int id; 5 unsigned int id;
6 unsigned int bus; /* PCI bus number */
7 unsigned int devfn; /* PCI devfn number */
6}; 8};
7 9
8#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) 10#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index e371a9c15341..a07015d646dd 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -398,8 +398,7 @@ static int sbs_init(struct pci_dev *dev)
398{ 398{
399 u8 __iomem *p; 399 u8 __iomem *p;
400 400
401 p = ioremap_nocache(pci_resource_start(dev, 0), 401 p = pci_ioremap_bar(dev, 0);
402 pci_resource_len(dev, 0));
403 402
404 if (p == NULL) 403 if (p == NULL)
405 return -ENOMEM; 404 return -ENOMEM;
@@ -423,8 +422,7 @@ static void __devexit sbs_exit(struct pci_dev *dev)
423{ 422{
424 u8 __iomem *p; 423 u8 __iomem *p;
425 424
426 p = ioremap_nocache(pci_resource_start(dev, 0), 425 p = pci_ioremap_bar(dev, 0);
427 pci_resource_len(dev, 0));
428 /* FIXME: What if resource_len < OCT_REG_CR_OFF */ 426 /* FIXME: What if resource_len < OCT_REG_CR_OFF */
429 if (p != NULL) 427 if (p != NULL)
430 writeb(0, p + OCT_REG_CR_OFF); 428 writeb(0, p + OCT_REG_CR_OFF);
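pci_ioremap_bar() is essentially the open-coded ioremap_nocache(pci_resource_start(), pci_resource_len()) pair it replaces here, plus a check that the BAR is really a memory resource. Roughly, as a sketch of the idea rather than the exact helper:

#include <linux/pci.h>
#include <linux/io.h>

/* Rough equivalent of pci_ioremap_bar(pdev, bar); illustrative only. */
static void __iomem *ioremap_mem_bar(struct pci_dev *pdev, int bar)
{
        /* Refuse I/O-port or unimplemented BARs instead of mapping garbage. */
        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
                return NULL;

        return ioremap_nocache(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
}

Using the helper keeps the BAR index and length in one place, which is what the sbs and icom conversions in this patch rely on.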
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 9f2891c2c4a2..cd1b6a45bb82 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1548,8 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1548 goto probe_exit1; 1548 goto probe_exit1;
1549 } 1549 }
1550 1550
1551 icom_adapter->base_addr = ioremap(icom_adapter->base_addr_pci, 1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
1552 pci_resource_len(dev, 0));
1553 1552
1554 if (!icom_adapter->base_addr) 1553 if (!icom_adapter->base_addr)
1555 goto probe_exit1; 1554 goto probe_exit1;
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 107ce2e187b8..00f4577d2f7f 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -467,7 +467,7 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd)
467 printk(KERN_INFO "jsm: linemap is full, added device failed\n"); 467 printk(KERN_INFO "jsm: linemap is full, added device failed\n");
468 continue; 468 continue;
469 } else 469 } else
470 set_bit((int)line, linemap); 470 set_bit(line, linemap);
471 brd->channels[i]->uart_port.line = line; 471 brd->channels[i]->uart_port.line = line;
472 if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) 472 if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
473 printk(KERN_INFO "jsm: add device failed\n"); 473 printk(KERN_INFO "jsm: add device failed\n");
@@ -503,7 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd)
503 503
504 ch = brd->channels[i]; 504 ch = brd->channels[i];
505 505
506 clear_bit((int)(ch->uart_port.line), linemap); 506 clear_bit(ch->uart_port.line, linemap);
507 uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); 507 uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
508 } 508 }
509 509
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 7313c2edcb83..54dd16d66a4b 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -461,6 +461,94 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
461 spin_unlock_irqrestore(&up->port.lock, flags); 461 spin_unlock_irqrestore(&up->port.lock, flags);
462} 462}
463 463
 464#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
465/*
466 * Wait for transmitter & holding register to empty
467 */
468static void wait_for_xmitr(struct uart_txx9_port *up)
469{
470 unsigned int tmout = 10000;
471
472 /* Wait up to 10ms for the character(s) to be sent. */
473 while (--tmout &&
474 !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
475 udelay(1);
476
477 /* Wait up to 1s for flow control if necessary */
478 if (up->port.flags & UPF_CONS_FLOW) {
479 tmout = 1000000;
480 while (--tmout &&
481 (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
482 udelay(1);
483 }
484}
485#endif
486
487#ifdef CONFIG_CONSOLE_POLL
488/*
489 * Console polling routines for writing and reading from the uart while
490 * in an interrupt or debug context.
491 */
492
493static int serial_txx9_get_poll_char(struct uart_port *port)
494{
495 unsigned int ier;
496 unsigned char c;
497 struct uart_txx9_port *up = (struct uart_txx9_port *)port;
498
499 /*
500 * First save the IER then disable the interrupts
501 */
502 ier = sio_in(up, TXX9_SIDICR);
503 sio_out(up, TXX9_SIDICR, 0);
504
505 while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
506 ;
507
508 c = sio_in(up, TXX9_SIRFIFO);
509
510 /*
511 * Finally, clear RX interrupt status
512 * and restore the IER
513 */
514 sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
515 sio_out(up, TXX9_SIDICR, ier);
516 return c;
517}
518
519
520static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
521{
522 unsigned int ier;
523 struct uart_txx9_port *up = (struct uart_txx9_port *)port;
524
525 /*
526 * First save the IER then disable the interrupts
527 */
528 ier = sio_in(up, TXX9_SIDICR);
529 sio_out(up, TXX9_SIDICR, 0);
530
531 wait_for_xmitr(up);
532 /*
533 * Send the character out.
534 * If a LF, also do CR...
535 */
536 sio_out(up, TXX9_SITFIFO, c);
537 if (c == 10) {
538 wait_for_xmitr(up);
539 sio_out(up, TXX9_SITFIFO, 13);
540 }
541
542 /*
543 * Finally, wait for transmitter to become empty
544 * and restore the IER
545 */
546 wait_for_xmitr(up);
547 sio_out(up, TXX9_SIDICR, ier);
548}
549
550#endif /* CONFIG_CONSOLE_POLL */
551
464static int serial_txx9_startup(struct uart_port *port) 552static int serial_txx9_startup(struct uart_port *port)
465{ 553{
466 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 554 struct uart_txx9_port *up = (struct uart_txx9_port *)port;
@@ -781,6 +869,10 @@ static struct uart_ops serial_txx9_pops = {
781 .release_port = serial_txx9_release_port, 869 .release_port = serial_txx9_release_port,
782 .request_port = serial_txx9_request_port, 870 .request_port = serial_txx9_request_port,
783 .config_port = serial_txx9_config_port, 871 .config_port = serial_txx9_config_port,
872#ifdef CONFIG_CONSOLE_POLL
873 .poll_get_char = serial_txx9_get_poll_char,
874 .poll_put_char = serial_txx9_put_poll_char,
875#endif
784}; 876};
785 877
786static struct uart_txx9_port serial_txx9_ports[UART_NR]; 878static struct uart_txx9_port serial_txx9_ports[UART_NR];
@@ -803,27 +895,6 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
803 895
804#ifdef CONFIG_SERIAL_TXX9_CONSOLE 896#ifdef CONFIG_SERIAL_TXX9_CONSOLE
805 897
806/*
807 * Wait for transmitter & holding register to empty
808 */
809static inline void wait_for_xmitr(struct uart_txx9_port *up)
810{
811 unsigned int tmout = 10000;
812
813 /* Wait up to 10ms for the character(s) to be sent. */
814 while (--tmout &&
815 !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
816 udelay(1);
817
818 /* Wait up to 1s for flow control if necessary */
819 if (up->port.flags & UPF_CONS_FLOW) {
820 tmout = 1000000;
821 while (--tmout &&
822 (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
823 udelay(1);
824 }
825}
826
827static void serial_txx9_console_putchar(struct uart_port *port, int ch) 898static void serial_txx9_console_putchar(struct uart_port *port, int ch)
828{ 899{
829 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 900 struct uart_txx9_port *up = (struct uart_txx9_port *)port;
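The new poll_get_char/poll_put_char hooks are what a polled-I/O client such as kgdboc drives when interrupts are unusable. A hedged sketch of how such a caller goes through uart_ops (the caller-side function name is made up for illustration):

#include <linux/serial_core.h>

/* Echo one character in polled mode, e.g. from a debugger context (sketch). */
static void polled_echo_once(struct uart_port *port)
{
        struct uart_ops *ops = port->ops;
        int c;

        if (!ops->poll_get_char || !ops->poll_put_char)
                return;                 /* driver does not support polling */

        c = ops->poll_get_char(port);   /* busy-waits for one character */
        ops->poll_put_char(port, c);    /* busy-waits until it is sent */
}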
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 603972576f0f..f128427b995b 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -29,51 +29,28 @@
29 29
30#ifdef CONFIG_FS_POSIX_ACL 30#ifdef CONFIG_FS_POSIX_ACL
31 31
32static void btrfs_update_cached_acl(struct inode *inode,
33 struct posix_acl **p_acl,
34 struct posix_acl *acl)
35{
36 spin_lock(&inode->i_lock);
37 if (*p_acl && *p_acl != BTRFS_ACL_NOT_CACHED)
38 posix_acl_release(*p_acl);
39 *p_acl = posix_acl_dup(acl);
40 spin_unlock(&inode->i_lock);
41}
42
43static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) 32static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
44{ 33{
45 int size; 34 int size;
46 const char *name; 35 const char *name;
47 char *value = NULL; 36 char *value = NULL;
48 struct posix_acl *acl = NULL, **p_acl; 37 struct posix_acl *acl;
38
39 acl = get_cached_acl(inode, type);
40 if (acl != ACL_NOT_CACHED)
41 return acl;
49 42
50 switch (type) { 43 switch (type) {
51 case ACL_TYPE_ACCESS: 44 case ACL_TYPE_ACCESS:
52 name = POSIX_ACL_XATTR_ACCESS; 45 name = POSIX_ACL_XATTR_ACCESS;
53 p_acl = &BTRFS_I(inode)->i_acl;
54 break; 46 break;
55 case ACL_TYPE_DEFAULT: 47 case ACL_TYPE_DEFAULT:
56 name = POSIX_ACL_XATTR_DEFAULT; 48 name = POSIX_ACL_XATTR_DEFAULT;
57 p_acl = &BTRFS_I(inode)->i_default_acl;
58 break; 49 break;
59 default: 50 default:
60 return ERR_PTR(-EINVAL); 51 BUG();
61 } 52 }
62 53
63 /* Handle the cached NULL acl case without locking */
64 acl = ACCESS_ONCE(*p_acl);
65 if (!acl)
66 return acl;
67
68 spin_lock(&inode->i_lock);
69 acl = *p_acl;
70 if (acl != BTRFS_ACL_NOT_CACHED)
71 acl = posix_acl_dup(acl);
72 spin_unlock(&inode->i_lock);
73
74 if (acl != BTRFS_ACL_NOT_CACHED)
75 return acl;
76
77 size = __btrfs_getxattr(inode, name, "", 0); 54 size = __btrfs_getxattr(inode, name, "", 0);
78 if (size > 0) { 55 if (size > 0) {
79 value = kzalloc(size, GFP_NOFS); 56 value = kzalloc(size, GFP_NOFS);
@@ -82,13 +59,13 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
82 size = __btrfs_getxattr(inode, name, value, size); 59 size = __btrfs_getxattr(inode, name, value, size);
83 if (size > 0) { 60 if (size > 0) {
84 acl = posix_acl_from_xattr(value, size); 61 acl = posix_acl_from_xattr(value, size);
85 btrfs_update_cached_acl(inode, p_acl, acl); 62 set_cached_acl(inode, type, acl);
86 } 63 }
87 kfree(value); 64 kfree(value);
88 } else if (size == -ENOENT || size == -ENODATA || size == 0) { 65 } else if (size == -ENOENT || size == -ENODATA || size == 0) {
89 /* FIXME, who returns -ENOENT? I think nobody */ 66 /* FIXME, who returns -ENOENT? I think nobody */
90 acl = NULL; 67 acl = NULL;
91 btrfs_update_cached_acl(inode, p_acl, acl); 68 set_cached_acl(inode, type, acl);
92 } else { 69 } else {
93 acl = ERR_PTR(-EIO); 70 acl = ERR_PTR(-EIO);
94 } 71 }
@@ -121,7 +98,6 @@ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
121{ 98{
122 int ret, size = 0; 99 int ret, size = 0;
123 const char *name; 100 const char *name;
124 struct posix_acl **p_acl;
125 char *value = NULL; 101 char *value = NULL;
126 mode_t mode; 102 mode_t mode;
127 103
@@ -141,13 +117,11 @@ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
141 ret = 0; 117 ret = 0;
142 inode->i_mode = mode; 118 inode->i_mode = mode;
143 name = POSIX_ACL_XATTR_ACCESS; 119 name = POSIX_ACL_XATTR_ACCESS;
144 p_acl = &BTRFS_I(inode)->i_acl;
145 break; 120 break;
146 case ACL_TYPE_DEFAULT: 121 case ACL_TYPE_DEFAULT:
147 if (!S_ISDIR(inode->i_mode)) 122 if (!S_ISDIR(inode->i_mode))
148 return acl ? -EINVAL : 0; 123 return acl ? -EINVAL : 0;
149 name = POSIX_ACL_XATTR_DEFAULT; 124 name = POSIX_ACL_XATTR_DEFAULT;
150 p_acl = &BTRFS_I(inode)->i_default_acl;
151 break; 125 break;
152 default: 126 default:
153 return -EINVAL; 127 return -EINVAL;
@@ -172,7 +146,7 @@ out:
172 kfree(value); 146 kfree(value);
173 147
174 if (!ret) 148 if (!ret)
175 btrfs_update_cached_acl(inode, p_acl, acl); 149 set_cached_acl(inode, type, acl);
176 150
177 return ret; 151 return ret;
178} 152}
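get_cached_acl() and set_cached_acl() are the generic VFS helpers this series switches the filesystems over to. Their behaviour is, roughly, the per-inode pattern each filesystem used to open-code; the sketch below illustrates the idea (the _sketch names are placeholders, not the real helper bodies):

#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/spinlock.h>

/* Sketch: inode->i_acl / i_default_acl guarded by i_lock, with
 * ACL_NOT_CACHED ((void *)-1) meaning "not looked up yet". */
static struct posix_acl *get_cached_acl_sketch(struct inode *inode, int type)
{
        struct posix_acl **p = (type == ACL_TYPE_ACCESS) ?
                                &inode->i_acl : &inode->i_default_acl;
        struct posix_acl *acl;

        spin_lock(&inode->i_lock);
        acl = *p;
        if (acl != ACL_NOT_CACHED)
                acl = posix_acl_dup(acl);       /* take a reference */
        spin_unlock(&inode->i_lock);
        return acl;
}

static void set_cached_acl_sketch(struct inode *inode, int type,
                                  struct posix_acl *acl)
{
        struct posix_acl **p = (type == ACL_TYPE_ACCESS) ?
                                &inode->i_acl : &inode->i_default_acl;
        struct posix_acl *old;

        spin_lock(&inode->i_lock);
        old = *p;
        *p = posix_acl_dup(acl);
        spin_unlock(&inode->i_lock);
        if (old != ACL_NOT_CACHED)
                posix_acl_release(old);
}

With the cache living in struct inode, the btrfs, ext2/3/4, jffs2, jfs and reiserfs copies of this logic (and their private *_ACL_NOT_CACHED markers) can all be deleted, as the following hunks do.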
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index acb4f3517582..ea1ea0af8c0e 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -53,10 +53,6 @@ struct btrfs_inode {
53 /* used to order data wrt metadata */ 53 /* used to order data wrt metadata */
54 struct btrfs_ordered_inode_tree ordered_tree; 54 struct btrfs_ordered_inode_tree ordered_tree;
55 55
56 /* standard acl pointers */
57 struct posix_acl *i_acl;
58 struct posix_acl *i_default_acl;
59
60 /* for keeping track of orphaned inodes */ 56 /* for keeping track of orphaned inodes */
61 struct list_head i_orphan; 57 struct list_head i_orphan;
62 58
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 03441a99ea38..2779c2f5360a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -41,8 +41,6 @@ struct btrfs_ordered_sum;
41 41
42#define BTRFS_MAGIC "_BHRfS_M" 42#define BTRFS_MAGIC "_BHRfS_M"
43 43
44#define BTRFS_ACL_NOT_CACHED ((void *)-1)
45
46#define BTRFS_MAX_LEVEL 8 44#define BTRFS_MAX_LEVEL 8
47 45
48#define BTRFS_COMPAT_EXTENT_TREE_V0 46#define BTRFS_COMPAT_EXTENT_TREE_V0
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8612b3a09811..78ad38ddd01f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2123,8 +2123,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
2123 */ 2123 */
2124 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); 2124 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2125 if (!maybe_acls) { 2125 if (!maybe_acls) {
2126 BTRFS_I(inode)->i_acl = NULL; 2126 inode->i_acl = NULL;
2127 BTRFS_I(inode)->i_default_acl = NULL; 2127 inode->i_default_acl = NULL;
2128 } 2128 }
2129 2129
2130 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, 2130 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
@@ -3141,9 +3141,6 @@ static noinline void init_btrfs_i(struct inode *inode)
3141{ 3141{
3142 struct btrfs_inode *bi = BTRFS_I(inode); 3142 struct btrfs_inode *bi = BTRFS_I(inode);
3143 3143
3144 bi->i_acl = BTRFS_ACL_NOT_CACHED;
3145 bi->i_default_acl = BTRFS_ACL_NOT_CACHED;
3146
3147 bi->generation = 0; 3144 bi->generation = 0;
3148 bi->sequence = 0; 3145 bi->sequence = 0;
3149 bi->last_trans = 0; 3146 bi->last_trans = 0;
@@ -4640,8 +4637,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
4640 ei->last_trans = 0; 4637 ei->last_trans = 0;
4641 ei->logged_trans = 0; 4638 ei->logged_trans = 0;
4642 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 4639 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4643 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4644 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4645 INIT_LIST_HEAD(&ei->i_orphan); 4640 INIT_LIST_HEAD(&ei->i_orphan);
4646 INIT_LIST_HEAD(&ei->ordered_operations); 4641 INIT_LIST_HEAD(&ei->ordered_operations);
4647 return &ei->vfs_inode; 4642 return &ei->vfs_inode;
@@ -4655,13 +4650,6 @@ void btrfs_destroy_inode(struct inode *inode)
4655 WARN_ON(!list_empty(&inode->i_dentry)); 4650 WARN_ON(!list_empty(&inode->i_dentry));
4656 WARN_ON(inode->i_data.nrpages); 4651 WARN_ON(inode->i_data.nrpages);
4657 4652
4658 if (BTRFS_I(inode)->i_acl &&
4659 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4660 posix_acl_release(BTRFS_I(inode)->i_acl);
4661 if (BTRFS_I(inode)->i_default_acl &&
4662 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4663 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4664
4665 /* 4653 /*
4666 * Make sure we're properly removed from the ordered operation 4654 * Make sure we're properly removed from the ordered operation
4667 * lists. 4655 * lists.
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c135202c38b3..626c7483b4de 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -31,6 +31,7 @@
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/netlink.h> 32#include <linux/netlink.h>
33#include <linux/vt.h> 33#include <linux/vt.h>
34#include <linux/falloc.h>
34#include <linux/fs.h> 35#include <linux/fs.h>
35#include <linux/file.h> 36#include <linux/file.h>
36#include <linux/ppp_defs.h> 37#include <linux/ppp_defs.h>
@@ -1779,6 +1780,41 @@ lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
1779 return sys_ioctl(fd, cmd, (unsigned long)tn); 1780 return sys_ioctl(fd, cmd, (unsigned long)tn);
1780} 1781}
1781 1782
1783/* on ia32 l_start is on a 32-bit boundary */
1784#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
1785struct space_resv_32 {
1786 __s16 l_type;
1787 __s16 l_whence;
1788 __s64 l_start __attribute__((packed));
1789 /* len == 0 means until end of file */
1790 __s64 l_len __attribute__((packed));
1791 __s32 l_sysid;
1792 __u32 l_pid;
1793 __s32 l_pad[4]; /* reserve area */
1794};
1795
1796#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
1797#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
1798
1799/* just account for different alignment */
1800static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
1801{
1802 struct space_resv_32 __user *p32 = (void __user *)arg;
1803 struct space_resv __user *p = compat_alloc_user_space(sizeof(*p));
1804
1805 if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
1806 copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
1807 copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
1808 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
1809 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
1810 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
1811 copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
1812 return -EFAULT;
1813
1814 return ioctl_preallocate(file, p);
1815}
1816#endif
1817
1782 1818
1783typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int, 1819typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int,
1784 unsigned long, struct file *); 1820 unsigned long, struct file *);
@@ -2756,6 +2792,18 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
2756 case FIOQSIZE: 2792 case FIOQSIZE:
2757 break; 2793 break;
2758 2794
2795#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
2796 case FS_IOC_RESVSP_32:
2797 case FS_IOC_RESVSP64_32:
2798 error = compat_ioctl_preallocate(filp, arg);
2799 goto out_fput;
2800#else
2801 case FS_IOC_RESVSP:
2802 case FS_IOC_RESVSP64:
2803 error = ioctl_preallocate(filp, (void __user *)arg);
2804 goto out_fput;
2805#endif
2806
2759 case FIBMAP: 2807 case FIBMAP:
2760 case FIGETBSZ: 2808 case FIGETBSZ:
2761 case FIONREAD: 2809 case FIONREAD:
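The compat path exists because on i386 the 64-bit l_start/l_len members of struct space_resv are only 4-byte aligned, so the 32-bit layout differs from the native 64-bit one. A userspace-style sketch that makes the difference visible (struct layouts copied from the hunk above, type names chosen here for illustration):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct space_resv_native {              /* natural 64-bit alignment */
        int16_t  l_type;
        int16_t  l_whence;
        int64_t  l_start;
        int64_t  l_len;
        int32_t  l_sysid;
        uint32_t l_pid;
        int32_t  l_pad[4];
};

struct space_resv_ia32 {                /* i386: 64-bit members on 32-bit boundaries */
        int16_t  l_type;
        int16_t  l_whence;
        int64_t  l_start __attribute__((packed));
        int64_t  l_len __attribute__((packed));
        int32_t  l_sysid;
        uint32_t l_pid;
        int32_t  l_pad[4];
};

int main(void)
{
        printf("native: size %zu, l_start at offset %zu\n",
               sizeof(struct space_resv_native),
               offsetof(struct space_resv_native, l_start));
        printf("ia32:   size %zu, l_start at offset %zu\n",
               sizeof(struct space_resv_ia32),
               offsetof(struct space_resv_ia32, l_start));
        return 0;
}

Because the offsets differ, compat_ioctl_preallocate() copies the fields one by one into a natively laid-out struct before handing it to ioctl_preallocate().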
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 9b1d285f9fe6..75efb028974b 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -423,7 +423,6 @@ static void devpts_kill_sb(struct super_block *sb)
423} 423}
424 424
425static struct file_system_type devpts_fs_type = { 425static struct file_system_type devpts_fs_type = {
426 .owner = THIS_MODULE,
427 .name = "devpts", 426 .name = "devpts",
428 .get_sb = devpts_get_sb, 427 .get_sb = devpts_get_sb,
429 .kill_sb = devpts_kill_sb, 428 .kill_sb = devpts_kill_sb,
@@ -564,13 +563,4 @@ static int __init init_devpts_fs(void)
564 } 563 }
565 return err; 564 return err;
566} 565}
567
568static void __exit exit_devpts_fs(void)
569{
570 unregister_filesystem(&devpts_fs_type);
571 mntput(devpts_mnt);
572}
573
574module_init(init_devpts_fs) 566module_init(init_devpts_fs)
575module_exit(exit_devpts_fs)
576MODULE_LICENSE("GPL");
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index d46e38cb85c5..d636e1297cad 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -125,37 +125,12 @@ fail:
125 return ERR_PTR(-EINVAL); 125 return ERR_PTR(-EINVAL);
126} 126}
127 127
128static inline struct posix_acl *
129ext2_iget_acl(struct inode *inode, struct posix_acl **i_acl)
130{
131 struct posix_acl *acl = EXT2_ACL_NOT_CACHED;
132
133 spin_lock(&inode->i_lock);
134 if (*i_acl != EXT2_ACL_NOT_CACHED)
135 acl = posix_acl_dup(*i_acl);
136 spin_unlock(&inode->i_lock);
137
138 return acl;
139}
140
141static inline void
142ext2_iset_acl(struct inode *inode, struct posix_acl **i_acl,
143 struct posix_acl *acl)
144{
145 spin_lock(&inode->i_lock);
146 if (*i_acl != EXT2_ACL_NOT_CACHED)
147 posix_acl_release(*i_acl);
148 *i_acl = posix_acl_dup(acl);
149 spin_unlock(&inode->i_lock);
150}
151
152/* 128/*
153 * inode->i_mutex: don't care 129 * inode->i_mutex: don't care
154 */ 130 */
155static struct posix_acl * 131static struct posix_acl *
156ext2_get_acl(struct inode *inode, int type) 132ext2_get_acl(struct inode *inode, int type)
157{ 133{
158 struct ext2_inode_info *ei = EXT2_I(inode);
159 int name_index; 134 int name_index;
160 char *value = NULL; 135 char *value = NULL;
161 struct posix_acl *acl; 136 struct posix_acl *acl;
@@ -164,23 +139,19 @@ ext2_get_acl(struct inode *inode, int type)
164 if (!test_opt(inode->i_sb, POSIX_ACL)) 139 if (!test_opt(inode->i_sb, POSIX_ACL))
165 return NULL; 140 return NULL;
166 141
167 switch(type) { 142 acl = get_cached_acl(inode, type);
168 case ACL_TYPE_ACCESS: 143 if (acl != ACL_NOT_CACHED)
169 acl = ext2_iget_acl(inode, &ei->i_acl); 144 return acl;
170 if (acl != EXT2_ACL_NOT_CACHED) 145
171 return acl; 146 switch (type) {
172 name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; 147 case ACL_TYPE_ACCESS:
173 break; 148 name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
174 149 break;
175 case ACL_TYPE_DEFAULT: 150 case ACL_TYPE_DEFAULT:
176 acl = ext2_iget_acl(inode, &ei->i_default_acl); 151 name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT;
177 if (acl != EXT2_ACL_NOT_CACHED) 152 break;
178 return acl; 153 default:
179 name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT; 154 BUG();
180 break;
181
182 default:
183 return ERR_PTR(-EINVAL);
184 } 155 }
185 retval = ext2_xattr_get(inode, name_index, "", NULL, 0); 156 retval = ext2_xattr_get(inode, name_index, "", NULL, 0);
186 if (retval > 0) { 157 if (retval > 0) {
@@ -197,17 +168,9 @@ ext2_get_acl(struct inode *inode, int type)
197 acl = ERR_PTR(retval); 168 acl = ERR_PTR(retval);
198 kfree(value); 169 kfree(value);
199 170
200 if (!IS_ERR(acl)) { 171 if (!IS_ERR(acl))
201 switch(type) { 172 set_cached_acl(inode, type, acl);
202 case ACL_TYPE_ACCESS:
203 ext2_iset_acl(inode, &ei->i_acl, acl);
204 break;
205 173
206 case ACL_TYPE_DEFAULT:
207 ext2_iset_acl(inode, &ei->i_default_acl, acl);
208 break;
209 }
210 }
211 return acl; 174 return acl;
212} 175}
213 176
@@ -217,7 +180,6 @@ ext2_get_acl(struct inode *inode, int type)
217static int 180static int
218ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) 181ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
219{ 182{
220 struct ext2_inode_info *ei = EXT2_I(inode);
221 int name_index; 183 int name_index;
222 void *value = NULL; 184 void *value = NULL;
223 size_t size = 0; 185 size_t size = 0;
@@ -263,17 +225,8 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
263 error = ext2_xattr_set(inode, name_index, "", value, size, 0); 225 error = ext2_xattr_set(inode, name_index, "", value, size, 0);
264 226
265 kfree(value); 227 kfree(value);
266 if (!error) { 228 if (!error)
267 switch(type) { 229 set_cached_acl(inode, type, acl);
268 case ACL_TYPE_ACCESS:
269 ext2_iset_acl(inode, &ei->i_acl, acl);
270 break;
271
272 case ACL_TYPE_DEFAULT:
273 ext2_iset_acl(inode, &ei->i_default_acl, acl);
274 break;
275 }
276 }
277 return error; 230 return error;
278} 231}
279 232
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index b42cf578554b..ecefe478898f 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -53,10 +53,6 @@ static inline int ext2_acl_count(size_t size)
53 53
54#ifdef CONFIG_EXT2_FS_POSIX_ACL 54#ifdef CONFIG_EXT2_FS_POSIX_ACL
55 55
56/* Value for inode->u.ext2_i.i_acl and inode->u.ext2_i.i_default_acl
57 if the ACL has not been cached */
58#define EXT2_ACL_NOT_CACHED ((void *)-1)
59
60/* acl.c */ 56/* acl.c */
61extern int ext2_permission (struct inode *, int); 57extern int ext2_permission (struct inode *, int);
62extern int ext2_acl_chmod (struct inode *); 58extern int ext2_acl_chmod (struct inode *);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index d988a718aedb..9a8a8e27a063 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -47,10 +47,6 @@ struct ext2_inode_info {
47 */ 47 */
48 struct rw_semaphore xattr_sem; 48 struct rw_semaphore xattr_sem;
49#endif 49#endif
50#ifdef CONFIG_EXT2_FS_POSIX_ACL
51 struct posix_acl *i_acl;
52 struct posix_acl *i_default_acl;
53#endif
54 rwlock_t i_meta_lock; 50 rwlock_t i_meta_lock;
55 51
56 /* 52 /*
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 29ed682061f6..e27130341d4f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1224,10 +1224,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
1224 return inode; 1224 return inode;
1225 1225
1226 ei = EXT2_I(inode); 1226 ei = EXT2_I(inode);
1227#ifdef CONFIG_EXT2_FS_POSIX_ACL
1228 ei->i_acl = EXT2_ACL_NOT_CACHED;
1229 ei->i_default_acl = EXT2_ACL_NOT_CACHED;
1230#endif
1231 ei->i_block_alloc_info = NULL; 1227 ei->i_block_alloc_info = NULL;
1232 1228
1233 raw_inode = ext2_get_inode(inode->i_sb, ino, &bh); 1229 raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 458999638c3d..1a9ffee47d56 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -152,10 +152,6 @@ static struct inode *ext2_alloc_inode(struct super_block *sb)
152 ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); 152 ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
153 if (!ei) 153 if (!ei)
154 return NULL; 154 return NULL;
155#ifdef CONFIG_EXT2_FS_POSIX_ACL
156 ei->i_acl = EXT2_ACL_NOT_CACHED;
157 ei->i_default_acl = EXT2_ACL_NOT_CACHED;
158#endif
159 ei->i_block_alloc_info = NULL; 155 ei->i_block_alloc_info = NULL;
160 ei->vfs_inode.i_version = 1; 156 ei->vfs_inode.i_version = 1;
161 return &ei->vfs_inode; 157 return &ei->vfs_inode;
@@ -198,18 +194,6 @@ static void destroy_inodecache(void)
198static void ext2_clear_inode(struct inode *inode) 194static void ext2_clear_inode(struct inode *inode)
199{ 195{
200 struct ext2_block_alloc_info *rsv = EXT2_I(inode)->i_block_alloc_info; 196 struct ext2_block_alloc_info *rsv = EXT2_I(inode)->i_block_alloc_info;
201#ifdef CONFIG_EXT2_FS_POSIX_ACL
202 struct ext2_inode_info *ei = EXT2_I(inode);
203
204 if (ei->i_acl && ei->i_acl != EXT2_ACL_NOT_CACHED) {
205 posix_acl_release(ei->i_acl);
206 ei->i_acl = EXT2_ACL_NOT_CACHED;
207 }
208 if (ei->i_default_acl && ei->i_default_acl != EXT2_ACL_NOT_CACHED) {
209 posix_acl_release(ei->i_default_acl);
210 ei->i_default_acl = EXT2_ACL_NOT_CACHED;
211 }
212#endif
213 ext2_discard_reservation(inode); 197 ext2_discard_reservation(inode);
214 EXT2_I(inode)->i_block_alloc_info = NULL; 198 EXT2_I(inode)->i_block_alloc_info = NULL;
215 if (unlikely(rsv)) 199 if (unlikely(rsv))
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index e0c745451715..e167bae37ef0 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -126,33 +126,6 @@ fail:
126 return ERR_PTR(-EINVAL); 126 return ERR_PTR(-EINVAL);
127} 127}
128 128
129static inline struct posix_acl *
130ext3_iget_acl(struct inode *inode, struct posix_acl **i_acl)
131{
132 struct posix_acl *acl = ACCESS_ONCE(*i_acl);
133
134 if (acl) {
135 spin_lock(&inode->i_lock);
136 acl = *i_acl;
137 if (acl != EXT3_ACL_NOT_CACHED)
138 acl = posix_acl_dup(acl);
139 spin_unlock(&inode->i_lock);
140 }
141
142 return acl;
143}
144
145static inline void
146ext3_iset_acl(struct inode *inode, struct posix_acl **i_acl,
147 struct posix_acl *acl)
148{
149 spin_lock(&inode->i_lock);
150 if (*i_acl != EXT3_ACL_NOT_CACHED)
151 posix_acl_release(*i_acl);
152 *i_acl = posix_acl_dup(acl);
153 spin_unlock(&inode->i_lock);
154}
155
156/* 129/*
157 * Inode operation get_posix_acl(). 130 * Inode operation get_posix_acl().
158 * 131 *
@@ -161,7 +134,6 @@ ext3_iset_acl(struct inode *inode, struct posix_acl **i_acl,
161static struct posix_acl * 134static struct posix_acl *
162ext3_get_acl(struct inode *inode, int type) 135ext3_get_acl(struct inode *inode, int type)
163{ 136{
164 struct ext3_inode_info *ei = EXT3_I(inode);
165 int name_index; 137 int name_index;
166 char *value = NULL; 138 char *value = NULL;
167 struct posix_acl *acl; 139 struct posix_acl *acl;
@@ -170,24 +142,21 @@ ext3_get_acl(struct inode *inode, int type)
170 if (!test_opt(inode->i_sb, POSIX_ACL)) 142 if (!test_opt(inode->i_sb, POSIX_ACL))
171 return NULL; 143 return NULL;
172 144
173 switch(type) { 145 acl = get_cached_acl(inode, type);
174 case ACL_TYPE_ACCESS: 146 if (acl != ACL_NOT_CACHED)
175 acl = ext3_iget_acl(inode, &ei->i_acl); 147 return acl;
176 if (acl != EXT3_ACL_NOT_CACHED) 148
177 return acl; 149 switch (type) {
178 name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; 150 case ACL_TYPE_ACCESS:
179 break; 151 name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
180 152 break;
181 case ACL_TYPE_DEFAULT: 153 case ACL_TYPE_DEFAULT:
182 acl = ext3_iget_acl(inode, &ei->i_default_acl); 154 name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
183 if (acl != EXT3_ACL_NOT_CACHED) 155 break;
184 return acl; 156 default:
185 name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT; 157 BUG();
186 break;
187
188 default:
189 return ERR_PTR(-EINVAL);
190 } 158 }
159
191 retval = ext3_xattr_get(inode, name_index, "", NULL, 0); 160 retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
192 if (retval > 0) { 161 if (retval > 0) {
193 value = kmalloc(retval, GFP_NOFS); 162 value = kmalloc(retval, GFP_NOFS);
@@ -203,17 +172,9 @@ ext3_get_acl(struct inode *inode, int type)
203 acl = ERR_PTR(retval); 172 acl = ERR_PTR(retval);
204 kfree(value); 173 kfree(value);
205 174
206 if (!IS_ERR(acl)) { 175 if (!IS_ERR(acl))
207 switch(type) { 176 set_cached_acl(inode, type, acl);
208 case ACL_TYPE_ACCESS:
209 ext3_iset_acl(inode, &ei->i_acl, acl);
210 break;
211 177
212 case ACL_TYPE_DEFAULT:
213 ext3_iset_acl(inode, &ei->i_default_acl, acl);
214 break;
215 }
216 }
217 return acl; 178 return acl;
218} 179}
219 180
@@ -226,7 +187,6 @@ static int
226ext3_set_acl(handle_t *handle, struct inode *inode, int type, 187ext3_set_acl(handle_t *handle, struct inode *inode, int type,
227 struct posix_acl *acl) 188 struct posix_acl *acl)
228{ 189{
229 struct ext3_inode_info *ei = EXT3_I(inode);
230 int name_index; 190 int name_index;
231 void *value = NULL; 191 void *value = NULL;
232 size_t size = 0; 192 size_t size = 0;
@@ -271,17 +231,10 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
271 value, size, 0); 231 value, size, 0);
272 232
273 kfree(value); 233 kfree(value);
274 if (!error) {
275 switch(type) {
276 case ACL_TYPE_ACCESS:
277 ext3_iset_acl(inode, &ei->i_acl, acl);
278 break;
279 234
280 case ACL_TYPE_DEFAULT: 235 if (!error)
281 ext3_iset_acl(inode, &ei->i_default_acl, acl); 236 set_cached_acl(inode, type, acl);
282 break; 237
283 }
284 }
285 return error; 238 return error;
286} 239}
287 240
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
index 42da16b8cac0..07d15a3a5969 100644
--- a/fs/ext3/acl.h
+++ b/fs/ext3/acl.h
@@ -53,10 +53,6 @@ static inline int ext3_acl_count(size_t size)
53 53
54#ifdef CONFIG_EXT3_FS_POSIX_ACL 54#ifdef CONFIG_EXT3_FS_POSIX_ACL
55 55
56/* Value for inode->u.ext3_i.i_acl and inode->u.ext3_i.i_default_acl
57 if the ACL has not been cached */
58#define EXT3_ACL_NOT_CACHED ((void *)-1)
59
60/* acl.c */ 56/* acl.c */
61extern int ext3_permission (struct inode *, int); 57extern int ext3_permission (struct inode *, int);
62extern int ext3_acl_chmod (struct inode *); 58extern int ext3_acl_chmod (struct inode *);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 05dea8132fc0..5f51fed5c750 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2752,10 +2752,6 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2752 return inode; 2752 return inode;
2753 2753
2754 ei = EXT3_I(inode); 2754 ei = EXT3_I(inode);
2755#ifdef CONFIG_EXT3_FS_POSIX_ACL
2756 ei->i_acl = EXT3_ACL_NOT_CACHED;
2757 ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2758#endif
2759 ei->i_block_alloc_info = NULL; 2755 ei->i_block_alloc_info = NULL;
2760 2756
2761 ret = __ext3_get_inode_loc(inode, &iloc, 0); 2757 ret = __ext3_get_inode_loc(inode, &iloc, 0);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 601e881e6105..524b349c6299 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -464,10 +464,6 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
464 ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); 464 ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
465 if (!ei) 465 if (!ei)
466 return NULL; 466 return NULL;
467#ifdef CONFIG_EXT3_FS_POSIX_ACL
468 ei->i_acl = EXT3_ACL_NOT_CACHED;
469 ei->i_default_acl = EXT3_ACL_NOT_CACHED;
470#endif
471 ei->i_block_alloc_info = NULL; 467 ei->i_block_alloc_info = NULL;
472 ei->vfs_inode.i_version = 1; 468 ei->vfs_inode.i_version = 1;
473 return &ei->vfs_inode; 469 return &ei->vfs_inode;
@@ -518,18 +514,6 @@ static void destroy_inodecache(void)
518static void ext3_clear_inode(struct inode *inode) 514static void ext3_clear_inode(struct inode *inode)
519{ 515{
520 struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info; 516 struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
521#ifdef CONFIG_EXT3_FS_POSIX_ACL
522 if (EXT3_I(inode)->i_acl &&
523 EXT3_I(inode)->i_acl != EXT3_ACL_NOT_CACHED) {
524 posix_acl_release(EXT3_I(inode)->i_acl);
525 EXT3_I(inode)->i_acl = EXT3_ACL_NOT_CACHED;
526 }
527 if (EXT3_I(inode)->i_default_acl &&
528 EXT3_I(inode)->i_default_acl != EXT3_ACL_NOT_CACHED) {
529 posix_acl_release(EXT3_I(inode)->i_default_acl);
530 EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
531 }
532#endif
533 ext3_discard_reservation(inode); 517 ext3_discard_reservation(inode);
534 EXT3_I(inode)->i_block_alloc_info = NULL; 518 EXT3_I(inode)->i_block_alloc_info = NULL;
535 if (unlikely(rsv)) 519 if (unlikely(rsv))
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 605aeed96d68..f6d8967149ca 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -126,33 +126,6 @@ fail:
126 return ERR_PTR(-EINVAL); 126 return ERR_PTR(-EINVAL);
127} 127}
128 128
129static inline struct posix_acl *
130ext4_iget_acl(struct inode *inode, struct posix_acl **i_acl)
131{
132 struct posix_acl *acl = ACCESS_ONCE(*i_acl);
133
134 if (acl) {
135 spin_lock(&inode->i_lock);
136 acl = *i_acl;
137 if (acl != EXT4_ACL_NOT_CACHED)
138 acl = posix_acl_dup(acl);
139 spin_unlock(&inode->i_lock);
140 }
141
142 return acl;
143}
144
145static inline void
146ext4_iset_acl(struct inode *inode, struct posix_acl **i_acl,
147 struct posix_acl *acl)
148{
149 spin_lock(&inode->i_lock);
150 if (*i_acl != EXT4_ACL_NOT_CACHED)
151 posix_acl_release(*i_acl);
152 *i_acl = posix_acl_dup(acl);
153 spin_unlock(&inode->i_lock);
154}
155
156/* 129/*
157 * Inode operation get_posix_acl(). 130 * Inode operation get_posix_acl().
158 * 131 *
@@ -161,7 +134,6 @@ ext4_iset_acl(struct inode *inode, struct posix_acl **i_acl,
161static struct posix_acl * 134static struct posix_acl *
162ext4_get_acl(struct inode *inode, int type) 135ext4_get_acl(struct inode *inode, int type)
163{ 136{
164 struct ext4_inode_info *ei = EXT4_I(inode);
165 int name_index; 137 int name_index;
166 char *value = NULL; 138 char *value = NULL;
167 struct posix_acl *acl; 139 struct posix_acl *acl;
@@ -170,23 +142,19 @@ ext4_get_acl(struct inode *inode, int type)
170 if (!test_opt(inode->i_sb, POSIX_ACL)) 142 if (!test_opt(inode->i_sb, POSIX_ACL))
171 return NULL; 143 return NULL;
172 144
145 acl = get_cached_acl(inode, type);
146 if (acl != ACL_NOT_CACHED)
147 return acl;
148
173 switch (type) { 149 switch (type) {
174 case ACL_TYPE_ACCESS: 150 case ACL_TYPE_ACCESS:
175 acl = ext4_iget_acl(inode, &ei->i_acl);
176 if (acl != EXT4_ACL_NOT_CACHED)
177 return acl;
178 name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; 151 name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
179 break; 152 break;
180
181 case ACL_TYPE_DEFAULT: 153 case ACL_TYPE_DEFAULT:
182 acl = ext4_iget_acl(inode, &ei->i_default_acl);
183 if (acl != EXT4_ACL_NOT_CACHED)
184 return acl;
185 name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT; 154 name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
186 break; 155 break;
187
188 default: 156 default:
189 return ERR_PTR(-EINVAL); 157 BUG();
190 } 158 }
191 retval = ext4_xattr_get(inode, name_index, "", NULL, 0); 159 retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
192 if (retval > 0) { 160 if (retval > 0) {
@@ -203,17 +171,9 @@ ext4_get_acl(struct inode *inode, int type)
203 acl = ERR_PTR(retval); 171 acl = ERR_PTR(retval);
204 kfree(value); 172 kfree(value);
205 173
206 if (!IS_ERR(acl)) { 174 if (!IS_ERR(acl))
207 switch (type) { 175 set_cached_acl(inode, type, acl);
208 case ACL_TYPE_ACCESS:
209 ext4_iset_acl(inode, &ei->i_acl, acl);
210 break;
211 176
212 case ACL_TYPE_DEFAULT:
213 ext4_iset_acl(inode, &ei->i_default_acl, acl);
214 break;
215 }
216 }
217 return acl; 177 return acl;
218} 178}
219 179
@@ -226,7 +186,6 @@ static int
226ext4_set_acl(handle_t *handle, struct inode *inode, int type, 186ext4_set_acl(handle_t *handle, struct inode *inode, int type,
227 struct posix_acl *acl) 187 struct posix_acl *acl)
228{ 188{
229 struct ext4_inode_info *ei = EXT4_I(inode);
230 int name_index; 189 int name_index;
231 void *value = NULL; 190 void *value = NULL;
232 size_t size = 0; 191 size_t size = 0;
@@ -271,17 +230,9 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
271 value, size, 0); 230 value, size, 0);
272 231
273 kfree(value); 232 kfree(value);
274 if (!error) { 233 if (!error)
275 switch (type) { 234 set_cached_acl(inode, type, acl);
276 case ACL_TYPE_ACCESS:
277 ext4_iset_acl(inode, &ei->i_acl, acl);
278 break;
279 235
280 case ACL_TYPE_DEFAULT:
281 ext4_iset_acl(inode, &ei->i_default_acl, acl);
282 break;
283 }
284 }
285 return error; 236 return error;
286} 237}
287 238
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index cb45257a246e..949789d2bba6 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -53,10 +53,6 @@ static inline int ext4_acl_count(size_t size)
53 53
54#ifdef CONFIG_EXT4_FS_POSIX_ACL 54#ifdef CONFIG_EXT4_FS_POSIX_ACL
55 55
56/* Value for inode->u.ext4_i.i_acl and inode->u.ext4_i.i_default_acl
57 if the ACL has not been cached */
58#define EXT4_ACL_NOT_CACHED ((void *)-1)
59
60/* acl.c */ 56/* acl.c */
61extern int ext4_permission(struct inode *, int); 57extern int ext4_permission(struct inode *, int);
62extern int ext4_acl_chmod(struct inode *); 58extern int ext4_acl_chmod(struct inode *);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 17b9998680e3..0ddf7e55abe1 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -595,10 +595,6 @@ struct ext4_inode_info {
595 */ 595 */
596 struct rw_semaphore xattr_sem; 596 struct rw_semaphore xattr_sem;
597#endif 597#endif
598#ifdef CONFIG_EXT4_FS_POSIX_ACL
599 struct posix_acl *i_acl;
600 struct posix_acl *i_default_acl;
601#endif
602 598
603 struct list_head i_orphan; /* unlinked but open inodes */ 599 struct list_head i_orphan; /* unlinked but open inodes */
604 600
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7c17ae275af4..60a26f3a6f8b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4453,10 +4453,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4453 return inode; 4453 return inode;
4454 4454
4455 ei = EXT4_I(inode); 4455 ei = EXT4_I(inode);
4456#ifdef CONFIG_EXT4_FS_POSIX_ACL
4457 ei->i_acl = EXT4_ACL_NOT_CACHED;
4458 ei->i_default_acl = EXT4_ACL_NOT_CACHED;
4459#endif
4460 4456
4461 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4457 ret = __ext4_get_inode_loc(inode, &iloc, 0);
4462 if (ret < 0) 4458 if (ret < 0)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8bb9e2d3e4b8..8f4f079e6b9a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -666,10 +666,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
666 if (!ei) 666 if (!ei)
667 return NULL; 667 return NULL;
668 668
669#ifdef CONFIG_EXT4_FS_POSIX_ACL
670 ei->i_acl = EXT4_ACL_NOT_CACHED;
671 ei->i_default_acl = EXT4_ACL_NOT_CACHED;
672#endif
673 ei->vfs_inode.i_version = 1; 669 ei->vfs_inode.i_version = 1;
674 ei->vfs_inode.i_data.writeback_index = 0; 670 ei->vfs_inode.i_data.writeback_index = 0;
675 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); 671 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
@@ -735,18 +731,6 @@ static void destroy_inodecache(void)
735 731
736static void ext4_clear_inode(struct inode *inode) 732static void ext4_clear_inode(struct inode *inode)
737{ 733{
738#ifdef CONFIG_EXT4_FS_POSIX_ACL
739 if (EXT4_I(inode)->i_acl &&
740 EXT4_I(inode)->i_acl != EXT4_ACL_NOT_CACHED) {
741 posix_acl_release(EXT4_I(inode)->i_acl);
742 EXT4_I(inode)->i_acl = EXT4_ACL_NOT_CACHED;
743 }
744 if (EXT4_I(inode)->i_default_acl &&
745 EXT4_I(inode)->i_default_acl != EXT4_ACL_NOT_CACHED) {
746 posix_acl_release(EXT4_I(inode)->i_default_acl);
747 EXT4_I(inode)->i_default_acl = EXT4_ACL_NOT_CACHED;
748 }
749#endif
750 ext4_discard_preallocations(inode); 734 ext4_discard_preallocations(inode);
751 if (EXT4_JOURNAL(inode)) 735 if (EXT4_JOURNAL(inode))
752 jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, 736 jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index caf049146ca2..c54226be5294 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -278,7 +278,26 @@ int sb_has_dirty_inodes(struct super_block *sb)
278EXPORT_SYMBOL(sb_has_dirty_inodes); 278EXPORT_SYMBOL(sb_has_dirty_inodes);
279 279
280/* 280/*
281 * Write a single inode's dirty pages and inode data out to disk. 281 * Wait for writeback on an inode to complete.
282 */
283static void inode_wait_for_writeback(struct inode *inode)
284{
285 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
286 wait_queue_head_t *wqh;
287
288 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
289 do {
290 spin_unlock(&inode_lock);
291 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
292 spin_lock(&inode_lock);
293 } while (inode->i_state & I_SYNC);
294}
295
296/*
297 * Write out an inode's dirty pages. Called under inode_lock. Either the
298 * caller has ref on the inode (either via __iget or via syscall against an fd)
299 * or the inode has I_WILL_FREE set (via generic_forget_inode)
300 *
282 * If `wait' is set, wait on the writeout. 301 * If `wait' is set, wait on the writeout.
283 * 302 *
284 * The whole writeout design is quite complex and fragile. We want to avoid 303 * The whole writeout design is quite complex and fragile. We want to avoid
@@ -288,13 +307,38 @@ EXPORT_SYMBOL(sb_has_dirty_inodes);
288 * Called under inode_lock. 307 * Called under inode_lock.
289 */ 308 */
290static int 309static int
291__sync_single_inode(struct inode *inode, struct writeback_control *wbc) 310writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
292{ 311{
293 unsigned dirty;
294 struct address_space *mapping = inode->i_mapping; 312 struct address_space *mapping = inode->i_mapping;
295 int wait = wbc->sync_mode == WB_SYNC_ALL; 313 int wait = wbc->sync_mode == WB_SYNC_ALL;
314 unsigned dirty;
296 int ret; 315 int ret;
297 316
317 if (!atomic_read(&inode->i_count))
318 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
319 else
320 WARN_ON(inode->i_state & I_WILL_FREE);
321
322 if (inode->i_state & I_SYNC) {
323 /*
324 * If this inode is locked for writeback and we are not doing
325 * writeback-for-data-integrity, move it to s_more_io so that
326 * writeback can proceed with the other inodes on s_io.
327 *
328 * We'll have another go at writing back this inode when we
329 * completed a full scan of s_io.
330 */
331 if (!wait) {
332 requeue_io(inode);
333 return 0;
334 }
335
336 /*
337 * It's a data-integrity sync. We must wait.
338 */
339 inode_wait_for_writeback(inode);
340 }
341
298 BUG_ON(inode->i_state & I_SYNC); 342 BUG_ON(inode->i_state & I_SYNC);
299 343
300 /* Set I_SYNC, reset I_DIRTY */ 344 /* Set I_SYNC, reset I_DIRTY */
@@ -390,50 +434,6 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
390} 434}
391 435
392/* 436/*
393 * Write out an inode's dirty pages. Called under inode_lock. Either the
394 * caller has ref on the inode (either via __iget or via syscall against an fd)
395 * or the inode has I_WILL_FREE set (via generic_forget_inode)
396 */
397static int
398__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
399{
400 wait_queue_head_t *wqh;
401
402 if (!atomic_read(&inode->i_count))
403 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
404 else
405 WARN_ON(inode->i_state & I_WILL_FREE);
406
407 if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
408 /*
409 * We're skipping this inode because it's locked, and we're not
410 * doing writeback-for-data-integrity. Move it to s_more_io so
411 * that writeback can proceed with the other inodes on s_io.
412 * We'll have another go at writing back this inode when we
413 * completed a full scan of s_io.
414 */
415 requeue_io(inode);
416 return 0;
417 }
418
419 /*
420 * It's a data-integrity sync. We must wait.
421 */
422 if (inode->i_state & I_SYNC) {
423 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
424
425 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
426 do {
427 spin_unlock(&inode_lock);
428 __wait_on_bit(wqh, &wq, inode_wait,
429 TASK_UNINTERRUPTIBLE);
430 spin_lock(&inode_lock);
431 } while (inode->i_state & I_SYNC);
432 }
433 return __sync_single_inode(inode, wbc);
434}
435
436/*
437 * Write out a superblock's list of dirty inodes. A wait will be performed 437 * Write out a superblock's list of dirty inodes. A wait will be performed
438 * upon no inodes, all inodes or the final one, depending upon sync_mode. 438 * upon no inodes, all inodes or the final one, depending upon sync_mode.
439 * 439 *
@@ -526,7 +526,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
526 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); 526 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
527 __iget(inode); 527 __iget(inode);
528 pages_skipped = wbc->pages_skipped; 528 pages_skipped = wbc->pages_skipped;
529 __writeback_single_inode(inode, wbc); 529 writeback_single_inode(inode, wbc);
530 if (current_is_pdflush()) 530 if (current_is_pdflush())
531 writeback_release(bdi); 531 writeback_release(bdi);
532 if (wbc->pages_skipped != pages_skipped) { 532 if (wbc->pages_skipped != pages_skipped) {
@@ -708,7 +708,7 @@ int write_inode_now(struct inode *inode, int sync)
708 708
709 might_sleep(); 709 might_sleep();
710 spin_lock(&inode_lock); 710 spin_lock(&inode_lock);
711 ret = __writeback_single_inode(inode, &wbc); 711 ret = writeback_single_inode(inode, &wbc);
712 spin_unlock(&inode_lock); 712 spin_unlock(&inode_lock);
713 if (sync) 713 if (sync)
714 inode_sync_wait(inode); 714 inode_sync_wait(inode);
@@ -732,7 +732,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
732 int ret; 732 int ret;
733 733
734 spin_lock(&inode_lock); 734 spin_lock(&inode_lock);
735 ret = __writeback_single_inode(inode, wbc); 735 ret = writeback_single_inode(inode, wbc);
736 spin_unlock(&inode_lock); 736 spin_unlock(&inode_lock);
737 return ret; 737 return ret;
738} 738}
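
The new inode_wait_for_writeback() open-codes its bit wait because inode_lock has to be dropped around each sleep. When no spinlock needs releasing, the same wait can be expressed with the stock wait_on_bit() helper; a minimal sketch with made-up example_* names:

#include <linux/wait.h>
#include <linux/sched.h>

static int example_bit_wait(void *word)
{
	schedule();		/* invoked while the bit is still set */
	return 0;
}

static void example_wait_for_bit(unsigned long *word, int bit)
{
	wait_on_bit(word, bit, example_bit_wait, TASK_UNINTERRUPTIBLE);
}
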
diff --git a/fs/inode.c b/fs/inode.c
index f643be565df8..901bad1e5f12 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -25,6 +25,7 @@
25#include <linux/fsnotify.h> 25#include <linux/fsnotify.h>
26#include <linux/mount.h> 26#include <linux/mount.h>
27#include <linux/async.h> 27#include <linux/async.h>
28#include <linux/posix_acl.h>
28 29
29/* 30/*
30 * This is needed for the following functions: 31 * This is needed for the following functions:
@@ -189,6 +190,9 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
189 } 190 }
190 inode->i_private = NULL; 191 inode->i_private = NULL;
191 inode->i_mapping = mapping; 192 inode->i_mapping = mapping;
193#ifdef CONFIG_FS_POSIX_ACL
194 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
195#endif
192 196
193#ifdef CONFIG_FSNOTIFY 197#ifdef CONFIG_FSNOTIFY
194 inode->i_fsnotify_mask = 0; 198 inode->i_fsnotify_mask = 0;
@@ -227,6 +231,12 @@ void destroy_inode(struct inode *inode)
227 ima_inode_free(inode); 231 ima_inode_free(inode);
228 security_inode_free(inode); 232 security_inode_free(inode);
229 fsnotify_inode_delete(inode); 233 fsnotify_inode_delete(inode);
234#ifdef CONFIG_FS_POSIX_ACL
235 if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
236 posix_acl_release(inode->i_acl);
237 if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
238 posix_acl_release(inode->i_default_acl);
239#endif
230 if (inode->i_sb->s_op->destroy_inode) 240 if (inode->i_sb->s_op->destroy_inode)
231 inode->i_sb->s_op->destroy_inode(inode); 241 inode->i_sb->s_op->destroy_inode(inode);
232 else 242 else
@@ -665,12 +675,17 @@ void unlock_new_inode(struct inode *inode)
665 if (inode->i_mode & S_IFDIR) { 675 if (inode->i_mode & S_IFDIR) {
666 struct file_system_type *type = inode->i_sb->s_type; 676 struct file_system_type *type = inode->i_sb->s_type;
667 677
668 /*
669 * ensure nobody is actually holding i_mutex
670 */
671 mutex_destroy(&inode->i_mutex);
672 mutex_init(&inode->i_mutex);
673 lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key);
678 /* Set new key only if filesystem hasn't already changed it */
679 if (!lockdep_match_class(&inode->i_mutex,
680 &type->i_mutex_key)) {
681 /*
682 * ensure nobody is actually holding i_mutex
683 */
684 mutex_destroy(&inode->i_mutex);
685 mutex_init(&inode->i_mutex);
686 lockdep_set_class(&inode->i_mutex,
687 &type->i_mutex_dir_key);
688 }
674 } 689 }
675#endif 690#endif
676 /* 691 /*
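
The unlock_new_inode() hunk above relies on lockdep_match_class() to skip the re-keying when a filesystem has already installed its own i_mutex class. A minimal sketch of that check-then-rekey pattern on an arbitrary mutex; example_* names are made up and the mutex must be unlocked at this point:

#include <linux/mutex.h>
#include <linux/lockdep.h>

static struct lock_class_key example_dir_key;

static void example_rekey_mutex(struct mutex *m)
{
	if (!lockdep_match_class(m, &example_dir_key)) {
		mutex_destroy(m);	/* ensure nobody is holding it */
		mutex_init(m);
		lockdep_set_class(m, &example_dir_key);
	}
}
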
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 001f8d3118f2..5612880fcbe7 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -15,6 +15,7 @@
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <linux/writeback.h> 16#include <linux/writeback.h>
17#include <linux/buffer_head.h> 17#include <linux/buffer_head.h>
18#include <linux/falloc.h>
18 19
19#include <asm/ioctls.h> 20#include <asm/ioctls.h>
20 21
@@ -403,6 +404,37 @@ EXPORT_SYMBOL(generic_block_fiemap);
403 404
404#endif /* CONFIG_BLOCK */ 405#endif /* CONFIG_BLOCK */
405 406
407/*
408 * This provides compatibility with legacy XFS pre-allocation ioctls
409 * which predate the fallocate syscall.
410 *
411 * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
412 * are used here, rest are ignored.
413 */
414int ioctl_preallocate(struct file *filp, void __user *argp)
415{
416 struct inode *inode = filp->f_path.dentry->d_inode;
417 struct space_resv sr;
418
419 if (copy_from_user(&sr, argp, sizeof(sr)))
420 return -EFAULT;
421
422 switch (sr.l_whence) {
423 case SEEK_SET:
424 break;
425 case SEEK_CUR:
426 sr.l_start += filp->f_pos;
427 break;
428 case SEEK_END:
429 sr.l_start += i_size_read(inode);
430 break;
431 default:
432 return -EINVAL;
433 }
434
435 return do_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
436}
437
406static int file_ioctl(struct file *filp, unsigned int cmd, 438static int file_ioctl(struct file *filp, unsigned int cmd,
407 unsigned long arg) 439 unsigned long arg)
408{ 440{
@@ -414,6 +446,9 @@ static int file_ioctl(struct file *filp, unsigned int cmd,
414 return ioctl_fibmap(filp, p); 446 return ioctl_fibmap(filp, p);
415 case FIONREAD: 447 case FIONREAD:
416 return put_user(i_size_read(inode) - filp->f_pos, p); 448 return put_user(i_size_read(inode) - filp->f_pos, p);
449 case FS_IOC_RESVSP:
450 case FS_IOC_RESVSP64:
451 return ioctl_preallocate(filp, p);
417 } 452 }
418 453
419 return vfs_ioctl(filp, cmd, arg); 454 return vfs_ioctl(filp, cmd, arg);
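
Seen from userspace, the legacy reservation ioctl and the native syscall now reach the same do_fallocate() path. A hedged sketch, assuming the xfsprogs headers for xfs_flock64_t and XFS_IOC_RESVSP64 (the same ioctl value as FS_IOC_RESVSP64 handled above) and a glibc new enough to provide fallocate():

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/falloc.h>
#include <xfs/xfs_fs.h>

static int reserve_space(int fd, off_t len)
{
	xfs_flock64_t sr = { .l_whence = SEEK_SET, .l_start = 0, .l_len = len };

	if (ioctl(fd, XFS_IOC_RESVSP64, &sr) == 0)	/* legacy pre-allocation ioctl */
		return 0;
	return fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, len);	/* native syscall */
}
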
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 043740dde20c..edd2ad6416d8 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -156,48 +156,25 @@ static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size)
156 return ERR_PTR(-EINVAL); 156 return ERR_PTR(-EINVAL);
157} 157}
158 158
159static struct posix_acl *jffs2_iget_acl(struct inode *inode, struct posix_acl **i_acl)
160{
161 struct posix_acl *acl = JFFS2_ACL_NOT_CACHED;
162
163 spin_lock(&inode->i_lock);
164 if (*i_acl != JFFS2_ACL_NOT_CACHED)
165 acl = posix_acl_dup(*i_acl);
166 spin_unlock(&inode->i_lock);
167 return acl;
168}
169
170static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct posix_acl *acl)
171{
172 spin_lock(&inode->i_lock);
173 if (*i_acl != JFFS2_ACL_NOT_CACHED)
174 posix_acl_release(*i_acl);
175 *i_acl = posix_acl_dup(acl);
176 spin_unlock(&inode->i_lock);
177}
178
179static struct posix_acl *jffs2_get_acl(struct inode *inode, int type) 159static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
180{ 160{
181 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
182 struct posix_acl *acl; 161 struct posix_acl *acl;
183 char *value = NULL; 162 char *value = NULL;
184 int rc, xprefix; 163 int rc, xprefix;
185 164
165 acl = get_cached_acl(inode, type);
166 if (acl != ACL_NOT_CACHED)
167 return acl;
168
186 switch (type) { 169 switch (type) {
187 case ACL_TYPE_ACCESS: 170 case ACL_TYPE_ACCESS:
188 acl = jffs2_iget_acl(inode, &f->i_acl_access);
189 if (acl != JFFS2_ACL_NOT_CACHED)
190 return acl;
191 xprefix = JFFS2_XPREFIX_ACL_ACCESS; 171 xprefix = JFFS2_XPREFIX_ACL_ACCESS;
192 break; 172 break;
193 case ACL_TYPE_DEFAULT: 173 case ACL_TYPE_DEFAULT:
194 acl = jffs2_iget_acl(inode, &f->i_acl_default);
195 if (acl != JFFS2_ACL_NOT_CACHED)
196 return acl;
197 xprefix = JFFS2_XPREFIX_ACL_DEFAULT; 174 xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
198 break; 175 break;
199 default: 176 default:
200 return ERR_PTR(-EINVAL); 177 BUG();
201 } 178 }
202 rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0); 179 rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0);
203 if (rc > 0) { 180 if (rc > 0) {
@@ -215,16 +192,8 @@ static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
215 } 192 }
216 if (value) 193 if (value)
217 kfree(value); 194 kfree(value);
218 if (!IS_ERR(acl)) {
219 switch (type) {
220 case ACL_TYPE_ACCESS:
221 jffs2_iset_acl(inode, &f->i_acl_access, acl);
222 break;
223 case ACL_TYPE_DEFAULT:
224 jffs2_iset_acl(inode, &f->i_acl_default, acl);
225 break;
226 }
227 }
195 if (!IS_ERR(acl))
196 set_cached_acl(inode, type, acl);
228 return acl; 197 return acl;
229} 198}
230 199
@@ -249,7 +218,6 @@ static int __jffs2_set_acl(struct inode *inode, int xprefix, struct posix_acl *a
249 218
250static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) 219static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
251{ 220{
252 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
253 int rc, xprefix; 221 int rc, xprefix;
254 222
255 if (S_ISLNK(inode->i_mode)) 223 if (S_ISLNK(inode->i_mode))
@@ -285,16 +253,8 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
285 return -EINVAL; 253 return -EINVAL;
286 } 254 }
287 rc = __jffs2_set_acl(inode, xprefix, acl); 255 rc = __jffs2_set_acl(inode, xprefix, acl);
288 if (!rc) {
289 switch(type) {
290 case ACL_TYPE_ACCESS:
291 jffs2_iset_acl(inode, &f->i_acl_access, acl);
292 break;
293 case ACL_TYPE_DEFAULT:
294 jffs2_iset_acl(inode, &f->i_acl_default, acl);
295 break;
296 }
297 }
256 if (!rc)
257 set_cached_acl(inode, type, acl);
298 return rc; 258 return rc;
299} 259}
300 260
@@ -321,12 +281,11 @@ int jffs2_permission(struct inode *inode, int mask)
321 281
322int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode) 282int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
323{ 283{
324 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
325 struct posix_acl *acl, *clone; 284 struct posix_acl *acl, *clone;
326 int rc; 285 int rc;
327 286
328 f->i_acl_default = NULL; 287 inode->i_default_acl = NULL;
329 f->i_acl_access = NULL; 288 inode->i_acl = NULL;
330 289
331 if (S_ISLNK(*i_mode)) 290 if (S_ISLNK(*i_mode))
332 return 0; /* Symlink always has no-ACL */ 291 return 0; /* Symlink always has no-ACL */
@@ -339,7 +298,7 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
339 *i_mode &= ~current_umask(); 298 *i_mode &= ~current_umask();
340 } else { 299 } else {
341 if (S_ISDIR(*i_mode)) 300 if (S_ISDIR(*i_mode))
342 jffs2_iset_acl(inode, &f->i_acl_default, acl); 301 set_cached_acl(inode, ACL_TYPE_DEFAULT, acl);
343 302
344 clone = posix_acl_clone(acl, GFP_KERNEL); 303 clone = posix_acl_clone(acl, GFP_KERNEL);
345 if (!clone) 304 if (!clone)
@@ -350,7 +309,7 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
350 return rc; 309 return rc;
351 } 310 }
352 if (rc > 0) 311 if (rc > 0)
353 jffs2_iset_acl(inode, &f->i_acl_access, clone); 312 set_cached_acl(inode, ACL_TYPE_ACCESS, clone);
354 313
355 posix_acl_release(clone); 314 posix_acl_release(clone);
356 } 315 }
@@ -359,17 +318,16 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
359 318
360int jffs2_init_acl_post(struct inode *inode) 319int jffs2_init_acl_post(struct inode *inode)
361{ 320{
362 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
363 int rc; 321 int rc;
364 322
365 if (f->i_acl_default) { 323 if (inode->i_default_acl) {
366 rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, f->i_acl_default); 324 rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, inode->i_default_acl);
367 if (rc) 325 if (rc)
368 return rc; 326 return rc;
369 } 327 }
370 328
371 if (f->i_acl_access) { 329 if (inode->i_acl) {
372 rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, f->i_acl_access); 330 rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, inode->i_acl);
373 if (rc) 331 if (rc)
374 return rc; 332 return rc;
375 } 333 }
@@ -377,18 +335,6 @@ int jffs2_init_acl_post(struct inode *inode)
377 return 0; 335 return 0;
378} 336}
379 337
380void jffs2_clear_acl(struct jffs2_inode_info *f)
381{
382 if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) {
383 posix_acl_release(f->i_acl_access);
384 f->i_acl_access = JFFS2_ACL_NOT_CACHED;
385 }
386 if (f->i_acl_default && f->i_acl_default != JFFS2_ACL_NOT_CACHED) {
387 posix_acl_release(f->i_acl_default);
388 f->i_acl_default = JFFS2_ACL_NOT_CACHED;
389 }
390}
391
392int jffs2_acl_chmod(struct inode *inode) 338int jffs2_acl_chmod(struct inode *inode)
393{ 339{
394 struct posix_acl *acl, *clone; 340 struct posix_acl *acl, *clone;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 8ca058aed384..fc929f2a14f6 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -26,13 +26,10 @@ struct jffs2_acl_header {
26 26
27#ifdef CONFIG_JFFS2_FS_POSIX_ACL 27#ifdef CONFIG_JFFS2_FS_POSIX_ACL
28 28
29#define JFFS2_ACL_NOT_CACHED ((void *)-1)
30
31extern int jffs2_permission(struct inode *, int); 29extern int jffs2_permission(struct inode *, int);
32extern int jffs2_acl_chmod(struct inode *); 30extern int jffs2_acl_chmod(struct inode *);
33extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); 31extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
34extern int jffs2_init_acl_post(struct inode *); 32extern int jffs2_init_acl_post(struct inode *);
35extern void jffs2_clear_acl(struct jffs2_inode_info *);
36 33
37extern struct xattr_handler jffs2_acl_access_xattr_handler; 34extern struct xattr_handler jffs2_acl_access_xattr_handler;
38extern struct xattr_handler jffs2_acl_default_xattr_handler; 35extern struct xattr_handler jffs2_acl_default_xattr_handler;
@@ -43,6 +40,5 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
43#define jffs2_acl_chmod(inode) (0) 40#define jffs2_acl_chmod(inode) (0)
44#define jffs2_init_acl_pre(dir_i,inode,mode) (0) 41#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
45#define jffs2_init_acl_post(inode) (0) 42#define jffs2_init_acl_post(inode) (0)
46#define jffs2_clear_acl(f)
47 43
48#endif /* CONFIG_JFFS2_FS_POSIX_ACL */ 44#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h
index 4c41db91eaa4..c6923da98263 100644
--- a/fs/jffs2/jffs2_fs_i.h
+++ b/fs/jffs2/jffs2_fs_i.h
@@ -50,10 +50,6 @@ struct jffs2_inode_info {
50 uint16_t flags; 50 uint16_t flags;
51 uint8_t usercompr; 51 uint8_t usercompr;
52 struct inode vfs_inode; 52 struct inode vfs_inode;
53#ifdef CONFIG_JFFS2_FS_POSIX_ACL
54 struct posix_acl *i_acl_access;
55 struct posix_acl *i_acl_default;
56#endif
57}; 53};
58 54
59#endif /* _JFFS2_FS_I */ 55#endif /* _JFFS2_FS_I */
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 2228380c47b9..a7f03b7ebcb3 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -56,10 +56,6 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
56 f->target = NULL; 56 f->target = NULL;
57 f->flags = 0; 57 f->flags = 0;
58 f->usercompr = 0; 58 f->usercompr = 0;
59#ifdef CONFIG_JFFS2_FS_POSIX_ACL
60 f->i_acl_access = JFFS2_ACL_NOT_CACHED;
61 f->i_acl_default = JFFS2_ACL_NOT_CACHED;
62#endif
63} 59}
64 60
65 61
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 1fc1e92356ee..1a80301004b8 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1424,7 +1424,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
1424 struct jffs2_full_dirent *fd, *fds; 1424 struct jffs2_full_dirent *fd, *fds;
1425 int deleted; 1425 int deleted;
1426 1426
1427 jffs2_clear_acl(f);
1428 jffs2_xattr_delete_inode(c, f->inocache); 1427 jffs2_xattr_delete_inode(c, f->inocache);
1429 mutex_lock(&f->sem); 1428 mutex_lock(&f->sem);
1430 deleted = f->inocache && !f->inocache->pino_nlink; 1429 deleted = f->inocache && !f->inocache->pino_nlink;
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 06ca1b8d2054..f272bf032e1e 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -31,27 +31,24 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
31{ 31{
32 struct posix_acl *acl; 32 struct posix_acl *acl;
33 char *ea_name; 33 char *ea_name;
34 struct jfs_inode_info *ji = JFS_IP(inode);
35 struct posix_acl **p_acl;
36 int size; 34 int size;
37 char *value = NULL; 35 char *value = NULL;
38 36
37 acl = get_cached_acl(inode, type);
38 if (acl != ACL_NOT_CACHED)
39 return acl;
40
39 switch(type) { 41 switch(type) {
40 case ACL_TYPE_ACCESS: 42 case ACL_TYPE_ACCESS:
41 ea_name = POSIX_ACL_XATTR_ACCESS; 43 ea_name = POSIX_ACL_XATTR_ACCESS;
42 p_acl = &ji->i_acl;
43 break; 44 break;
44 case ACL_TYPE_DEFAULT: 45 case ACL_TYPE_DEFAULT:
45 ea_name = POSIX_ACL_XATTR_DEFAULT; 46 ea_name = POSIX_ACL_XATTR_DEFAULT;
46 p_acl = &ji->i_default_acl;
47 break; 47 break;
48 default: 48 default:
49 return ERR_PTR(-EINVAL); 49 return ERR_PTR(-EINVAL);
50 } 50 }
51 51
52 if (*p_acl != JFS_ACL_NOT_CACHED)
53 return posix_acl_dup(*p_acl);
54
55 size = __jfs_getxattr(inode, ea_name, NULL, 0); 52 size = __jfs_getxattr(inode, ea_name, NULL, 0);
56 53
57 if (size > 0) { 54 if (size > 0) {
@@ -62,17 +59,18 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
62 } 59 }
63 60
64 if (size < 0) { 61 if (size < 0) {
65 if (size == -ENODATA) { 62 if (size == -ENODATA)
66 *p_acl = NULL;
67 acl = NULL; 63 acl = NULL;
68 } else 64 else
69 acl = ERR_PTR(size); 65 acl = ERR_PTR(size);
70 } else { 66 } else {
71 acl = posix_acl_from_xattr(value, size); 67 acl = posix_acl_from_xattr(value, size);
72 if (!IS_ERR(acl))
73 *p_acl = posix_acl_dup(acl);
74 } 68 }
75 kfree(value); 69 kfree(value);
70 if (!IS_ERR(acl)) {
71 set_cached_acl(inode, type, acl);
72 posix_acl_release(acl);
73 }
76 return acl; 74 return acl;
77} 75}
78 76
@@ -80,8 +78,6 @@ static int jfs_set_acl(tid_t tid, struct inode *inode, int type,
80 struct posix_acl *acl) 78 struct posix_acl *acl)
81{ 79{
82 char *ea_name; 80 char *ea_name;
83 struct jfs_inode_info *ji = JFS_IP(inode);
84 struct posix_acl **p_acl;
85 int rc; 81 int rc;
86 int size = 0; 82 int size = 0;
87 char *value = NULL; 83 char *value = NULL;
@@ -92,11 +88,9 @@ static int jfs_set_acl(tid_t tid, struct inode *inode, int type,
92 switch(type) { 88 switch(type) {
93 case ACL_TYPE_ACCESS: 89 case ACL_TYPE_ACCESS:
94 ea_name = POSIX_ACL_XATTR_ACCESS; 90 ea_name = POSIX_ACL_XATTR_ACCESS;
95 p_acl = &ji->i_acl;
96 break; 91 break;
97 case ACL_TYPE_DEFAULT: 92 case ACL_TYPE_DEFAULT:
98 ea_name = POSIX_ACL_XATTR_DEFAULT; 93 ea_name = POSIX_ACL_XATTR_DEFAULT;
99 p_acl = &ji->i_default_acl;
100 if (!S_ISDIR(inode->i_mode)) 94 if (!S_ISDIR(inode->i_mode))
101 return acl ? -EACCES : 0; 95 return acl ? -EACCES : 0;
102 break; 96 break;
@@ -116,27 +110,23 @@ static int jfs_set_acl(tid_t tid, struct inode *inode, int type,
116out: 110out:
117 kfree(value); 111 kfree(value);
118 112
119 if (!rc) {
120 if (*p_acl && (*p_acl != JFS_ACL_NOT_CACHED))
121 posix_acl_release(*p_acl);
122 *p_acl = posix_acl_dup(acl);
123 }
113 if (!rc)
114 set_cached_acl(inode, type, acl);
115
124 return rc; 116 return rc;
125} 117}
126 118
127static int jfs_check_acl(struct inode *inode, int mask) 119static int jfs_check_acl(struct inode *inode, int mask)
128{ 120{
129 struct jfs_inode_info *ji = JFS_IP(inode); 121 if (inode->i_acl == ACL_NOT_CACHED) {
130
131 if (ji->i_acl == JFS_ACL_NOT_CACHED) {
132 struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS); 122 struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
133 if (IS_ERR(acl)) 123 if (IS_ERR(acl))
134 return PTR_ERR(acl); 124 return PTR_ERR(acl);
135 posix_acl_release(acl); 125 posix_acl_release(acl);
136 } 126 }
137 127
138 if (ji->i_acl) 128 if (inode->i_acl)
139 return posix_acl_permission(inode, ji->i_acl, mask); 129 return posix_acl_permission(inode, inode->i_acl, mask);
140 return -EAGAIN; 130 return -EAGAIN;
141} 131}
142 132
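
The jfs conversion above also shows the reference rule for the new cache: set_cached_acl() takes its own reference on the ACL it stores, so a caller that built the ACL (for example via posix_acl_from_xattr()) still drops the reference it holds. A minimal sketch with a made-up name:

#include <linux/fs.h>
#include <linux/posix_acl.h>

static void example_cache_fresh_acl(struct inode *inode, struct posix_acl *acl)
{
	set_cached_acl(inode, ACL_TYPE_ACCESS, acl);	/* cache takes its own reference */
	posix_acl_release(acl);				/* drop the caller's reference */
}
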
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 439901d205fe..1439f119ec83 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -74,10 +74,6 @@ struct jfs_inode_info {
74 /* xattr_sem allows us to access the xattrs without taking i_mutex */ 74 /* xattr_sem allows us to access the xattrs without taking i_mutex */
75 struct rw_semaphore xattr_sem; 75 struct rw_semaphore xattr_sem;
76 lid_t xtlid; /* lid of xtree lock on directory */ 76 lid_t xtlid; /* lid of xtree lock on directory */
77#ifdef CONFIG_JFS_POSIX_ACL
78 struct posix_acl *i_acl;
79 struct posix_acl *i_default_acl;
80#endif
81 union { 77 union {
82 struct { 78 struct {
83 xtpage_t _xtroot; /* 288: xtree root */ 79 xtpage_t _xtroot; /* 288: xtree root */
@@ -107,8 +103,6 @@ struct jfs_inode_info {
107#define i_inline u.link._inline 103#define i_inline u.link._inline
108#define i_inline_ea u.link._inline_ea 104#define i_inline_ea u.link._inline_ea
109 105
110#define JFS_ACL_NOT_CACHED ((void *)-1)
111
112#define IREAD_LOCK(ip, subclass) \ 106#define IREAD_LOCK(ip, subclass) \
113 down_read_nested(&JFS_IP(ip)->rdwrlock, subclass) 107 down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
114#define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock) 108#define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 09b1b6ee2186..37e6dcda8fc8 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -128,18 +128,6 @@ static void jfs_destroy_inode(struct inode *inode)
128 ji->active_ag = -1; 128 ji->active_ag = -1;
129 } 129 }
130 spin_unlock_irq(&ji->ag_lock); 130 spin_unlock_irq(&ji->ag_lock);
131
132#ifdef CONFIG_JFS_POSIX_ACL
133 if (ji->i_acl != JFS_ACL_NOT_CACHED) {
134 posix_acl_release(ji->i_acl);
135 ji->i_acl = JFS_ACL_NOT_CACHED;
136 }
137 if (ji->i_default_acl != JFS_ACL_NOT_CACHED) {
138 posix_acl_release(ji->i_default_acl);
139 ji->i_default_acl = JFS_ACL_NOT_CACHED;
140 }
141#endif
142
143 kmem_cache_free(jfs_inode_cachep, ji); 131 kmem_cache_free(jfs_inode_cachep, ji);
144} 132}
145 133
@@ -798,10 +786,6 @@ static void init_once(void *foo)
798 init_rwsem(&jfs_ip->xattr_sem); 786 init_rwsem(&jfs_ip->xattr_sem);
799 spin_lock_init(&jfs_ip->ag_lock); 787 spin_lock_init(&jfs_ip->ag_lock);
800 jfs_ip->active_ag = -1; 788 jfs_ip->active_ag = -1;
801#ifdef CONFIG_JFS_POSIX_ACL
802 jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
803 jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
804#endif
805 inode_init_once(&jfs_ip->vfs_inode); 789 inode_init_once(&jfs_ip->vfs_inode);
806} 790}
807 791
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 61dfa8173ebc..fad364548bc9 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -727,10 +727,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
727 /* 727 /*
728 * We're changing the ACL. Get rid of the cached one 728 * We're changing the ACL. Get rid of the cached one
729 */ 729 */
730 acl =JFS_IP(inode)->i_acl; 730 forget_cached_acl(inode, ACL_TYPE_ACCESS);
731 if (acl != JFS_ACL_NOT_CACHED)
732 posix_acl_release(acl);
733 JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;
734 731
735 return 0; 732 return 0;
736 } else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) { 733 } else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
@@ -746,10 +743,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
746 /* 743 /*
747 * We're changing the default ACL. Get rid of the cached one 744 * We're changing the default ACL. Get rid of the cached one
748 */ 745 */
749 acl =JFS_IP(inode)->i_default_acl; 746 forget_cached_acl(inode, ACL_TYPE_DEFAULT);
750 if (acl && (acl != JFS_ACL_NOT_CACHED))
751 posix_acl_release(acl);
752 JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED;
753 747
754 return 0; 748 return 0;
755 } 749 }
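
When an ACL xattr is rewritten directly, as in can_set_system_xattr() above, the cached copy is simply dropped and re-read on the next lookup. A minimal sketch using the same forget_cached_acl() helper; the function name is made up:

#include <linux/fs.h>
#include <linux/posix_acl.h>

static void example_invalidate_acls(struct inode *inode)
{
	forget_cached_acl(inode, ACL_TYPE_ACCESS);
	forget_cached_acl(inode, ACL_TYPE_DEFAULT);
}
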
diff --git a/fs/namei.c b/fs/namei.c
index 527119afb6a5..5b961eb71cbf 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1698,8 +1698,11 @@ struct file *do_filp_open(int dfd, const char *pathname,
1698 if (error) 1698 if (error)
1699 return ERR_PTR(error); 1699 return ERR_PTR(error);
1700 error = path_walk(pathname, &nd); 1700 error = path_walk(pathname, &nd);
1701 if (error) 1701 if (error) {
1702 if (nd.root.mnt)
1703 path_put(&nd.root);
1702 return ERR_PTR(error); 1704 return ERR_PTR(error);
1705 }
1703 if (unlikely(!audit_dummy_context())) 1706 if (unlikely(!audit_dummy_context()))
1704 audit_inode(pathname, nd.path.dentry); 1707 audit_inode(pathname, nd.path.dentry);
1705 1708
@@ -1759,6 +1762,8 @@ do_last:
1759 } 1762 }
1760 filp = nameidata_to_filp(&nd, open_flag); 1763 filp = nameidata_to_filp(&nd, open_flag);
1761 mnt_drop_write(nd.path.mnt); 1764 mnt_drop_write(nd.path.mnt);
1765 if (nd.root.mnt)
1766 path_put(&nd.root);
1762 return filp; 1767 return filp;
1763 } 1768 }
1764 1769
@@ -1819,6 +1824,8 @@ ok:
1819 */ 1824 */
1820 if (will_write) 1825 if (will_write)
1821 mnt_drop_write(nd.path.mnt); 1826 mnt_drop_write(nd.path.mnt);
1827 if (nd.root.mnt)
1828 path_put(&nd.root);
1822 return filp; 1829 return filp;
1823 1830
1824exit_mutex_unlock: 1831exit_mutex_unlock:
@@ -1859,6 +1866,8 @@ do_link:
1859 * with "intent.open". 1866 * with "intent.open".
1860 */ 1867 */
1861 release_open_intent(&nd); 1868 release_open_intent(&nd);
1869 if (nd.root.mnt)
1870 path_put(&nd.root);
1862 return ERR_PTR(error); 1871 return ERR_PTR(error);
1863 } 1872 }
1864 nd.flags &= ~LOOKUP_PARENT; 1873 nd.flags &= ~LOOKUP_PARENT;
diff --git a/fs/namespace.c b/fs/namespace.c
index a7bea8c8bd46..3dc283fd4716 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -42,6 +42,8 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
42static int event; 42static int event;
43static DEFINE_IDA(mnt_id_ida); 43static DEFINE_IDA(mnt_id_ida);
44static DEFINE_IDA(mnt_group_ida); 44static DEFINE_IDA(mnt_group_ida);
45static int mnt_id_start = 0;
46static int mnt_group_start = 1;
45 47
46static struct list_head *mount_hashtable __read_mostly; 48static struct list_head *mount_hashtable __read_mostly;
47static struct kmem_cache *mnt_cache __read_mostly; 49static struct kmem_cache *mnt_cache __read_mostly;
@@ -69,7 +71,9 @@ static int mnt_alloc_id(struct vfsmount *mnt)
69retry: 71retry:
70 ida_pre_get(&mnt_id_ida, GFP_KERNEL); 72 ida_pre_get(&mnt_id_ida, GFP_KERNEL);
71 spin_lock(&vfsmount_lock); 73 spin_lock(&vfsmount_lock);
72 res = ida_get_new(&mnt_id_ida, &mnt->mnt_id); 74 res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
75 if (!res)
76 mnt_id_start = mnt->mnt_id + 1;
73 spin_unlock(&vfsmount_lock); 77 spin_unlock(&vfsmount_lock);
74 if (res == -EAGAIN) 78 if (res == -EAGAIN)
75 goto retry; 79 goto retry;
@@ -79,8 +83,11 @@ retry:
79 83
80static void mnt_free_id(struct vfsmount *mnt) 84static void mnt_free_id(struct vfsmount *mnt)
81{ 85{
86 int id = mnt->mnt_id;
82 spin_lock(&vfsmount_lock); 87 spin_lock(&vfsmount_lock);
83 ida_remove(&mnt_id_ida, mnt->mnt_id); 88 ida_remove(&mnt_id_ida, id);
89 if (mnt_id_start > id)
90 mnt_id_start = id;
84 spin_unlock(&vfsmount_lock); 91 spin_unlock(&vfsmount_lock);
85} 92}
86 93
@@ -91,10 +98,18 @@ static void mnt_free_id(struct vfsmount *mnt)
91 */ 98 */
92static int mnt_alloc_group_id(struct vfsmount *mnt) 99static int mnt_alloc_group_id(struct vfsmount *mnt)
93{ 100{
101 int res;
102
94 if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) 103 if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
95 return -ENOMEM; 104 return -ENOMEM;
96 105
97 return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id); 106 res = ida_get_new_above(&mnt_group_ida,
107 mnt_group_start,
108 &mnt->mnt_group_id);
109 if (!res)
110 mnt_group_start = mnt->mnt_group_id + 1;
111
112 return res;
98} 113}
99 114
100/* 115/*
@@ -102,7 +117,10 @@ static int mnt_alloc_group_id(struct vfsmount *mnt)
102 */ 117 */
103void mnt_release_group_id(struct vfsmount *mnt) 118void mnt_release_group_id(struct vfsmount *mnt)
104{ 119{
105 ida_remove(&mnt_group_ida, mnt->mnt_group_id); 120 int id = mnt->mnt_group_id;
121 ida_remove(&mnt_group_ida, id);
122 if (mnt_group_start > id)
123 mnt_group_start = id;
106 mnt->mnt_group_id = 0; 124 mnt->mnt_group_id = 0;
107} 125}
108 126
@@ -2222,16 +2240,9 @@ static void __init init_mount_tree(void)
2222 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL); 2240 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
2223 if (IS_ERR(mnt)) 2241 if (IS_ERR(mnt))
2224 panic("Can't create rootfs"); 2242 panic("Can't create rootfs");
2225 ns = kmalloc(sizeof(*ns), GFP_KERNEL); 2243 ns = create_mnt_ns(mnt);
2226 if (!ns) 2244 if (IS_ERR(ns))
2227 panic("Can't allocate initial namespace"); 2245 panic("Can't allocate initial namespace");
2228 atomic_set(&ns->count, 1);
2229 INIT_LIST_HEAD(&ns->list);
2230 init_waitqueue_head(&ns->poll);
2231 ns->event = 0;
2232 list_add(&mnt->mnt_list, &ns->list);
2233 ns->root = mnt;
2234 mnt->mnt_ns = ns;
2235 2246
2236 init_task.nsproxy->mnt_ns = ns; 2247 init_task.nsproxy->mnt_ns = ns;
2237 get_mnt_ns(ns); 2248 get_mnt_ns(ns);
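
The mnt_id/mnt_group_id changes keep identifiers ascending by remembering the lowest value worth trying next and lowering it again on free. A minimal sketch of that scheme around a private ida; all example_* names are made up:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_lock);
static int example_id_start;	/* lowest id worth trying next */

static int example_alloc_id(int *id)
{
	int res;

retry:
	if (!ida_pre_get(&example_ida, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&example_lock);
	res = ida_get_new_above(&example_ida, example_id_start, id);
	if (!res)
		example_id_start = *id + 1;	/* next allocation continues above */
	spin_unlock(&example_lock);
	if (res == -EAGAIN)
		goto retry;
	return res;
}

static void example_free_id(int id)
{
	spin_lock(&example_lock);
	ida_remove(&example_ida, id);
	if (example_id_start > id)
		example_id_start = id;		/* freed ids get reused first */
	spin_unlock(&example_lock);
}
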
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2696d6b513b7..fe9d8f2a13f8 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -309,10 +309,6 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
309 /* ii->i_file_acl = 0; */ 309 /* ii->i_file_acl = 0; */
310 /* ii->i_dir_acl = 0; */ 310 /* ii->i_dir_acl = 0; */
311 ii->i_dir_start_lookup = 0; 311 ii->i_dir_start_lookup = 0;
312#ifdef CONFIG_NILFS_FS_POSIX_ACL
313 ii->i_acl = NULL;
314 ii->i_default_acl = NULL;
315#endif
316 ii->i_cno = 0; 312 ii->i_cno = 0;
317 nilfs_set_inode_flags(inode); 313 nilfs_set_inode_flags(inode);
318 spin_lock(&sbi->s_next_gen_lock); 314 spin_lock(&sbi->s_next_gen_lock);
@@ -434,10 +430,6 @@ static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
434 430
435 raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh); 431 raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);
436 432
437#ifdef CONFIG_NILFS_FS_POSIX_ACL
438 ii->i_acl = NILFS_ACL_NOT_CACHED;
439 ii->i_default_acl = NILFS_ACL_NOT_CACHED;
440#endif
441 if (nilfs_read_inode_common(inode, raw_inode)) 433 if (nilfs_read_inode_common(inode, raw_inode))
442 goto failed_unmap; 434 goto failed_unmap;
443 435
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index edf6a59d9f2a..724c63766e82 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -58,10 +58,6 @@ struct nilfs_inode_info {
58 */ 58 */
59 struct rw_semaphore xattr_sem; 59 struct rw_semaphore xattr_sem;
60#endif 60#endif
61#ifdef CONFIG_NILFS_POSIX_ACL
62 struct posix_acl *i_acl;
63 struct posix_acl *i_default_acl;
64#endif
65 struct buffer_head *i_bh; /* i_bh contains a new or dirty 61 struct buffer_head *i_bh; /* i_bh contains a new or dirty
66 disk inode */ 62 disk inode */
67 struct inode vfs_inode; 63 struct inode vfs_inode;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index ab785f85aa50..8e2ec43b18f4 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -189,16 +189,6 @@ static void nilfs_clear_inode(struct inode *inode)
189{ 189{
190 struct nilfs_inode_info *ii = NILFS_I(inode); 190 struct nilfs_inode_info *ii = NILFS_I(inode);
191 191
192#ifdef CONFIG_NILFS_POSIX_ACL
193 if (ii->i_acl && ii->i_acl != NILFS_ACL_NOT_CACHED) {
194 posix_acl_release(ii->i_acl);
195 ii->i_acl = NILFS_ACL_NOT_CACHED;
196 }
197 if (ii->i_default_acl && ii->i_default_acl != NILFS_ACL_NOT_CACHED) {
198 posix_acl_release(ii->i_default_acl);
199 ii->i_default_acl = NILFS_ACL_NOT_CACHED;
200 }
201#endif
202 /* 192 /*
203 * Free resources allocated in nilfs_read_inode(), here. 193 * Free resources allocated in nilfs_read_inode(), here.
204 */ 194 */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 6cdeaa76f27f..110bb57c46ab 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -92,6 +92,9 @@ struct ocfs2_unblock_ctl {
92 enum ocfs2_unblock_action unblock_action; 92 enum ocfs2_unblock_action unblock_action;
93}; 93};
94 94
95/* Lockdep class keys */
96struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
97
95static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, 98static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
96 int new_level); 99 int new_level);
97static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres); 100static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
@@ -317,9 +320,16 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
317 u32 dlm_flags); 320 u32 dlm_flags);
318static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres, 321static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
319 int wanted); 322 int wanted);
320static void ocfs2_cluster_unlock(struct ocfs2_super *osb, 323static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
321 struct ocfs2_lock_res *lockres, 324 struct ocfs2_lock_res *lockres,
322 int level); 325 int level, unsigned long caller_ip);
326static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
327 struct ocfs2_lock_res *lockres,
328 int level)
329{
330 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
331}
332
323static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres); 333static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
324static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres); 334static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
325static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres); 335static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
@@ -489,6 +499,13 @@ static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
489 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug); 499 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
490 500
491 ocfs2_init_lock_stats(res); 501 ocfs2_init_lock_stats(res);
502#ifdef CONFIG_DEBUG_LOCK_ALLOC
503 if (type != OCFS2_LOCK_TYPE_OPEN)
504 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
505 &lockdep_keys[type], 0);
506 else
507 res->l_lockdep_map.key = NULL;
508#endif
492} 509}
493 510
494void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res) 511void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
@@ -644,14 +661,10 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
644static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res, 661static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
645 struct ocfs2_super *osb) 662 struct ocfs2_super *osb)
646{ 663{
647 struct ocfs2_orphan_scan_lvb *lvb;
648
649 ocfs2_lock_res_init_once(res); 664 ocfs2_lock_res_init_once(res);
650 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name); 665 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
651 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN, 666 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
652 &ocfs2_orphan_scan_lops, osb); 667 &ocfs2_orphan_scan_lops, osb);
653 lvb = ocfs2_dlm_lvb(&res->l_lksb);
654 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
655} 668}
656 669
657void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres, 670void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
@@ -1256,11 +1269,13 @@ static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1256 return ret; 1269 return ret;
1257} 1270}
1258 1271
1259static int ocfs2_cluster_lock(struct ocfs2_super *osb, 1272static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1260 struct ocfs2_lock_res *lockres, 1273 struct ocfs2_lock_res *lockres,
1261 int level, 1274 int level,
1262 u32 lkm_flags, 1275 u32 lkm_flags,
1263 int arg_flags) 1276 int arg_flags,
1277 int l_subclass,
1278 unsigned long caller_ip)
1264{ 1279{
1265 struct ocfs2_mask_waiter mw; 1280 struct ocfs2_mask_waiter mw;
1266 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR); 1281 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
@@ -1403,13 +1418,37 @@ out:
1403 } 1418 }
1404 ocfs2_update_lock_stats(lockres, level, &mw, ret); 1419 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1405 1420
1421#ifdef CONFIG_DEBUG_LOCK_ALLOC
1422 if (!ret && lockres->l_lockdep_map.key != NULL) {
1423 if (level == DLM_LOCK_PR)
1424 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1425 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1426 caller_ip);
1427 else
1428 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1429 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1430 caller_ip);
1431 }
1432#endif
1406 mlog_exit(ret); 1433 mlog_exit(ret);
1407 return ret; 1434 return ret;
1408} 1435}
1409 1436
1410static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
1411 struct ocfs2_lock_res *lockres,
1412 int level)
1437static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1438 struct ocfs2_lock_res *lockres,
1439 int level,
1440 u32 lkm_flags,
1441 int arg_flags)
1442{
1443 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1444 0, _RET_IP_);
1445}
1446
1447
1448static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1449 struct ocfs2_lock_res *lockres,
1450 int level,
1451 unsigned long caller_ip)
1413{ 1452{
1414 unsigned long flags; 1453 unsigned long flags;
1415 1454
@@ -1418,6 +1457,10 @@ static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
1418 ocfs2_dec_holders(lockres, level); 1457 ocfs2_dec_holders(lockres, level);
1419 ocfs2_downconvert_on_unlock(osb, lockres); 1458 ocfs2_downconvert_on_unlock(osb, lockres);
1420 spin_unlock_irqrestore(&lockres->l_lock, flags); 1459 spin_unlock_irqrestore(&lockres->l_lock, flags);
1460#ifdef CONFIG_DEBUG_LOCK_ALLOC
1461 if (lockres->l_lockdep_map.key != NULL)
1462 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1463#endif
1421 mlog_exit_void(); 1464 mlog_exit_void();
1422} 1465}
1423 1466
@@ -1989,7 +2032,8 @@ static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
1989{ 2032{
1990 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2033 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
1991 2034
1992 if (lvb->lvb_version == OCFS2_LVB_VERSION 2035 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2036 && lvb->lvb_version == OCFS2_LVB_VERSION
1993 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation) 2037 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
1994 return 1; 2038 return 1;
1995 return 0; 2039 return 0;
@@ -2162,10 +2206,11 @@ static int ocfs2_assign_bh(struct inode *inode,
2162 * returns < 0 error if the callback will never be called, otherwise 2206 * returns < 0 error if the callback will never be called, otherwise
2163 * the result of the lock will be communicated via the callback. 2207 * the result of the lock will be communicated via the callback.
2164 */ 2208 */
2165int ocfs2_inode_lock_full(struct inode *inode, 2209int ocfs2_inode_lock_full_nested(struct inode *inode,
2166 struct buffer_head **ret_bh, 2210 struct buffer_head **ret_bh,
2167 int ex, 2211 int ex,
2168 int arg_flags) 2212 int arg_flags,
2213 int subclass)
2169{ 2214{
2170 int status, level, acquired; 2215 int status, level, acquired;
2171 u32 dlm_flags; 2216 u32 dlm_flags;
@@ -2203,7 +2248,8 @@ int ocfs2_inode_lock_full(struct inode *inode,
2203 if (arg_flags & OCFS2_META_LOCK_NOQUEUE) 2248 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2204 dlm_flags |= DLM_LKF_NOQUEUE; 2249 dlm_flags |= DLM_LKF_NOQUEUE;
2205 2250
2206 status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags); 2251 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2252 arg_flags, subclass, _RET_IP_);
2207 if (status < 0) { 2253 if (status < 0) {
2208 if (status != -EAGAIN && status != -EIOCBRETRY) 2254 if (status != -EAGAIN && status != -EIOCBRETRY)
2209 mlog_errno(status); 2255 mlog_errno(status);
@@ -2369,35 +2415,45 @@ void ocfs2_inode_unlock(struct inode *inode,
2369 mlog_exit_void(); 2415 mlog_exit_void();
2370} 2416}
2371 2417
2372int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno, int ex) 2418int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2373{ 2419{
2374 struct ocfs2_lock_res *lockres; 2420 struct ocfs2_lock_res *lockres;
2375 struct ocfs2_orphan_scan_lvb *lvb; 2421 struct ocfs2_orphan_scan_lvb *lvb;
2376 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2377 int status = 0; 2422 int status = 0;
2378 2423
2424 if (ocfs2_is_hard_readonly(osb))
2425 return -EROFS;
2426
2427 if (ocfs2_mount_local(osb))
2428 return 0;
2429
2379 lockres = &osb->osb_orphan_scan.os_lockres; 2430 lockres = &osb->osb_orphan_scan.os_lockres;
2380 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); 2431 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2381 if (status < 0) 2432 if (status < 0)
2382 return status; 2433 return status;
2383 2434
2384 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2435 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2385 if (lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION) 2436 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2437 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2386 *seqno = be32_to_cpu(lvb->lvb_os_seqno); 2438 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2439 else
2440 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2441
2387 return status; 2442 return status;
2388} 2443}
2389 2444
2390void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno, int ex) 2445void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2391{ 2446{
2392 struct ocfs2_lock_res *lockres; 2447 struct ocfs2_lock_res *lockres;
2393 struct ocfs2_orphan_scan_lvb *lvb; 2448 struct ocfs2_orphan_scan_lvb *lvb;
2394 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2395 2449
2396 lockres = &osb->osb_orphan_scan.os_lockres;
2397 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2398 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2399 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2400 ocfs2_cluster_unlock(osb, lockres, level);
2450 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2451 lockres = &osb->osb_orphan_scan.os_lockres;
2452 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2453 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2454 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2455 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2456 }
2401} 2457}
2402 2458
2403int ocfs2_super_lock(struct ocfs2_super *osb, 2459int ocfs2_super_lock(struct ocfs2_super *osb,
@@ -3627,7 +3683,8 @@ static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3627 struct ocfs2_global_disk_dqinfo *gdinfo; 3683 struct ocfs2_global_disk_dqinfo *gdinfo;
3628 int status = 0; 3684 int status = 0;
3629 3685
3630 if (lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) { 3686 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3687 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3631 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace); 3688 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3632 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace); 3689 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3633 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms); 3690 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
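
The dlmglue changes teach lockdep about cluster locks by embedding a lockdep_map in the lock resource and reporting acquire/release events around the real DLM calls. A stripped-down sketch of the same wiring on a made-up lock type (example_* names are assumptions, not ocfs2 code):

#include <linux/lockdep.h>
#include <linux/kernel.h>

struct example_lock {
	int			state;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

static struct lock_class_key example_lock_key;

static void example_lock_init(struct example_lock *l, const char *name)
{
	l->state = 0;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	lockdep_init_map(&l->dep_map, name, &example_lock_key, 0);
#endif
}

static void example_lock_shared(struct example_lock *l)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	rwsem_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
#endif
	/* ... actually take the lock in shared mode ... */
}

static void example_unlock(struct example_lock *l)
{
	/* ... actually drop the lock ... */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	rwsem_release(&l->dep_map, 1, _RET_IP_);
#endif
}
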
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 31b90d7b8f51..7553836931de 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -78,6 +78,14 @@ struct ocfs2_orphan_scan_lvb {
78/* don't block waiting for the downconvert thread, instead return -EAGAIN */ 78/* don't block waiting for the downconvert thread, instead return -EAGAIN */
79#define OCFS2_LOCK_NONBLOCK (0x04) 79#define OCFS2_LOCK_NONBLOCK (0x04)
80 80
81/* Locking subclasses of inode cluster lock */
82enum {
83 OI_LS_NORMAL = 0,
84 OI_LS_PARENT,
85 OI_LS_RENAME1,
86 OI_LS_RENAME2,
87};
88
81int ocfs2_dlm_init(struct ocfs2_super *osb); 89int ocfs2_dlm_init(struct ocfs2_super *osb);
82void ocfs2_dlm_shutdown(struct ocfs2_super *osb, int hangup_pending); 90void ocfs2_dlm_shutdown(struct ocfs2_super *osb, int hangup_pending);
83void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res); 91void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res);
@@ -104,25 +112,31 @@ void ocfs2_open_unlock(struct inode *inode);
104int ocfs2_inode_lock_atime(struct inode *inode, 112int ocfs2_inode_lock_atime(struct inode *inode,
105 struct vfsmount *vfsmnt, 113 struct vfsmount *vfsmnt,
106 int *level); 114 int *level);
107int ocfs2_inode_lock_full(struct inode *inode, 115int ocfs2_inode_lock_full_nested(struct inode *inode,
108 struct buffer_head **ret_bh, 116 struct buffer_head **ret_bh,
109 int ex, 117 int ex,
110 int arg_flags); 118 int arg_flags,
119 int subclass);
111int ocfs2_inode_lock_with_page(struct inode *inode, 120int ocfs2_inode_lock_with_page(struct inode *inode,
112 struct buffer_head **ret_bh, 121 struct buffer_head **ret_bh,
113 int ex, 122 int ex,
114 struct page *page); 123 struct page *page);
124/* Variants without special locking class or flags */
125#define ocfs2_inode_lock_full(i, r, e, f)\
126 ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
127#define ocfs2_inode_lock_nested(i, b, e, s)\
128 ocfs2_inode_lock_full_nested(i, b, e, 0, s)
115/* 99% of the time we don't want to supply any additional flags -- 129/* 99% of the time we don't want to supply any additional flags --
116 * those are for very specific cases only. */ 130 * those are for very specific cases only. */
117#define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full(i, b, e, 0) 131#define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full_nested(i, b, e, 0, OI_LS_NORMAL)
118void ocfs2_inode_unlock(struct inode *inode, 132void ocfs2_inode_unlock(struct inode *inode,
119 int ex); 133 int ex);
120int ocfs2_super_lock(struct ocfs2_super *osb, 134int ocfs2_super_lock(struct ocfs2_super *osb,
121 int ex); 135 int ex);
122void ocfs2_super_unlock(struct ocfs2_super *osb, 136void ocfs2_super_unlock(struct ocfs2_super *osb,
123 int ex); 137 int ex);
124int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno, int ex); 138int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno);
125void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno, int ex); 139void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno);
126 140
127int ocfs2_rename_lock(struct ocfs2_super *osb); 141int ocfs2_rename_lock(struct ocfs2_super *osb);
128void ocfs2_rename_unlock(struct ocfs2_super *osb); 142void ocfs2_rename_unlock(struct ocfs2_super *osb);
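
The OI_LS_* values play the same role for the inode cluster lock that i_mutex subclasses play elsewhere in the VFS: they let lockdep tell apart two instances of one lock class that are legitimately held together, such as parent and child directories. A minimal sketch of the analogous mutex_lock_nested() pattern; the function name is made up:

#include <linux/fs.h>
#include <linux/mutex.h>

static void example_lock_parent_then_child(struct inode *parent, struct inode *child)
{
	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&child->i_mutex, I_MUTEX_CHILD);

	/* ... operate on both directories ... */

	mutex_unlock(&child->i_mutex);
	mutex_unlock(&parent->i_mutex);
}
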
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 07267e0da909..62442e413a00 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2026,7 +2026,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
2026 size_t len, 2026 size_t len,
2027 unsigned int flags) 2027 unsigned int flags)
2028{ 2028{
2029 int ret = 0; 2029 int ret = 0, lock_level = 0;
2030 struct inode *inode = in->f_path.dentry->d_inode; 2030 struct inode *inode = in->f_path.dentry->d_inode;
2031 2031
2032 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe, 2032 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
@@ -2037,12 +2037,12 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
2037 /* 2037 /*
2038 * See the comment in ocfs2_file_aio_read() 2038 * See the comment in ocfs2_file_aio_read()
2039 */ 2039 */
2040 ret = ocfs2_inode_lock(inode, NULL, 0); 2040 ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
2041 if (ret < 0) { 2041 if (ret < 0) {
2042 mlog_errno(ret); 2042 mlog_errno(ret);
2043 goto bail; 2043 goto bail;
2044 } 2044 }
2045 ocfs2_inode_unlock(inode, 0); 2045 ocfs2_inode_unlock(inode, lock_level);
2046 2046
2047 ret = generic_file_splice_read(in, ppos, pipe, len, flags); 2047 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2048 2048
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 10e1fa87396a..4dc8890ba316 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -215,6 +215,8 @@ bail:
215static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) 215static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
216{ 216{
217 struct ocfs2_find_inode_args *args = opaque; 217 struct ocfs2_find_inode_args *args = opaque;
218 static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
219 ocfs2_file_ip_alloc_sem_key;
218 220
219 mlog_entry("inode = %p, opaque = %p\n", inode, opaque); 221 mlog_entry("inode = %p, opaque = %p\n", inode, opaque);
220 222
@@ -223,6 +225,15 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
223 if (args->fi_sysfile_type != 0) 225 if (args->fi_sysfile_type != 0)
224 lockdep_set_class(&inode->i_mutex, 226 lockdep_set_class(&inode->i_mutex,
225 &ocfs2_sysfile_lock_key[args->fi_sysfile_type]); 227 &ocfs2_sysfile_lock_key[args->fi_sysfile_type]);
228 if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE ||
229 args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE ||
230 args->fi_sysfile_type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
231 args->fi_sysfile_type == LOCAL_GROUP_QUOTA_SYSTEM_INODE)
232 lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
233 &ocfs2_quota_ip_alloc_sem_key);
234 else
235 lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
236 &ocfs2_file_ip_alloc_sem_key);
226 237
227 mlog_exit(0); 238 mlog_exit(0);
228 return 0; 239 return 0;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 4a3b9e6b31ad..f033760ecbea 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1880,13 +1880,20 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
1880 1880
1881 os = &osb->osb_orphan_scan; 1881 os = &osb->osb_orphan_scan;
1882 1882
1883 status = ocfs2_orphan_scan_lock(osb, &seqno, DLM_LOCK_EX);
1883 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
1884 goto out;
1885
1886 status = ocfs2_orphan_scan_lock(osb, &seqno);
1884 if (status < 0) { 1887 if (status < 0) {
1885 if (status != -EAGAIN) 1888 if (status != -EAGAIN)
1886 mlog_errno(status); 1889 mlog_errno(status);
1887 goto out; 1890 goto out;
1888 } 1891 }
1889 1892
1893 /* Do not queue the tasks if the volume is being umounted */
1894 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
1895 goto unlock;
1896
1890 if (os->os_seqno != seqno) { 1897 if (os->os_seqno != seqno) {
1891 os->os_seqno = seqno; 1898 os->os_seqno = seqno;
1892 goto unlock; 1899 goto unlock;
@@ -1903,7 +1910,7 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
1903 os->os_count++; 1910 os->os_count++;
1904 os->os_scantime = CURRENT_TIME; 1911 os->os_scantime = CURRENT_TIME;
1905unlock: 1912unlock:
1906 ocfs2_orphan_scan_unlock(osb, seqno, DLM_LOCK_EX); 1913 ocfs2_orphan_scan_unlock(osb, seqno);
1907out: 1914out:
1908 return; 1915 return;
1909} 1916}
@@ -1920,8 +1927,9 @@ void ocfs2_orphan_scan_work(struct work_struct *work)
1920 1927
1921 mutex_lock(&os->os_lock); 1928 mutex_lock(&os->os_lock);
1922 ocfs2_queue_orphan_scan(osb); 1929 ocfs2_queue_orphan_scan(osb);
1923 schedule_delayed_work(&os->os_orphan_scan_work,
1924 ocfs2_orphan_scan_timeout());
1930 if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
1931 schedule_delayed_work(&os->os_orphan_scan_work,
1932 ocfs2_orphan_scan_timeout());
1925 mutex_unlock(&os->os_lock); 1933 mutex_unlock(&os->os_lock);
1926} 1934}
1927 1935
@@ -1930,26 +1938,33 @@ void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
1930 struct ocfs2_orphan_scan *os; 1938 struct ocfs2_orphan_scan *os;
1931 1939
1932 os = &osb->osb_orphan_scan; 1940 os = &osb->osb_orphan_scan;
1933 mutex_lock(&os->os_lock);
1934 cancel_delayed_work(&os->os_orphan_scan_work);
1935 mutex_unlock(&os->os_lock);
1941 if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
1942 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
1943 mutex_lock(&os->os_lock);
1944 cancel_delayed_work(&os->os_orphan_scan_work);
1945 mutex_unlock(&os->os_lock);
1946 }
1936} 1947}
1937 1948
1938int ocfs2_orphan_scan_init(struct ocfs2_super *osb) 1949void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
1939{ 1950{
1940 struct ocfs2_orphan_scan *os; 1951 struct ocfs2_orphan_scan *os;
1941 1952
1942 os = &osb->osb_orphan_scan; 1953 os = &osb->osb_orphan_scan;
1943 os->os_osb = osb; 1954 os->os_osb = osb;
1944 os->os_count = 0; 1955 os->os_count = 0;
1956 os->os_seqno = 0;
1945 os->os_scantime = CURRENT_TIME; 1957 os->os_scantime = CURRENT_TIME;
1946 mutex_init(&os->os_lock); 1958 mutex_init(&os->os_lock);
1947
1948 INIT_DELAYED_WORK(&os->os_orphan_scan_work,
1949 ocfs2_orphan_scan_work);
1950 schedule_delayed_work(&os->os_orphan_scan_work,
1951 ocfs2_orphan_scan_timeout());
1952 return 0;
1959 INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
1960
1961 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1962 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
1963 else {
1964 atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
1965 schedule_delayed_work(&os->os_orphan_scan_work,
1966 ocfs2_orphan_scan_timeout());
1967 }
1953} 1968}
1954 1969
1955struct ocfs2_orphan_filldir_priv { 1970struct ocfs2_orphan_filldir_priv {
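
The orphan-scan rework is an instance of a shutdown-safe pattern: a delayed work item re-arms itself only while a state flag says it may, and the stop path flips the flag before cancelling. One simple variant of the idea, with made-up names and cancel_delayed_work_sync() standing in for the mutex used above (atomic_t comes from <asm/atomic.h> on kernels of this vintage):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <asm/atomic.h>

static atomic_t example_active = ATOMIC_INIT(0);
static struct delayed_work example_work;

static void example_work_fn(struct work_struct *work)
{
	/* ... do one scan pass ... */

	if (atomic_read(&example_active))		/* re-arm only while active */
		schedule_delayed_work(&example_work, 10 * HZ);
}

static void example_start(void)
{
	INIT_DELAYED_WORK(&example_work, example_work_fn);
	atomic_set(&example_active, 1);
	schedule_delayed_work(&example_work, 10 * HZ);
}

static void example_stop(void)
{
	atomic_set(&example_active, 0);			/* no new re-arms after this */
	cancel_delayed_work_sync(&example_work);	/* wait out a pass in flight */
}
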
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 61045eeb3f6e..5432c7f79cc6 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -144,7 +144,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
144} 144}
145 145
146/* Exported only for the journal struct init code in super.c. Do not call. */ 146/* Exported only for the journal struct init code in super.c. Do not call. */
147int ocfs2_orphan_scan_init(struct ocfs2_super *osb); 147void ocfs2_orphan_scan_init(struct ocfs2_super *osb);
148void ocfs2_orphan_scan_stop(struct ocfs2_super *osb); 148void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
149void ocfs2_orphan_scan_exit(struct ocfs2_super *osb); 149void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
150 150
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 33464c6b60a2..8601f934010b 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -118,7 +118,7 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
118 mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len, 118 mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len,
119 dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); 119 dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno);
120 120
121 status = ocfs2_inode_lock(dir, NULL, 0); 121 status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT);
122 if (status < 0) { 122 if (status < 0) {
123 if (status != -ENOENT) 123 if (status != -ENOENT)
124 mlog_errno(status); 124 mlog_errno(status);
@@ -636,7 +636,7 @@ static int ocfs2_link(struct dentry *old_dentry,
636 if (S_ISDIR(inode->i_mode)) 636 if (S_ISDIR(inode->i_mode))
637 return -EPERM; 637 return -EPERM;
638 638
639 err = ocfs2_inode_lock(dir, &parent_fe_bh, 1); 639 err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
640 if (err < 0) { 640 if (err < 0) {
641 if (err != -ENOENT) 641 if (err != -ENOENT)
642 mlog_errno(err); 642 mlog_errno(err);
@@ -800,7 +800,8 @@ static int ocfs2_unlink(struct inode *dir,
800 return -EPERM; 800 return -EPERM;
801 } 801 }
802 802
803 status = ocfs2_inode_lock(dir, &parent_node_bh, 1); 803 status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1,
804 OI_LS_PARENT);
804 if (status < 0) { 805 if (status < 0) {
805 if (status != -ENOENT) 806 if (status != -ENOENT)
806 mlog_errno(status); 807 mlog_errno(status);
@@ -978,7 +979,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
978 inode1 = tmpinode; 979 inode1 = tmpinode;
979 } 980 }
980 /* lock id2 */ 981 /* lock id2 */
981 status = ocfs2_inode_lock(inode2, bh2, 1); 982 status = ocfs2_inode_lock_nested(inode2, bh2, 1,
983 OI_LS_RENAME1);
982 if (status < 0) { 984 if (status < 0) {
983 if (status != -ENOENT) 985 if (status != -ENOENT)
984 mlog_errno(status); 986 mlog_errno(status);
@@ -987,7 +989,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
987 } 989 }
988 990
989 /* lock id1 */ 991 /* lock id1 */
990 status = ocfs2_inode_lock(inode1, bh1, 1); 992 status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
991 if (status < 0) { 993 if (status < 0) {
992 /* 994 /*
993 * An error return must mean that no cluster locks 995 * An error return must mean that no cluster locks
@@ -1103,7 +1105,8 @@ static int ocfs2_rename(struct inode *old_dir,
1103 * won't have to concurrently downconvert the inode and the 1105 * won't have to concurrently downconvert the inode and the
1104 * dentry locks. 1106 * dentry locks.
1105 */ 1107 */
1106 status = ocfs2_inode_lock(old_inode, &old_inode_bh, 1); 1108 status = ocfs2_inode_lock_nested(old_inode, &old_inode_bh, 1,
1109 OI_LS_PARENT);
1107 if (status < 0) { 1110 if (status < 0) {
1108 if (status != -ENOENT) 1111 if (status != -ENOENT)
1109 mlog_errno(status); 1112 mlog_errno(status);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 18c1d9ec1c93..c9345ebb8493 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -34,6 +34,7 @@
34#include <linux/workqueue.h> 34#include <linux/workqueue.h>
35#include <linux/kref.h> 35#include <linux/kref.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/lockdep.h>
37#ifndef CONFIG_OCFS2_COMPAT_JBD 38#ifndef CONFIG_OCFS2_COMPAT_JBD
38# include <linux/jbd2.h> 39# include <linux/jbd2.h>
39#else 40#else
@@ -152,6 +153,14 @@ struct ocfs2_lock_res {
152 unsigned int l_lock_max_exmode; /* Max wait for EX */ 153 unsigned int l_lock_max_exmode; /* Max wait for EX */
153 unsigned int l_lock_refresh; /* Disk refreshes */ 154 unsigned int l_lock_refresh; /* Disk refreshes */
154#endif 155#endif
156#ifdef CONFIG_DEBUG_LOCK_ALLOC
157 struct lockdep_map l_lockdep_map;
158#endif
159};
160
161enum ocfs2_orphan_scan_state {
162 ORPHAN_SCAN_ACTIVE,
163 ORPHAN_SCAN_INACTIVE
155}; 164};
156 165
157struct ocfs2_orphan_scan { 166struct ocfs2_orphan_scan {
@@ -162,6 +171,7 @@ struct ocfs2_orphan_scan {
162 struct timespec os_scantime; /* time this node ran the scan */ 171 struct timespec os_scantime; /* time this node ran the scan */
163 u32 os_count; /* tracks node specific scans */ 172 u32 os_count; /* tracks node specific scans */
164 u32 os_seqno; /* tracks cluster wide scans */ 173 u32 os_seqno; /* tracks cluster wide scans */
174 atomic_t os_state; /* ACTIVE or INACTIVE */
165}; 175};
166 176
167struct ocfs2_dlm_debug { 177struct ocfs2_dlm_debug {
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index fcd120f1493a..3f661376a2de 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -236,6 +236,16 @@ static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
236 return dlm_status_to_errno(lksb->lksb_o2dlm.status); 236 return dlm_status_to_errno(lksb->lksb_o2dlm.status);
237} 237}
238 238
239/*
240 * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB
241 * contents, it will zero out the LVB. Thus the caller can always trust
242 * the contents.
243 */
244static int o2cb_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
245{
246 return 1;
247}
248
239static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb) 249static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb)
240{ 250{
241 return (void *)(lksb->lksb_o2dlm.lvb); 251 return (void *)(lksb->lksb_o2dlm.lvb);
@@ -354,6 +364,7 @@ static struct ocfs2_stack_operations o2cb_stack_ops = {
354 .dlm_lock = o2cb_dlm_lock, 364 .dlm_lock = o2cb_dlm_lock,
355 .dlm_unlock = o2cb_dlm_unlock, 365 .dlm_unlock = o2cb_dlm_unlock,
356 .lock_status = o2cb_dlm_lock_status, 366 .lock_status = o2cb_dlm_lock_status,
367 .lvb_valid = o2cb_dlm_lvb_valid,
357 .lock_lvb = o2cb_dlm_lvb, 368 .lock_lvb = o2cb_dlm_lvb,
358 .dump_lksb = o2cb_dump_lksb, 369 .dump_lksb = o2cb_dump_lksb,
359}; 370};
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 9b76d41a8ac6..ff4c798a5635 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -738,6 +738,13 @@ static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
738 return lksb->lksb_fsdlm.sb_status; 738 return lksb->lksb_fsdlm.sb_status;
739} 739}
740 740
741static int user_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
742{
743 int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID;
744
745 return !invalid;
746}
747
741static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb) 748static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
742{ 749{
743 if (!lksb->lksb_fsdlm.sb_lvbptr) 750 if (!lksb->lksb_fsdlm.sb_lvbptr)
@@ -873,6 +880,7 @@ static struct ocfs2_stack_operations ocfs2_user_plugin_ops = {
873 .dlm_lock = user_dlm_lock, 880 .dlm_lock = user_dlm_lock,
874 .dlm_unlock = user_dlm_unlock, 881 .dlm_unlock = user_dlm_unlock,
875 .lock_status = user_dlm_lock_status, 882 .lock_status = user_dlm_lock_status,
883 .lvb_valid = user_dlm_lvb_valid,
876 .lock_lvb = user_dlm_lvb, 884 .lock_lvb = user_dlm_lvb,
877 .plock = user_plock, 885 .plock = user_plock,
878 .dump_lksb = user_dlm_dump_lksb, 886 .dump_lksb = user_dlm_dump_lksb,
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 68b668b0e60a..3f2f1c45b7b6 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -6,7 +6,7 @@
6 * Code which implements an OCFS2 specific interface to underlying 6 * Code which implements an OCFS2 specific interface to underlying
7 * cluster stacks. 7 * cluster stacks.
8 * 8 *
9 * Copyright (C) 2007 Oracle. All rights reserved. 9 * Copyright (C) 2007, 2009 Oracle. All rights reserved.
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public 12 * modify it under the terms of the GNU General Public
@@ -271,11 +271,12 @@ int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
271} 271}
272EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status); 272EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
273 273
274/* 274int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
275 * Why don't we cast to ocfs2_meta_lvb? The "clean" answer is that we 275{
276 * don't cast at the glue level. The real answer is that the header 276 return active_stack->sp_ops->lvb_valid(lksb);
277 * ordering is nigh impossible. 277}
278 */ 278EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid);
279
279void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb) 280void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb)
280{ 281{
281 return active_stack->sp_ops->lock_lvb(lksb); 282 return active_stack->sp_ops->lock_lvb(lksb);
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index c571af375ef8..03a44d60eac9 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -186,6 +186,11 @@ struct ocfs2_stack_operations {
186 int (*lock_status)(union ocfs2_dlm_lksb *lksb); 186 int (*lock_status)(union ocfs2_dlm_lksb *lksb);
187 187
188 /* 188 /*
189 * Return non-zero if the LVB is valid.
190 */
191 int (*lvb_valid)(union ocfs2_dlm_lksb *lksb);
192
193 /*
189 * Pull the lvb pointer off of the stack-specific lksb. 194 * Pull the lvb pointer off of the stack-specific lksb.
190 */ 195 */
191 void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb); 196 void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
@@ -252,6 +257,7 @@ int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
252 struct ocfs2_lock_res *astarg); 257 struct ocfs2_lock_res *astarg);
253 258
254int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb); 259int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb);
260int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb);
255void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb); 261void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb);
256void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb); 262void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb);
257 263
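
Between stack_o2cb.c, stack_user.c and the stackglue pieces above, the glue layer gains ocfs2_dlm_lvb_valid(): o2dlm always reports a usable LVB, while fs/dlm flags an untrusted one with DLM_SBF_VALNOTVALID. A caller-side sketch follows; it is not taken from the patch (the dlmglue.c consumer is outside this section), and the example_ helper name plus the use of struct ocfs2_meta_lvb fields are assumptions.

/* Illustrative consumer: only parse the LVB when the stack vouches for it. */
static int example_refresh_from_lvb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb;

	if (!ocfs2_dlm_lvb_valid(&lockres->l_lksb))
		return 0;	/* caller falls back to reading the inode */

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	/* ... copy fields such as lvb->lvb_isize into the in-memory inode ... */
	return 1;
}
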
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 8439f6b324b9..73a16d4666dc 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -923,14 +923,23 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
923 int nr) 923 int nr)
924{ 924{
925 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; 925 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
926 int ret;
926 927
927 if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap)) 928 if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
928 return 0; 929 return 0;
929 if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data) 930
931 if (!buffer_jbd(bg_bh))
930 return 1; 932 return 1;
931 933
934 jbd_lock_bh_state(bg_bh);
932 bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data; 935 bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
933 return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap); 936 if (bg)
937 ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
938 else
939 ret = 1;
940 jbd_unlock_bh_state(bg_bh);
941
942 return ret;
934} 943}
935 944
936static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, 945static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
@@ -1885,6 +1894,7 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle,
1885 unsigned int tmp; 1894 unsigned int tmp;
1886 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; 1895 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
1887 struct ocfs2_group_desc *undo_bg = NULL; 1896 struct ocfs2_group_desc *undo_bg = NULL;
1897 int cluster_bitmap = 0;
1888 1898
1889 mlog_entry_void(); 1899 mlog_entry_void();
1890 1900
@@ -1905,18 +1915,28 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle,
1905 } 1915 }
1906 1916
1907 if (ocfs2_is_cluster_bitmap(alloc_inode)) 1917 if (ocfs2_is_cluster_bitmap(alloc_inode))
1908 undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data; 1918 cluster_bitmap = 1;
1919
1920 if (cluster_bitmap) {
1921 jbd_lock_bh_state(group_bh);
1922 undo_bg = (struct ocfs2_group_desc *)
1923 bh2jh(group_bh)->b_committed_data;
1924 BUG_ON(!undo_bg);
1925 }
1909 1926
1910 tmp = num_bits; 1927 tmp = num_bits;
1911 while(tmp--) { 1928 while(tmp--) {
1912 ocfs2_clear_bit((bit_off + tmp), 1929 ocfs2_clear_bit((bit_off + tmp),
1913 (unsigned long *) bg->bg_bitmap); 1930 (unsigned long *) bg->bg_bitmap);
1914 if (ocfs2_is_cluster_bitmap(alloc_inode)) 1931 if (cluster_bitmap)
1915 ocfs2_set_bit(bit_off + tmp, 1932 ocfs2_set_bit(bit_off + tmp,
1916 (unsigned long *) undo_bg->bg_bitmap); 1933 (unsigned long *) undo_bg->bg_bitmap);
1917 } 1934 }
1918 le16_add_cpu(&bg->bg_free_bits_count, num_bits); 1935 le16_add_cpu(&bg->bg_free_bits_count, num_bits);
1919 1936
1937 if (cluster_bitmap)
1938 jbd_unlock_bh_state(group_bh);
1939
1920 status = ocfs2_journal_dirty(handle, group_bh); 1940 status = ocfs2_journal_dirty(handle, group_bh);
1921 if (status < 0) 1941 if (status < 0)
1922 mlog_errno(status); 1942 mlog_errno(status);
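
Both suballoc.c hunks enforce the same rule: jh->b_committed_data may change or be NULL, so it must only be dereferenced under jbd_lock_bh_state(). A condensed sketch of that access pattern (helper name invented, ocfs2/jbd headers assumed):

/* Peek at the committed copy of a journalled group descriptor safely. */
static int example_bit_set_in_committed(struct buffer_head *bh, int nr)
{
	struct ocfs2_group_desc *bg;
	int ret = 0;

	if (!buffer_jbd(bh))
		return 0;		/* no journalled copy to inspect */

	jbd_lock_bh_state(bh);
	bg = (struct ocfs2_group_desc *) bh2jh(bh)->b_committed_data;
	if (bg)
		ret = ocfs2_test_bit(nr, (unsigned long *) bg->bg_bitmap);
	jbd_unlock_bh_state(bh);

	return ret;
}
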
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 0d3ed7407a04..7efb349fb9bd 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -205,11 +205,10 @@ static const match_table_t tokens = {
205#ifdef CONFIG_DEBUG_FS 205#ifdef CONFIG_DEBUG_FS
206static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) 206static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
207{ 207{
208 int out = 0;
209 int i;
210 struct ocfs2_cluster_connection *cconn = osb->cconn; 208 struct ocfs2_cluster_connection *cconn = osb->cconn;
211 struct ocfs2_recovery_map *rm = osb->recovery_map; 209 struct ocfs2_recovery_map *rm = osb->recovery_map;
212 struct ocfs2_orphan_scan *os; 210 struct ocfs2_orphan_scan *os = &osb->osb_orphan_scan;
211 int i, out = 0;
213 212
214 out += snprintf(buf + out, len - out, 213 out += snprintf(buf + out, len - out,
215 "%10s => Id: %-s Uuid: %-s Gen: 0x%X Label: %-s\n", 214 "%10s => Id: %-s Uuid: %-s Gen: 0x%X Label: %-s\n",
@@ -234,20 +233,24 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
234 "%10s => Opts: 0x%lX AtimeQuanta: %u\n", "Mount", 233 "%10s => Opts: 0x%lX AtimeQuanta: %u\n", "Mount",
235 osb->s_mount_opt, osb->s_atime_quantum); 234 osb->s_mount_opt, osb->s_atime_quantum);
236 235
237 out += snprintf(buf + out, len - out, 236 if (cconn) {
238 "%10s => Stack: %s Name: %*s Version: %d.%d\n", 237 out += snprintf(buf + out, len - out,
239 "Cluster", 238 "%10s => Stack: %s Name: %*s "
240 (*osb->osb_cluster_stack == '\0' ? 239 "Version: %d.%d\n", "Cluster",
241 "o2cb" : osb->osb_cluster_stack), 240 (*osb->osb_cluster_stack == '\0' ?
242 cconn->cc_namelen, cconn->cc_name, 241 "o2cb" : osb->osb_cluster_stack),
243 cconn->cc_version.pv_major, cconn->cc_version.pv_minor); 242 cconn->cc_namelen, cconn->cc_name,
243 cconn->cc_version.pv_major,
244 cconn->cc_version.pv_minor);
245 }
244 246
245 spin_lock(&osb->dc_task_lock); 247 spin_lock(&osb->dc_task_lock);
246 out += snprintf(buf + out, len - out, 248 out += snprintf(buf + out, len - out,
247 "%10s => Pid: %d Count: %lu WakeSeq: %lu " 249 "%10s => Pid: %d Count: %lu WakeSeq: %lu "
248 "WorkSeq: %lu\n", "DownCnvt", 250 "WorkSeq: %lu\n", "DownCnvt",
249 task_pid_nr(osb->dc_task), osb->blocked_lock_count, 251 (osb->dc_task ? task_pid_nr(osb->dc_task) : -1),
250 osb->dc_wake_sequence, osb->dc_work_sequence); 252 osb->blocked_lock_count, osb->dc_wake_sequence,
253 osb->dc_work_sequence);
251 spin_unlock(&osb->dc_task_lock); 254 spin_unlock(&osb->dc_task_lock);
252 255
253 spin_lock(&osb->osb_lock); 256 spin_lock(&osb->osb_lock);
@@ -267,14 +270,15 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
267 270
268 out += snprintf(buf + out, len - out, 271 out += snprintf(buf + out, len - out,
269 "%10s => Pid: %d Interval: %lu Needs: %d\n", "Commit", 272 "%10s => Pid: %d Interval: %lu Needs: %d\n", "Commit",
270 task_pid_nr(osb->commit_task), osb->osb_commit_interval, 273 (osb->commit_task ? task_pid_nr(osb->commit_task) : -1),
274 osb->osb_commit_interval,
271 atomic_read(&osb->needs_checkpoint)); 275 atomic_read(&osb->needs_checkpoint));
272 276
273 out += snprintf(buf + out, len - out, 277 out += snprintf(buf + out, len - out,
274 "%10s => State: %d NumTxns: %d TxnId: %lu\n", 278 "%10s => State: %d TxnId: %lu NumTxns: %d\n",
275 "Journal", osb->journal->j_state, 279 "Journal", osb->journal->j_state,
276 atomic_read(&osb->journal->j_num_trans), 280 osb->journal->j_trans_id,
277 osb->journal->j_trans_id); 281 atomic_read(&osb->journal->j_num_trans));
278 282
279 out += snprintf(buf + out, len - out, 283 out += snprintf(buf + out, len - out,
280 "%10s => GlobalAllocs: %d LocalAllocs: %d " 284 "%10s => GlobalAllocs: %d LocalAllocs: %d "
@@ -300,9 +304,18 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
300 atomic_read(&osb->s_num_inodes_stolen)); 304 atomic_read(&osb->s_num_inodes_stolen));
301 spin_unlock(&osb->osb_lock); 305 spin_unlock(&osb->osb_lock);
302 306
307 out += snprintf(buf + out, len - out, "OrphanScan => ");
308 out += snprintf(buf + out, len - out, "Local: %u Global: %u ",
309 os->os_count, os->os_seqno);
310 out += snprintf(buf + out, len - out, " Last Scan: ");
311 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
312 out += snprintf(buf + out, len - out, "Disabled\n");
313 else
314 out += snprintf(buf + out, len - out, "%lu seconds ago\n",
315 (get_seconds() - os->os_scantime.tv_sec));
316
303 out += snprintf(buf + out, len - out, "%10s => %3s %10s\n", 317 out += snprintf(buf + out, len - out, "%10s => %3s %10s\n",
304 "Slots", "Num", "RecoGen"); 318 "Slots", "Num", "RecoGen");
305
306 for (i = 0; i < osb->max_slots; ++i) { 319 for (i = 0; i < osb->max_slots; ++i) {
307 out += snprintf(buf + out, len - out, 320 out += snprintf(buf + out, len - out,
308 "%10s %c %3d %10d\n", 321 "%10s %c %3d %10d\n",
@@ -311,13 +324,6 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
311 i, osb->slot_recovery_generations[i]); 324 i, osb->slot_recovery_generations[i]);
312 } 325 }
313 326
314 os = &osb->osb_orphan_scan;
315 out += snprintf(buf + out, len - out, "Orphan Scan=> ");
316 out += snprintf(buf + out, len - out, "Local: %u Global: %u ",
317 os->os_count, os->os_seqno);
318 out += snprintf(buf + out, len - out, " Last Scan: %lu seconds ago\n",
319 (get_seconds() - os->os_scantime.tv_sec));
320
321 return out; 327 return out;
322} 328}
323 329
@@ -1175,6 +1181,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1175 atomic_set(&osb->vol_state, VOLUME_MOUNTED_QUOTAS); 1181 atomic_set(&osb->vol_state, VOLUME_MOUNTED_QUOTAS);
1176 wake_up(&osb->osb_mount_event); 1182 wake_up(&osb->osb_mount_event);
1177 1183
1184 /* Start this when the mount is almost sure of being successful */
1185 ocfs2_orphan_scan_init(osb);
1186
1178 mlog_exit(status); 1187 mlog_exit(status);
1179 return status; 1188 return status;
1180 1189
@@ -1810,14 +1819,15 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1810 1819
1811 debugfs_remove(osb->osb_ctxt); 1820 debugfs_remove(osb->osb_ctxt);
1812 1821
1822 /* Orphan scan should be stopped as early as possible */
1823 ocfs2_orphan_scan_stop(osb);
1824
1813 ocfs2_disable_quotas(osb); 1825 ocfs2_disable_quotas(osb);
1814 1826
1815 ocfs2_shutdown_local_alloc(osb); 1827 ocfs2_shutdown_local_alloc(osb);
1816 1828
1817 ocfs2_truncate_log_shutdown(osb); 1829 ocfs2_truncate_log_shutdown(osb);
1818 1830
1819 ocfs2_orphan_scan_stop(osb);
1820
1821 /* This will disable recovery and flush any recovery work. */ 1831 /* This will disable recovery and flush any recovery work. */
1822 ocfs2_recovery_exit(osb); 1832 ocfs2_recovery_exit(osb);
1823 1833
@@ -1978,13 +1988,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
1978 goto bail; 1988 goto bail;
1979 } 1989 }
1980 1990
1981 status = ocfs2_orphan_scan_init(osb);
1982 if (status) {
1983 mlog(ML_ERROR, "Unable to initialize delayed orphan scan\n");
1984 mlog_errno(status);
1985 goto bail;
1986 }
1987
1988 init_waitqueue_head(&osb->checkpoint_event); 1991 init_waitqueue_head(&osb->checkpoint_event);
1989 atomic_set(&osb->needs_checkpoint, 0); 1992 atomic_set(&osb->needs_checkpoint, 0);
1990 1993
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index ab713ebdd546..40e53702948c 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -50,6 +50,10 @@ static inline int is_in_system_inode_array(struct ocfs2_super *osb,
50 int type, 50 int type,
51 u32 slot); 51 u32 slot);
52 52
53#ifdef CONFIG_DEBUG_LOCK_ALLOC
54static struct lock_class_key ocfs2_sysfile_cluster_lock_key[NUM_SYSTEM_INODES];
55#endif
56
53static inline int is_global_system_inode(int type) 57static inline int is_global_system_inode(int type)
54{ 58{
55 return type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE && 59 return type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE &&
@@ -118,6 +122,21 @@ static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
118 inode = NULL; 122 inode = NULL;
119 goto bail; 123 goto bail;
120 } 124 }
125#ifdef CONFIG_DEBUG_LOCK_ALLOC
126 if (type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
127 type == LOCAL_GROUP_QUOTA_SYSTEM_INODE ||
128 type == JOURNAL_SYSTEM_INODE) {
129 /* Ignore inode lock on these inodes as the lock does not
130 * really belong to any process and lockdep cannot handle
131 * that */
132 OCFS2_I(inode)->ip_inode_lockres.l_lockdep_map.key = NULL;
133 } else {
134 lockdep_init_map(&OCFS2_I(inode)->ip_inode_lockres.
135 l_lockdep_map,
136 ocfs2_system_inodes[type].si_name,
137 &ocfs2_sysfile_cluster_lock_key[type], 0);
138 }
139#endif
121bail: 140bail:
122 141
123 return inode; 142 return inode;
diff --git a/fs/open.c b/fs/open.c
index 7200e23d9258..dd98e8076024 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -378,63 +378,63 @@ SYSCALL_ALIAS(sys_ftruncate64, SyS_ftruncate64);
378#endif 378#endif
379#endif /* BITS_PER_LONG == 32 */ 379#endif /* BITS_PER_LONG == 32 */
380 380
381SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len) 381
382int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
382{ 383{
383 struct file *file; 384 struct inode *inode = file->f_path.dentry->d_inode;
384 struct inode *inode; 385 long ret;
385 long ret = -EINVAL;
386 386
387 if (offset < 0 || len <= 0) 387 if (offset < 0 || len <= 0)
388 goto out; 388 return -EINVAL;
389 389
390 /* Return error if mode is not supported */ 390 /* Return error if mode is not supported */
391 ret = -EOPNOTSUPP;
392 if (mode && !(mode & FALLOC_FL_KEEP_SIZE)) 391 if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
393 goto out; 392 return -EOPNOTSUPP;
394 393
395 ret = -EBADF;
396 file = fget(fd);
397 if (!file)
398 goto out;
399 if (!(file->f_mode & FMODE_WRITE)) 394 if (!(file->f_mode & FMODE_WRITE))
400 goto out_fput; 395 return -EBADF;
401 /* 396 /*
402 * Revalidate the write permissions, in case security policy has 397 * Revalidate the write permissions, in case security policy has
403 * changed since the files were opened. 398 * changed since the files were opened.
404 */ 399 */
405 ret = security_file_permission(file, MAY_WRITE); 400 ret = security_file_permission(file, MAY_WRITE);
406 if (ret) 401 if (ret)
407 goto out_fput; 402 return ret;
408 403
409 inode = file->f_path.dentry->d_inode;
410
411 ret = -ESPIPE;
412 if (S_ISFIFO(inode->i_mode)) 404 if (S_ISFIFO(inode->i_mode))
413 goto out_fput; 405 return -ESPIPE;
414 406
415 ret = -ENODEV;
416 /* 407 /*
417 * Let individual file system decide if it supports preallocation 408 * Let individual file system decide if it supports preallocation
418 * for directories or not. 409 * for directories or not.
419 */ 410 */
420 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) 411 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
421 goto out_fput; 412 return -ENODEV;
422 413
423 ret = -EFBIG;
424 /* Check for wrap through zero too */ 414 /* Check for wrap through zero too */
425 if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0)) 415 if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
426 goto out_fput; 416 return -EFBIG;
427 417
428 if (inode->i_op->fallocate) 418 if (!inode->i_op->fallocate)
429 ret = inode->i_op->fallocate(inode, mode, offset, len); 419 return -EOPNOTSUPP;
430 else
431 ret = -EOPNOTSUPP;
432 420
433out_fput: 421 return inode->i_op->fallocate(inode, mode, offset, len);
434 fput(file);
435out:
436 return ret;
437} 422}
423
424SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
425{
426 struct file *file;
427 int error = -EBADF;
428
429 file = fget(fd);
430 if (file) {
431 error = do_fallocate(file, mode, offset, len);
432 fput(file);
433 }
434
435 return error;
436}
437
438#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS 438#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
439asmlinkage long SyS_fallocate(long fd, long mode, loff_t offset, loff_t len) 439asmlinkage long SyS_fallocate(long fd, long mode, loff_t offset, loff_t len)
440{ 440{
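
Factoring the checks out of the syscall into do_fallocate() lets other kernel paths preallocate on a struct file they already hold; the fs.h hunk further down also exports ioctl_preallocate() for the XFS-compatible reservation ioctls. The sketch below shows roughly how such a caller could translate a struct space_resv into a do_fallocate() call; it is illustrative, not the actual fs/ioctl.c code (which this section does not include), and example_resvsp() is an invented name.

#include <linux/falloc.h>	/* struct space_resv (kernel-internal here) */
#include <linux/fs.h>		/* do_fallocate(), i_size_read(), SEEK_* */

/* Translate an XFS-style reservation into a keep-size preallocation. */
static int example_resvsp(struct file *filp, struct space_resv *sr)
{
	loff_t offset = sr->l_start;

	if (sr->l_whence == SEEK_CUR)
		offset += filp->f_pos;
	else if (sr->l_whence == SEEK_END)
		offset += i_size_read(filp->f_path.dentry->d_inode);

	/* do_fallocate() itself rejects offset < 0 and len <= 0. */
	return do_fallocate(filp, FALLOC_FL_KEEP_SIZE, offset, sr->l_len);
}
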
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 6fd0f47e45db..a14d6cd9eeda 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1131,8 +1131,6 @@ static void init_inode(struct inode *inode, struct treepath *path)
1131 REISERFS_I(inode)->i_trans_id = 0; 1131 REISERFS_I(inode)->i_trans_id = 0;
1132 REISERFS_I(inode)->i_jl = NULL; 1132 REISERFS_I(inode)->i_jl = NULL;
1133 mutex_init(&(REISERFS_I(inode)->i_mmap)); 1133 mutex_init(&(REISERFS_I(inode)->i_mmap));
1134 reiserfs_init_acl_access(inode);
1135 reiserfs_init_acl_default(inode);
1136 reiserfs_init_xattr_rwsem(inode); 1134 reiserfs_init_xattr_rwsem(inode);
1137 1135
1138 if (stat_data_v1(ih)) { 1136 if (stat_data_v1(ih)) {
@@ -1834,8 +1832,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1834 REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK; 1832 REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
1835 sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode); 1833 sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
1836 mutex_init(&(REISERFS_I(inode)->i_mmap)); 1834 mutex_init(&(REISERFS_I(inode)->i_mmap));
1837 reiserfs_init_acl_access(inode);
1838 reiserfs_init_acl_default(inode);
1839 reiserfs_init_xattr_rwsem(inode); 1835 reiserfs_init_xattr_rwsem(inode);
1840 1836
1841 /* key to search for correct place for new stat data */ 1837 /* key to search for correct place for new stat data */
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 238e9d9b31e0..18b315d3d104 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -82,7 +82,6 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
82 if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) { 82 if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
83 printk 83 printk
84 ("reiserfs_resize: unable to allocate memory for journal bitmaps\n"); 84 ("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
85 unlock_super(s);
86 return -ENOMEM; 85 return -ENOMEM;
87 } 86 }
88 /* the new journal bitmaps are zero filled, now we copy in the bitmap 87 /* the new journal bitmaps are zero filled, now we copy in the bitmap
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 2969773cfc22..d3aeb061612b 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -529,10 +529,6 @@ static void init_once(void *foo)
529 529
530 INIT_LIST_HEAD(&ei->i_prealloc_list); 530 INIT_LIST_HEAD(&ei->i_prealloc_list);
531 inode_init_once(&ei->vfs_inode); 531 inode_init_once(&ei->vfs_inode);
532#ifdef CONFIG_REISERFS_FS_POSIX_ACL
533 ei->i_acl_access = NULL;
534 ei->i_acl_default = NULL;
535#endif
536} 532}
537 533
538static int init_inodecache(void) 534static int init_inodecache(void)
@@ -580,25 +576,6 @@ static void reiserfs_dirty_inode(struct inode *inode)
580 reiserfs_write_unlock(inode->i_sb); 576 reiserfs_write_unlock(inode->i_sb);
581} 577}
582 578
583#ifdef CONFIG_REISERFS_FS_POSIX_ACL
584static void reiserfs_clear_inode(struct inode *inode)
585{
586 struct posix_acl *acl;
587
588 acl = REISERFS_I(inode)->i_acl_access;
589 if (acl && !IS_ERR(acl))
590 posix_acl_release(acl);
591 REISERFS_I(inode)->i_acl_access = NULL;
592
593 acl = REISERFS_I(inode)->i_acl_default;
594 if (acl && !IS_ERR(acl))
595 posix_acl_release(acl);
596 REISERFS_I(inode)->i_acl_default = NULL;
597}
598#else
599#define reiserfs_clear_inode NULL
600#endif
601
602#ifdef CONFIG_QUOTA 579#ifdef CONFIG_QUOTA
603static ssize_t reiserfs_quota_write(struct super_block *, int, const char *, 580static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
604 size_t, loff_t); 581 size_t, loff_t);
@@ -612,7 +589,6 @@ static const struct super_operations reiserfs_sops = {
612 .write_inode = reiserfs_write_inode, 589 .write_inode = reiserfs_write_inode,
613 .dirty_inode = reiserfs_dirty_inode, 590 .dirty_inode = reiserfs_dirty_inode,
614 .delete_inode = reiserfs_delete_inode, 591 .delete_inode = reiserfs_delete_inode,
615 .clear_inode = reiserfs_clear_inode,
616 .put_super = reiserfs_put_super, 592 .put_super = reiserfs_put_super,
617 .write_super = reiserfs_write_super, 593 .write_super = reiserfs_write_super,
618 .sync_fs = reiserfs_sync_fs, 594 .sync_fs = reiserfs_sync_fs,
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index c303c426fe2b..35d6e672a279 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -188,29 +188,6 @@ static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
188 return ERR_PTR(-EINVAL); 188 return ERR_PTR(-EINVAL);
189} 189}
190 190
191static inline void iset_acl(struct inode *inode, struct posix_acl **i_acl,
192 struct posix_acl *acl)
193{
194 spin_lock(&inode->i_lock);
195 if (*i_acl != ERR_PTR(-ENODATA))
196 posix_acl_release(*i_acl);
197 *i_acl = posix_acl_dup(acl);
198 spin_unlock(&inode->i_lock);
199}
200
201static inline struct posix_acl *iget_acl(struct inode *inode,
202 struct posix_acl **i_acl)
203{
204 struct posix_acl *acl = ERR_PTR(-ENODATA);
205
206 spin_lock(&inode->i_lock);
207 if (*i_acl != ERR_PTR(-ENODATA))
208 acl = posix_acl_dup(*i_acl);
209 spin_unlock(&inode->i_lock);
210
211 return acl;
212}
213
214/* 191/*
215 * Inode operation get_posix_acl(). 192 * Inode operation get_posix_acl().
216 * 193 *
@@ -220,34 +197,29 @@ static inline struct posix_acl *iget_acl(struct inode *inode,
220struct posix_acl *reiserfs_get_acl(struct inode *inode, int type) 197struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
221{ 198{
222 char *name, *value; 199 char *name, *value;
223 struct posix_acl *acl, **p_acl; 200 struct posix_acl *acl;
224 int size; 201 int size;
225 int retval; 202 int retval;
226 struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode); 203
204 acl = get_cached_acl(inode, type);
205 if (acl != ACL_NOT_CACHED)
206 return acl;
227 207
228 switch (type) { 208 switch (type) {
229 case ACL_TYPE_ACCESS: 209 case ACL_TYPE_ACCESS:
230 name = POSIX_ACL_XATTR_ACCESS; 210 name = POSIX_ACL_XATTR_ACCESS;
231 p_acl = &reiserfs_i->i_acl_access;
232 break; 211 break;
233 case ACL_TYPE_DEFAULT: 212 case ACL_TYPE_DEFAULT:
234 name = POSIX_ACL_XATTR_DEFAULT; 213 name = POSIX_ACL_XATTR_DEFAULT;
235 p_acl = &reiserfs_i->i_acl_default;
236 break; 214 break;
237 default: 215 default:
238 return ERR_PTR(-EINVAL); 216 BUG();
239 } 217 }
240 218
241 acl = iget_acl(inode, p_acl);
242 if (acl && !IS_ERR(acl))
243 return acl;
244 else if (PTR_ERR(acl) == -ENODATA)
245 return NULL;
246
247 size = reiserfs_xattr_get(inode, name, NULL, 0); 219 size = reiserfs_xattr_get(inode, name, NULL, 0);
248 if (size < 0) { 220 if (size < 0) {
249 if (size == -ENODATA || size == -ENOSYS) { 221 if (size == -ENODATA || size == -ENOSYS) {
250 *p_acl = ERR_PTR(-ENODATA); 222 set_cached_acl(inode, type, NULL);
251 return NULL; 223 return NULL;
252 } 224 }
253 return ERR_PTR(size); 225 return ERR_PTR(size);
@@ -262,14 +234,13 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
262 /* This shouldn't actually happen as it should have 234 /* This shouldn't actually happen as it should have
263 been caught above.. but just in case */ 235 been caught above.. but just in case */
264 acl = NULL; 236 acl = NULL;
265 *p_acl = ERR_PTR(-ENODATA);
266 } else if (retval < 0) { 237 } else if (retval < 0) {
267 acl = ERR_PTR(retval); 238 acl = ERR_PTR(retval);
268 } else { 239 } else {
269 acl = posix_acl_from_disk(value, retval); 240 acl = posix_acl_from_disk(value, retval);
270 if (!IS_ERR(acl))
271 iset_acl(inode, p_acl, acl);
272 } 241 }
242 if (!IS_ERR(acl))
243 set_cached_acl(inode, type, acl);
273 244
274 kfree(value); 245 kfree(value);
275 return acl; 246 return acl;
@@ -287,10 +258,8 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
287{ 258{
288 char *name; 259 char *name;
289 void *value = NULL; 260 void *value = NULL;
290 struct posix_acl **p_acl;
291 size_t size = 0; 261 size_t size = 0;
292 int error; 262 int error;
293 struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
294 263
295 if (S_ISLNK(inode->i_mode)) 264 if (S_ISLNK(inode->i_mode))
296 return -EOPNOTSUPP; 265 return -EOPNOTSUPP;
@@ -298,7 +267,6 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
298 switch (type) { 267 switch (type) {
299 case ACL_TYPE_ACCESS: 268 case ACL_TYPE_ACCESS:
300 name = POSIX_ACL_XATTR_ACCESS; 269 name = POSIX_ACL_XATTR_ACCESS;
301 p_acl = &reiserfs_i->i_acl_access;
302 if (acl) { 270 if (acl) {
303 mode_t mode = inode->i_mode; 271 mode_t mode = inode->i_mode;
304 error = posix_acl_equiv_mode(acl, &mode); 272 error = posix_acl_equiv_mode(acl, &mode);
@@ -313,7 +281,6 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
313 break; 281 break;
314 case ACL_TYPE_DEFAULT: 282 case ACL_TYPE_DEFAULT:
315 name = POSIX_ACL_XATTR_DEFAULT; 283 name = POSIX_ACL_XATTR_DEFAULT;
316 p_acl = &reiserfs_i->i_acl_default;
317 if (!S_ISDIR(inode->i_mode)) 284 if (!S_ISDIR(inode->i_mode))
318 return acl ? -EACCES : 0; 285 return acl ? -EACCES : 0;
319 break; 286 break;
@@ -346,7 +313,7 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
346 kfree(value); 313 kfree(value);
347 314
348 if (!error) 315 if (!error)
349 iset_acl(inode, p_acl, acl); 316 set_cached_acl(inode, type, acl);
350 317
351 return error; 318 return error;
352} 319}
@@ -379,11 +346,8 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
379 } 346 }
380 347
381 acl = reiserfs_get_acl(dir, ACL_TYPE_DEFAULT); 348 acl = reiserfs_get_acl(dir, ACL_TYPE_DEFAULT);
382 if (IS_ERR(acl)) { 349 if (IS_ERR(acl))
383 if (PTR_ERR(acl) == -ENODATA)
384 goto apply_umask;
385 return PTR_ERR(acl); 350 return PTR_ERR(acl);
386 }
387 351
388 if (acl) { 352 if (acl) {
389 struct posix_acl *acl_copy; 353 struct posix_acl *acl_copy;
diff --git a/fs/super.c b/fs/super.c
index d40d53a22fb5..2761d3e22ed9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -608,6 +608,7 @@ void emergency_remount(void)
608 608
609static DEFINE_IDA(unnamed_dev_ida); 609static DEFINE_IDA(unnamed_dev_ida);
610static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */ 610static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
611static int unnamed_dev_start = 0; /* don't bother trying below it */
611 612
612int set_anon_super(struct super_block *s, void *data) 613int set_anon_super(struct super_block *s, void *data)
613{ 614{
@@ -618,7 +619,9 @@ int set_anon_super(struct super_block *s, void *data)
618 if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0) 619 if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
619 return -ENOMEM; 620 return -ENOMEM;
620 spin_lock(&unnamed_dev_lock); 621 spin_lock(&unnamed_dev_lock);
621 error = ida_get_new(&unnamed_dev_ida, &dev); 622 error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
623 if (!error)
624 unnamed_dev_start = dev + 1;
622 spin_unlock(&unnamed_dev_lock); 625 spin_unlock(&unnamed_dev_lock);
623 if (error == -EAGAIN) 626 if (error == -EAGAIN)
624 /* We raced and lost with another CPU. */ 627 /* We raced and lost with another CPU. */
@@ -629,6 +632,8 @@ int set_anon_super(struct super_block *s, void *data)
629 if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) { 632 if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
630 spin_lock(&unnamed_dev_lock); 633 spin_lock(&unnamed_dev_lock);
631 ida_remove(&unnamed_dev_ida, dev); 634 ida_remove(&unnamed_dev_ida, dev);
635 if (unnamed_dev_start > dev)
636 unnamed_dev_start = dev;
632 spin_unlock(&unnamed_dev_lock); 637 spin_unlock(&unnamed_dev_lock);
633 return -EMFILE; 638 return -EMFILE;
634 } 639 }
@@ -645,6 +650,8 @@ void kill_anon_super(struct super_block *sb)
645 generic_shutdown_super(sb); 650 generic_shutdown_super(sb);
646 spin_lock(&unnamed_dev_lock); 651 spin_lock(&unnamed_dev_lock);
647 ida_remove(&unnamed_dev_ida, slot); 652 ida_remove(&unnamed_dev_ida, slot);
653 if (slot < unnamed_dev_start)
654 unnamed_dev_start = slot;
648 spin_unlock(&unnamed_dev_lock); 655 spin_unlock(&unnamed_dev_lock);
649} 656}
650 657
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index cfd31e229c89..adafcf556531 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -55,9 +55,9 @@
55 * ACL support is not implemented. 55 * ACL support is not implemented.
56 */ 56 */
57 57
58#include "ubifs.h"
58#include <linux/xattr.h> 59#include <linux/xattr.h>
59#include <linux/posix_acl_xattr.h> 60#include <linux/posix_acl_xattr.h>
60#include "ubifs.h"
61 61
62/* 62/*
63 * Limit the number of extended attributes per inode so that the total size 63 * Limit the number of extended attributes per inode so that the total size
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index e48e9a3af763..1e068535b58b 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -238,7 +238,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
238 238
239 mutex_lock(&sbi->s_alloc_mutex); 239 mutex_lock(&sbi->s_alloc_mutex);
240 part_len = sbi->s_partmaps[partition].s_partition_len; 240 part_len = sbi->s_partmaps[partition].s_partition_len;
241 if (first_block < 0 || first_block >= part_len) 241 if (first_block >= part_len)
242 goto out; 242 goto out;
243 243
244 if (first_block + block_count > part_len) 244 if (first_block + block_count > part_len)
@@ -297,7 +297,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
297 mutex_lock(&sbi->s_alloc_mutex); 297 mutex_lock(&sbi->s_alloc_mutex);
298 298
299repeat: 299repeat:
300 if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len) 300 if (goal >= sbi->s_partmaps[partition].s_partition_len)
301 goal = 0; 301 goal = 0;
302 302
303 nr_groups = bitmap->s_nr_groups; 303 nr_groups = bitmap->s_nr_groups;
@@ -666,8 +666,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
666 int8_t etype = -1; 666 int8_t etype = -1;
667 struct udf_inode_info *iinfo; 667 struct udf_inode_info *iinfo;
668 668
669 if (first_block < 0 || 669 if (first_block >= sbi->s_partmaps[partition].s_partition_len)
670 first_block >= sbi->s_partmaps[partition].s_partition_len)
671 return 0; 670 return 0;
672 671
673 iinfo = UDF_I(table); 672 iinfo = UDF_I(table);
@@ -743,7 +742,7 @@ static int udf_table_new_block(struct super_block *sb,
743 return newblock; 742 return newblock;
744 743
745 mutex_lock(&sbi->s_alloc_mutex); 744 mutex_lock(&sbi->s_alloc_mutex);
746 if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len) 745 if (goal >= sbi->s_partmaps[partition].s_partition_len)
747 goal = 0; 746 goal = 0;
748 747
749 /* We search for the closest matching block to goal. If we find 748 /* We search for the closest matching block to goal. If we find
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 703843f30ffd..1b88fd5df05d 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -56,7 +56,12 @@ unsigned long udf_get_last_block(struct super_block *sb)
56 struct block_device *bdev = sb->s_bdev; 56 struct block_device *bdev = sb->s_bdev;
57 unsigned long lblock = 0; 57 unsigned long lblock = 0;
58 58
59 if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock)) 59 /*
60 * ioctl failed or returned obviously bogus value?
61 * Try using the device size...
62 */
63 if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) ||
64 lblock == 0)
60 lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits; 65 lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
61 66
62 if (lblock) 67 if (lblock)
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 1e9d1246eebc..b23a54506446 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -25,14 +25,10 @@
25#include <linux/posix_acl_xattr.h> 25#include <linux/posix_acl_xattr.h>
26 26
27 27
28#define XFS_ACL_NOT_CACHED ((void *)-1)
29
30/* 28/*
31 * Locking scheme: 29 * Locking scheme:
32 * - all ACL updates are protected by inode->i_mutex, which is taken before 30 * - all ACL updates are protected by inode->i_mutex, which is taken before
33 * calling into this file. 31 * calling into this file.
34 * - access and updates to the ip->i_acl and ip->i_default_acl pointers are
35 * protected by inode->i_lock.
36 */ 32 */
37 33
38STATIC struct posix_acl * 34STATIC struct posix_acl *
@@ -102,59 +98,35 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
102 } 98 }
103} 99}
104 100
105/*
106 * Update the cached ACL pointer in the inode.
107 *
108 * Because we don't hold any locks while reading/writing the attribute
109 * from/to disk another thread could have raced and updated the cached
110 * ACL value before us. In that case we release the previous cached value
111 * and update it with our new value.
112 */
113STATIC void
114xfs_update_cached_acl(struct inode *inode, struct posix_acl **p_acl,
115 struct posix_acl *acl)
116{
117 spin_lock(&inode->i_lock);
118 if (*p_acl && *p_acl != XFS_ACL_NOT_CACHED)
119 posix_acl_release(*p_acl);
120 *p_acl = posix_acl_dup(acl);
121 spin_unlock(&inode->i_lock);
122}
123
124struct posix_acl * 101struct posix_acl *
125xfs_get_acl(struct inode *inode, int type) 102xfs_get_acl(struct inode *inode, int type)
126{ 103{
127 struct xfs_inode *ip = XFS_I(inode); 104 struct xfs_inode *ip = XFS_I(inode);
128 struct posix_acl *acl = NULL, **p_acl; 105 struct posix_acl *acl;
129 struct xfs_acl *xfs_acl; 106 struct xfs_acl *xfs_acl;
130 int len = sizeof(struct xfs_acl); 107 int len = sizeof(struct xfs_acl);
131 char *ea_name; 108 char *ea_name;
132 int error; 109 int error;
133 110
111 acl = get_cached_acl(inode, type);
112 if (acl != ACL_NOT_CACHED)
113 return acl;
114
134 switch (type) { 115 switch (type) {
135 case ACL_TYPE_ACCESS: 116 case ACL_TYPE_ACCESS:
136 ea_name = SGI_ACL_FILE; 117 ea_name = SGI_ACL_FILE;
137 p_acl = &ip->i_acl;
138 break; 118 break;
139 case ACL_TYPE_DEFAULT: 119 case ACL_TYPE_DEFAULT:
140 ea_name = SGI_ACL_DEFAULT; 120 ea_name = SGI_ACL_DEFAULT;
141 p_acl = &ip->i_default_acl;
142 break; 121 break;
143 default: 122 default:
144 return ERR_PTR(-EINVAL); 123 BUG();
145 } 124 }
146 125
147 spin_lock(&inode->i_lock);
148 if (*p_acl != XFS_ACL_NOT_CACHED)
149 acl = posix_acl_dup(*p_acl);
150 spin_unlock(&inode->i_lock);
151
152 /* 126 /*
153 * If we have a cached ACL value just return it, no need to 127 * If we have a cached ACL value just return it, no need to
154 * go out to the disk. 128 * go out to the disk.
155 */ 129 */
156 if (acl)
157 return acl;
158 130
159 xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); 131 xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
160 if (!xfs_acl) 132 if (!xfs_acl)
@@ -165,7 +137,7 @@ xfs_get_acl(struct inode *inode, int type)
165 /* 137 /*
166 * If the attribute doesn't exist make sure we have a negative 138 * If the attribute doesn't exist make sure we have a negative
167 * cache entry, for any other error assume it is transient and 139 * cache entry, for any other error assume it is transient and
168 * leave the cache entry as XFS_ACL_NOT_CACHED. 140 * leave the cache entry as ACL_NOT_CACHED.
169 */ 141 */
170 if (error == -ENOATTR) { 142 if (error == -ENOATTR) {
171 acl = NULL; 143 acl = NULL;
@@ -179,7 +151,7 @@ xfs_get_acl(struct inode *inode, int type)
179 goto out; 151 goto out;
180 152
181 out_update_cache: 153 out_update_cache:
182 xfs_update_cached_acl(inode, p_acl, acl); 154 set_cached_acl(inode, type, acl);
183 out: 155 out:
184 kfree(xfs_acl); 156 kfree(xfs_acl);
185 return acl; 157 return acl;
@@ -189,7 +161,6 @@ STATIC int
189xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) 161xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
190{ 162{
191 struct xfs_inode *ip = XFS_I(inode); 163 struct xfs_inode *ip = XFS_I(inode);
192 struct posix_acl **p_acl;
193 char *ea_name; 164 char *ea_name;
194 int error; 165 int error;
195 166
@@ -199,13 +170,11 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
199 switch (type) { 170 switch (type) {
200 case ACL_TYPE_ACCESS: 171 case ACL_TYPE_ACCESS:
201 ea_name = SGI_ACL_FILE; 172 ea_name = SGI_ACL_FILE;
202 p_acl = &ip->i_acl;
203 break; 173 break;
204 case ACL_TYPE_DEFAULT: 174 case ACL_TYPE_DEFAULT:
205 if (!S_ISDIR(inode->i_mode)) 175 if (!S_ISDIR(inode->i_mode))
206 return acl ? -EACCES : 0; 176 return acl ? -EACCES : 0;
207 ea_name = SGI_ACL_DEFAULT; 177 ea_name = SGI_ACL_DEFAULT;
208 p_acl = &ip->i_default_acl;
209 break; 178 break;
210 default: 179 default:
211 return -EINVAL; 180 return -EINVAL;
@@ -242,7 +211,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
242 } 211 }
243 212
244 if (!error) 213 if (!error)
245 xfs_update_cached_acl(inode, p_acl, acl); 214 set_cached_acl(inode, type, acl);
246 return error; 215 return error;
247} 216}
248 217
@@ -384,30 +353,6 @@ xfs_acl_chmod(struct inode *inode)
384 return error; 353 return error;
385} 354}
386 355
387void
388xfs_inode_init_acls(struct xfs_inode *ip)
389{
390 /*
391 * No need for locking, inode is not live yet.
392 */
393 ip->i_acl = XFS_ACL_NOT_CACHED;
394 ip->i_default_acl = XFS_ACL_NOT_CACHED;
395}
396
397void
398xfs_inode_clear_acls(struct xfs_inode *ip)
399{
400 /*
401 * No need for locking here, the inode is not live anymore
402 * and just about to be freed.
403 */
404 if (ip->i_acl != XFS_ACL_NOT_CACHED)
405 posix_acl_release(ip->i_acl);
406 if (ip->i_default_acl != XFS_ACL_NOT_CACHED)
407 posix_acl_release(ip->i_default_acl);
408}
409
410
411/* 356/*
412 * System xattr handlers. 357 * System xattr handlers.
413 * 358 *
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 63dc1f2efad5..947b150df8ed 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -46,8 +46,6 @@ extern int xfs_check_acl(struct inode *inode, int mask);
46extern struct posix_acl *xfs_get_acl(struct inode *inode, int type); 46extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
47extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl); 47extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl);
48extern int xfs_acl_chmod(struct inode *inode); 48extern int xfs_acl_chmod(struct inode *inode);
49extern void xfs_inode_init_acls(struct xfs_inode *ip);
50extern void xfs_inode_clear_acls(struct xfs_inode *ip);
51extern int posix_acl_access_exists(struct inode *inode); 49extern int posix_acl_access_exists(struct inode *inode);
52extern int posix_acl_default_exists(struct inode *inode); 50extern int posix_acl_default_exists(struct inode *inode);
53 51
@@ -57,8 +55,6 @@ extern struct xattr_handler xfs_xattr_system_handler;
57# define xfs_get_acl(inode, type) NULL 55# define xfs_get_acl(inode, type) NULL
58# define xfs_inherit_acl(inode, default_acl) 0 56# define xfs_inherit_acl(inode, default_acl) 0
59# define xfs_acl_chmod(inode) 0 57# define xfs_acl_chmod(inode) 0
60# define xfs_inode_init_acls(ip)
61# define xfs_inode_clear_acls(ip)
62# define posix_acl_access_exists(inode) 0 58# define posix_acl_access_exists(inode) 0
63# define posix_acl_default_exists(inode) 0 59# define posix_acl_default_exists(inode) 0
64#endif /* CONFIG_XFS_POSIX_ACL */ 60#endif /* CONFIG_XFS_POSIX_ACL */
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 76c540f719e4..5fcec6f020a7 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -83,7 +83,6 @@ xfs_inode_alloc(
83 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); 83 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
84 ip->i_size = 0; 84 ip->i_size = 0;
85 ip->i_new_size = 0; 85 ip->i_new_size = 0;
86 xfs_inode_init_acls(ip);
87 86
88 /* 87 /*
89 * Initialize inode's trace buffers. 88 * Initialize inode's trace buffers.
@@ -560,7 +559,6 @@ xfs_ireclaim(
560 ASSERT(atomic_read(&ip->i_pincount) == 0); 559 ASSERT(atomic_read(&ip->i_pincount) == 0);
561 ASSERT(!spin_is_locked(&ip->i_flags_lock)); 560 ASSERT(!spin_is_locked(&ip->i_flags_lock));
562 ASSERT(completion_done(&ip->i_flush)); 561 ASSERT(completion_done(&ip->i_flush));
563 xfs_inode_clear_acls(ip);
564 kmem_zone_free(xfs_inode_zone, ip); 562 kmem_zone_free(xfs_inode_zone, ip);
565} 563}
566 564
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 77016702938b..1804f866a71d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -273,11 +273,6 @@ typedef struct xfs_inode {
273 /* VFS inode */ 273 /* VFS inode */
274 struct inode i_vnode; /* embedded VFS inode */ 274 struct inode i_vnode; /* embedded VFS inode */
275 275
276#ifdef CONFIG_XFS_POSIX_ACL
277 struct posix_acl *i_acl;
278 struct posix_acl *i_default_acl;
279#endif
280
281 /* Trace buffers per inode. */ 276 /* Trace buffers per inode. */
282#ifdef XFS_INODE_TRACE 277#ifdef XFS_INODE_TRACE
283 struct ktrace *i_trace; /* general inode trace */ 278 struct ktrace *i_trace; /* general inode trace */
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 1731fb5fd775..4a2b162c256a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -126,6 +126,8 @@ extern int free_irte(int irq);
126extern int irq_remapped(int irq); 126extern int irq_remapped(int irq);
127extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); 127extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
128extern struct intel_iommu *map_ioapic_to_ir(int apic); 128extern struct intel_iommu *map_ioapic_to_ir(int apic);
129extern int set_ioapic_sid(struct irte *irte, int apic);
130extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
129#else 131#else
130static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) 132static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
131{ 133{
@@ -156,6 +158,15 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
156{ 158{
157 return NULL; 159 return NULL;
158} 160}
161static inline int set_ioapic_sid(struct irte *irte, int apic)
162{
163 return 0;
164}
165static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
166{
167 return 0;
168}
169
159#define irq_remapped(irq) (0) 170#define irq_remapped(irq) (0)
160#define enable_intr_remapping(mode) (-1) 171#define enable_intr_remapping(mode) (-1)
161#define disable_intr_remapping() (0) 172#define disable_intr_remapping() (0)
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 7894dd0f3b77..ca1bfe90004f 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -103,10 +103,6 @@ struct ext3_inode_info {
103 */ 103 */
104 struct rw_semaphore xattr_sem; 104 struct rw_semaphore xattr_sem;
105#endif 105#endif
106#ifdef CONFIG_EXT3_FS_POSIX_ACL
107 struct posix_acl *i_acl;
108 struct posix_acl *i_default_acl;
109#endif
110 106
111 struct list_head i_orphan; /* unlinked but open inodes */ 107 struct list_head i_orphan; /* unlinked but open inodes */
112 108
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 8e912ab6a072..3c155107d61f 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -3,4 +3,25 @@
3 3
4#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */ 4#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */
5 5
6#ifdef __KERNEL__
7
8/*
9 * Space reservation ioctls and argument structure
10 * are designed to be compatible with the legacy XFS ioctls.
11 */
12struct space_resv {
13 __s16 l_type;
14 __s16 l_whence;
15 __s64 l_start;
16 __s64 l_len; /* len == 0 means until end of file */
17 __s32 l_sysid;
18 __u32 l_pid;
19 __s32 l_pad[4]; /* reserved area */
20};
21
22#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
23#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
24
25#endif /* __KERNEL__ */
26
6#endif /* _FALLOC_H_ */ 27#endif /* _FALLOC_H_ */
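
Note that the new struct space_resv and FS_IOC_RESVSP* definitions sit under #ifdef __KERNEL__, so userspace keeps using the identical XFS-compatible layout it already has; the semantics are the legacy RESVSP ones of reserving space without extending the file. For reference, a small self-contained test program (re-declaring that layout locally; the file name and size are arbitrary) could look like this:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>	/* _IOW() */

/* Same layout as the kernel-side struct space_resv added above. */
struct space_resv {
	int16_t		l_type;
	int16_t		l_whence;
	int64_t		l_start;
	int64_t		l_len;
	int32_t		l_sysid;
	uint32_t	l_pid;
	int32_t		l_pad[4];
};

#define FS_IOC_RESVSP64	_IOW('X', 42, struct space_resv)

int main(void)
{
	struct space_resv sr = { .l_whence = 0 /* SEEK_SET */,
				 .l_start = 0, .l_len = 1 << 20 };
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ioctl(fd, FS_IOC_RESVSP64, &sr)) {
		perror("FS_IOC_RESVSP64");
		return 1;
	}
	close(fd);
	return 0;
}
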
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1ff5e4e01952..0872372184fe 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -710,6 +710,9 @@ static inline int mapping_writably_mapped(struct address_space *mapping)
710#define i_size_ordered_init(inode) do { } while (0) 710#define i_size_ordered_init(inode) do { } while (0)
711#endif 711#endif
712 712
713struct posix_acl;
714#define ACL_NOT_CACHED ((void *)(-1))
715
713struct inode { 716struct inode {
714 struct hlist_node i_hash; 717 struct hlist_node i_hash;
715 struct list_head i_list; 718 struct list_head i_list;
@@ -773,6 +776,10 @@ struct inode {
773#ifdef CONFIG_SECURITY 776#ifdef CONFIG_SECURITY
774 void *i_security; 777 void *i_security;
775#endif 778#endif
779#ifdef CONFIG_FS_POSIX_ACL
780 struct posix_acl *i_acl;
781 struct posix_acl *i_default_acl;
782#endif
776 void *i_private; /* fs or device private pointer */ 783 void *i_private; /* fs or device private pointer */
777}; 784};
778 785
@@ -1906,6 +1913,8 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1906 1913
1907extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, 1914extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
1908 struct file *filp); 1915 struct file *filp);
1916extern int do_fallocate(struct file *file, int mode, loff_t offset,
1917 loff_t len);
1909extern long do_sys_open(int dfd, const char __user *filename, int flags, 1918extern long do_sys_open(int dfd, const char __user *filename, int flags,
1910 int mode); 1919 int mode);
1911extern struct file *filp_open(const char *, int, int); 1920extern struct file *filp_open(const char *, int, int);
@@ -1914,6 +1923,10 @@ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
1914extern int filp_close(struct file *, fl_owner_t id); 1923extern int filp_close(struct file *, fl_owner_t id);
1915extern char * getname(const char __user *); 1924extern char * getname(const char __user *);
1916 1925
1926/* fs/ioctl.c */
1927
1928extern int ioctl_preallocate(struct file *filp, void __user *argp);
1929
1917/* fs/dcache.c */ 1930/* fs/dcache.c */
1918extern void __init vfs_caches_init_early(void); 1931extern void __init vfs_caches_init_early(void);
1919extern void __init vfs_caches_init(unsigned long); 1932extern void __init vfs_caches_init(unsigned long);
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 10d701eec484..b6a85183c333 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -175,16 +175,16 @@ struct icmp6_filter {
175 175
176 176
177extern void icmpv6_send(struct sk_buff *skb, 177extern void icmpv6_send(struct sk_buff *skb,
178 int type, int code, 178 u8 type, u8 code,
179 __u32 info, 179 __u32 info,
180 struct net_device *dev); 180 struct net_device *dev);
181 181
182extern int icmpv6_init(void); 182extern int icmpv6_init(void);
183extern int icmpv6_err_convert(int type, int code, 183extern int icmpv6_err_convert(u8 type, u8 code,
184 int *err); 184 int *err);
185extern void icmpv6_cleanup(void); 185extern void icmpv6_cleanup(void);
186extern void icmpv6_param_prob(struct sk_buff *skb, 186extern void icmpv6_param_prob(struct sk_buff *skb,
187 int code, int pos); 187 u8 code, int pos);
188 188
189struct flowi; 189struct flowi;
190struct in6_addr; 190struct in6_addr;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index da5a5a1f4cd2..b25d1b53df0d 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -258,6 +258,16 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
258#define lockdep_set_subclass(lock, sub) \ 258#define lockdep_set_subclass(lock, sub) \
259 lockdep_init_map(&(lock)->dep_map, #lock, \ 259 lockdep_init_map(&(lock)->dep_map, #lock, \
260 (lock)->dep_map.key, sub) 260 (lock)->dep_map.key, sub)
261/*
262 * Compare locking classes
263 */
264#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
265
266static inline int lockdep_match_key(struct lockdep_map *lock,
267 struct lock_class_key *key)
268{
269 return lock->key == key;
270}
261 271
262/* 272/*
263 * Acquire a lock. 273 * Acquire a lock.
@@ -326,6 +336,11 @@ static inline void lockdep_on(void)
326#define lockdep_set_class_and_subclass(lock, key, sub) \ 336#define lockdep_set_class_and_subclass(lock, key, sub) \
327 do { (void)(key); } while (0) 337 do { (void)(key); } while (0)
328#define lockdep_set_subclass(lock, sub) do { } while (0) 338#define lockdep_set_subclass(lock, sub) do { } while (0)
339/*
340 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
341 * case since the result is not well defined and the caller should rather
342 * #ifdef the call himself.
343 */
329 344
330# define INIT_LOCKDEP 345# define INIT_LOCKDEP
331# define lockdep_reset() do { debug_locks = 1; } while (0) 346# define lockdep_reset() do { debug_locks = 1; } while (0)
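
lockdep_match_class() exists so code that hands out per-object lock classes (as the ocfs2 sysfile hunk earlier in this diff does) can later test which class a lock was given. A hedged sketch with generic, invented names; as the comment above says, the test only makes sense under CONFIG_DEBUG_LOCK_ALLOC, hence the ifdef.

#include <linux/lockdep.h>
#include <linux/mutex.h>

static struct lock_class_key example_special_key;

static void example_init(struct mutex *m, int special)
{
	mutex_init(m);
	/* Give "special" mutexes their own lockdep class. */
	if (special)
		lockdep_set_class(m, &example_special_key);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static int example_is_special(struct mutex *m)
{
	return lockdep_match_class(m, &example_special_key);
}
#endif
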
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 4bc241290c24..0cdba01b7756 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -83,4 +83,68 @@ extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
 extern struct posix_acl *get_posix_acl(struct inode *, int);
 extern int set_posix_acl(struct inode *, int, struct posix_acl *);
 
+static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
+{
+	struct posix_acl **p, *acl;
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		p = &inode->i_acl;
+		break;
+	case ACL_TYPE_DEFAULT:
+		p = &inode->i_default_acl;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+	acl = ACCESS_ONCE(*p);
+	if (acl) {
+		spin_lock(&inode->i_lock);
+		acl = *p;
+		if (acl != ACL_NOT_CACHED)
+			acl = posix_acl_dup(acl);
+		spin_unlock(&inode->i_lock);
+	}
+	return acl;
+}
+
+static inline void set_cached_acl(struct inode *inode,
+				  int type,
+				  struct posix_acl *acl)
+{
+	struct posix_acl *old = NULL;
+	spin_lock(&inode->i_lock);
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		old = inode->i_acl;
+		inode->i_acl = posix_acl_dup(acl);
+		break;
+	case ACL_TYPE_DEFAULT:
+		old = inode->i_default_acl;
+		inode->i_default_acl = posix_acl_dup(acl);
+		break;
+	}
+	spin_unlock(&inode->i_lock);
+	if (old != ACL_NOT_CACHED)
+		posix_acl_release(old);
+}
+
+static inline void forget_cached_acl(struct inode *inode, int type)
+{
+	struct posix_acl *old = NULL;
+	spin_lock(&inode->i_lock);
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		old = inode->i_acl;
+		inode->i_acl = ACL_NOT_CACHED;
+		break;
+	case ACL_TYPE_DEFAULT:
+		old = inode->i_default_acl;
+		inode->i_default_acl = ACL_NOT_CACHED;
+		break;
+	}
+	spin_unlock(&inode->i_lock);
+	if (old != ACL_NOT_CACHED)
+		posix_acl_release(old);
+}
+
 #endif  /* __LINUX_POSIX_ACL_H */
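These helpers move the ACL cache into the generic inode (i_acl / i_default_acl), so the per-filesystem cached pointers removed later in this diff are no longer needed. A sketch of how a filesystem's ACL lookup path might use them; myfs_get_acl() and myfs_read_acl_from_disk() are hypothetical stand-ins, not code from this patch:

#include <linux/fs.h>
#include <linux/posix_acl.h>

static struct posix_acl *myfs_get_acl(struct inode *inode, int type)
{
	struct posix_acl *acl;

	acl = get_cached_acl(inode, type);
	if (acl != ACL_NOT_CACHED)
		return acl;	/* cache hit: NULL or a referenced ACL */

	/* hypothetical: read the ACL xattr from backing storage */
	acl = myfs_read_acl_from_disk(inode, type);
	if (!IS_ERR(acl))
		set_cached_acl(inode, type, acl);	/* cache takes its own ref */
	return acl;
}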
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index 8cc65757e47a..b4448853900e 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -56,15 +56,6 @@ int reiserfs_cache_default_acl(struct inode *dir);
 extern struct xattr_handler reiserfs_posix_acl_default_handler;
 extern struct xattr_handler reiserfs_posix_acl_access_handler;
 
-static inline void reiserfs_init_acl_access(struct inode *inode)
-{
-	REISERFS_I(inode)->i_acl_access = NULL;
-}
-
-static inline void reiserfs_init_acl_default(struct inode *inode)
-{
-	REISERFS_I(inode)->i_acl_default = NULL;
-}
 #else
 
 #define reiserfs_cache_default_acl(inode) 0
@@ -86,12 +77,4 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
 {
 	return 0;
 }
-
-static inline void reiserfs_init_acl_access(struct inode *inode)
-{
-}
-
-static inline void reiserfs_init_acl_default(struct inode *inode)
-{
-}
 #endif
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h
index 76360b36ac33..89f4d3abbf5a 100644
--- a/include/linux/reiserfs_fs_i.h
+++ b/include/linux/reiserfs_fs_i.h
@@ -54,10 +54,6 @@ struct reiserfs_inode_info {
 	unsigned int i_trans_id;
 	struct reiserfs_journal_list *i_jl;
 	struct mutex i_mmap;
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
-	struct posix_acl *i_acl_access;
-	struct posix_acl *i_acl_default;
-#endif
 #ifdef CONFIG_REISERFS_FS_XATTR
 	struct rw_semaphore i_xattr_sem;
 #endif
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index fd83f2584b15..abff6c9b413c 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -19,10 +19,6 @@ struct shmem_inode_info {
 	swp_entry_t		i_direct[SHMEM_NR_DIRECT]; /* first blocks */
 	struct list_head	swaplist;	/* chain of maybes on swap */
 	struct inode		vfs_inode;
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	struct posix_acl	*i_acl;
-	struct posix_acl	*i_default_acl;
-#endif
 };
 
 struct shmem_sb_info {
@@ -45,7 +41,6 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
 #ifdef CONFIG_TMPFS_POSIX_ACL
 int shmem_permission(struct inode *, int);
 int shmem_acl_init(struct inode *, struct inode *);
-void shmem_acl_destroy_inode(struct inode *);
 
 extern struct xattr_handler shmem_xattr_acl_access_handler;
 extern struct xattr_handler shmem_xattr_acl_default_handler;
@@ -57,9 +52,6 @@ static inline int shmem_acl_init(struct inode *inode, struct inode *dir)
 {
 	return 0;
 }
-static inline void shmem_acl_destroy_inode(struct inode *inode)
-{
-}
 #endif  /* CONFIG_TMPFS_POSIX_ACL */
 
 #endif
diff --git a/include/net/protocol.h b/include/net/protocol.h
index ffa5b8b1f1df..1089d5aabd49 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -53,7 +53,7 @@ struct inet6_protocol
 
 	void	(*err_handler)(struct sk_buff *skb,
 			       struct inet6_skb_parm *opt,
-			       int type, int code, int offset,
+			       u8 type, u8 code, int offset,
 			       __be32 info);
 
 	int	(*gso_send_check)(struct sk_buff *skb);
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index 8a22599f26ba..f6b9b830df8c 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -6,7 +6,7 @@
 #include <net/protocol.h>
 
 void raw6_icmp_error(struct sk_buff *, int nexthdr,
-		int type, int code, int inner_offset, __be32);
+		u8 type, u8 code, int inner_offset, __be32);
 int raw6_local_deliver(struct sk_buff *, int);
 
 extern int rawv6_rcv(struct sock *sk,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 9f80a7668289..d16a304cbed4 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -448,6 +448,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
 	struct sctp_ulpevent *event = sctp_skb2event(skb);
 
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sctp_sock_rfree;
 	atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
diff --git a/include/net/sock.h b/include/net/sock.h
index 07133c5e9868..352f06bbd7a9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1252,6 +1252,7 @@ static inline int sk_has_allocations(const struct sock *sk)
 
 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 {
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sock_wfree;
 	/*
@@ -1264,6 +1265,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 
 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sock_rfree;
 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
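With skb_orphan() now called at the top of the owner helpers, code that hands a buffer to a new socket no longer has to clear a previous owner by hand (compare the ax25 and irda hunks later in this diff). A rough, purely illustrative sketch of a receive path relying on that; queue_to_socket() is not a function from this patch:

#include <linux/skbuff.h>
#include <net/sock.h>

static void queue_to_socket(struct sock *sk, struct sk_buff *skb)
{
	/* skb_set_owner_r() drops any previous owner via skb_orphan()
	 * before charging the buffer to sk->sk_rmem_alloc. */
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_receive_queue, skb);
}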
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 736bca450886..9e3a3f4c1f60 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1274,7 +1274,7 @@ struct xfrm_tunnel {
 struct xfrm6_tunnel {
 	int (*handler)(struct sk_buff *skb);
 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			   int type, int code, int offset, __be32 info);
+			   u8 type, u8 code, int offset, __be32 info);
 	struct xfrm6_tunnel *next;
 	int priority;
 };
diff --git a/mm/shmem.c b/mm/shmem.c
index e89d7ec18eda..5f2019fc7895 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2379,6 +2379,10 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
 	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
 	if (!p)
 		return NULL;
+#ifdef CONFIG_TMPFS_POSIX_ACL
+	p->vfs_inode.i_acl = NULL;
+	p->vfs_inode.i_default_acl = NULL;
+#endif
 	return &p->vfs_inode;
 }
 
@@ -2388,7 +2392,6 @@ static void shmem_destroy_inode(struct inode *inode)
 		/* only struct inode is valid if it's an inline symlink */
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	}
-	shmem_acl_destroy_inode(inode);
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
@@ -2397,10 +2400,6 @@ static void init_once(void *foo)
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
 	inode_init_once(&p->vfs_inode);
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	p->i_acl = NULL;
-	p->i_default_acl = NULL;
-#endif
 }
 
 static int init_inodecache(void)
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 8e5aadd7dcd6..606a8e757a42 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -22,11 +22,11 @@ shmem_get_acl(struct inode *inode, int type)
 	spin_lock(&inode->i_lock);
 	switch(type) {
 		case ACL_TYPE_ACCESS:
-			acl = posix_acl_dup(SHMEM_I(inode)->i_acl);
+			acl = posix_acl_dup(inode->i_acl);
 			break;
 
 		case ACL_TYPE_DEFAULT:
-			acl = posix_acl_dup(SHMEM_I(inode)->i_default_acl);
+			acl = posix_acl_dup(inode->i_default_acl);
 			break;
 	}
 	spin_unlock(&inode->i_lock);
@@ -45,13 +45,13 @@ shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
 	spin_lock(&inode->i_lock);
 	switch(type) {
 		case ACL_TYPE_ACCESS:
-			free = SHMEM_I(inode)->i_acl;
-			SHMEM_I(inode)->i_acl = posix_acl_dup(acl);
+			free = inode->i_acl;
+			inode->i_acl = posix_acl_dup(acl);
 			break;
 
 		case ACL_TYPE_DEFAULT:
-			free = SHMEM_I(inode)->i_default_acl;
-			SHMEM_I(inode)->i_default_acl = posix_acl_dup(acl);
+			free = inode->i_default_acl;
+			inode->i_default_acl = posix_acl_dup(acl);
 			break;
 	}
 	spin_unlock(&inode->i_lock);
@@ -155,23 +155,6 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
 }
 
 /**
- * shmem_acl_destroy_inode  -   destroy acls hanging off the in-memory inode
- *
- * This is done before destroying the actual inode.
- */
-
-void
-shmem_acl_destroy_inode(struct inode *inode)
-{
-	if (SHMEM_I(inode)->i_acl)
-		posix_acl_release(SHMEM_I(inode)->i_acl);
-	SHMEM_I(inode)->i_acl = NULL;
-	if (SHMEM_I(inode)->i_default_acl)
-		posix_acl_release(SHMEM_I(inode)->i_default_acl);
-	SHMEM_I(inode)->i_default_acl = NULL;
-}
-
-/**
  * shmem_check_acl  -   check_acl() callback for generic_permission()
  */
 static int
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 5f1d2107a1dd..de56d3983de0 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -437,8 +437,7 @@ free:
 int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type *ptype, struct net_device *orig_dev)
 {
-	skb->sk = NULL;		/* Initially we don't know who it's for */
-	skb->destructor = NULL;	/* Who initializes this, dammit?! */
+	skb_orphan(skb);
 
 	if (!net_eq(dev_net(dev), &init_net)) {
 		kfree_skb(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index baf2dc13a34a..60b572812278 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2310,8 +2310,6 @@ ncls:
 	if (!skb)
 		goto out;
 
-	skb_orphan(skb);
-
 	type = skb->protocol;
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 05ea7440d9e5..3e70faab2989 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -85,7 +85,7 @@ static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
 }
 
 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			int type, int code, int offset, __be32 info)
+			u8 type, u8 code, int offset, __be32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
 	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 65b3a8b11a6c..278f46f5011b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1093,8 +1093,27 @@ restart:
 	 * If we drop it here, the callers have no way to resolve routes
 	 * when we're not caching. Instead, just point *rp at rt, so
 	 * the caller gets a single use out of the route
+	 * Note that we do rt_free on this new route entry, so that
+	 * once its refcount hits zero, we are still able to reap it
+	 * (Thanks Alexey)
+	 * Note also the rt_free uses call_rcu.  We don't actually
+	 * need rcu protection here, this is just our path to get
+	 * on the route gc list.
 	 */
-		goto report_and_exit;
+
+		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+			int err = arp_bind_neighbour(&rt->u.dst);
+			if (err) {
+				if (net_ratelimit())
+					printk(KERN_WARNING
+					    "Neighbour table failure & not caching routes.\n");
+				rt_drop(rt);
+				return err;
+			}
+		}
+
+		rt_free(rt);
+		goto skip_hashing;
 	}
 
 	rthp = &rt_hash_table[hash].chain;
@@ -1211,7 +1230,8 @@ restart:
 #if RT_CACHE_DEBUG >= 2
 	if (rt->u.dst.rt_next) {
 		struct rtable *trt;
-		printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
+		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
+		       hash, &rt->rt_dst);
 		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
 			printk(" . %pI4", &trt->rt_dst);
 		printk("\n");
@@ -1226,7 +1246,7 @@ restart:
 
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 
-report_and_exit:
+skip_hashing:
 	if (rp)
 		*rp = rt;
 	else
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 52449f7a1b71..86f42a288c4b 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -405,7 +405,7 @@ out:
 }
 
 static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		    int type, int code, int offset, __be32 info)
+		    u8 type, u8 code, int offset, __be32 info)
 {
 	struct net *net = dev_net(skb->dev);
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index c2f250150db1..678bb95b1525 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -354,7 +354,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		     int type, int code, int offset, __be32 info)
+		     u8 type, u8 code, int offset, __be32 info)
 {
 	struct net *net = dev_net(skb->dev);
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 36dff8807183..eab62a7a8f06 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -117,7 +117,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
 /*
  *	Slightly more convenient version of icmpv6_send.
  */
-void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
+void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
 {
 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
 	kfree_skb(skb);
@@ -161,7 +161,7 @@ static int is_ineligible(struct sk_buff *skb)
 /*
  *	Check the ICMP output rate limit
  */
-static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
+static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
 				     struct flowi *fl)
 {
 	struct dst_entry *dst;
@@ -305,7 +305,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
 /*
  *	Send an ICMP message in response to a packet in error
  */
-void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
+void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 		 struct net_device *dev)
 {
 	struct net *net = dev_net(skb->dev);
@@ -590,7 +590,7 @@ out:
 	icmpv6_xmit_unlock(sk);
 }
 
-static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
+static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 {
 	struct inet6_protocol *ipprot;
 	int inner_offset;
@@ -643,7 +643,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
 	struct in6_addr *saddr, *daddr;
 	struct ipv6hdr *orig_hdr;
 	struct icmp6hdr *hdr;
-	int type;
+	u8 type;
 
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 		struct sec_path *sp = skb_sec_path(skb);
@@ -914,7 +914,7 @@ static const struct icmp6_err {
 	},
 };
 
-int icmpv6_err_convert(int type, int code, int *err)
+int icmpv6_err_convert(u8 type, u8 code, int *err)
 {
 	int fatal = 0;
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 404d16a97d5c..51f410e7775a 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -394,13 +394,13 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
 
 static int
 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
-	    int *type, int *code, int *msg, __u32 *info, int offset)
+	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
 {
 	struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
 	struct ip6_tnl *t;
 	int rel_msg = 0;
-	int rel_type = ICMPV6_DEST_UNREACH;
-	int rel_code = ICMPV6_ADDR_UNREACH;
+	u8 rel_type = ICMPV6_DEST_UNREACH;
+	u8 rel_code = ICMPV6_ADDR_UNREACH;
 	__u32 rel_info = 0;
 	__u16 len;
 	int err = -ENOENT;
@@ -488,11 +488,11 @@ out:
 
 static int
 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-	   int type, int code, int offset, __be32 info)
+	   u8 type, u8 code, int offset, __be32 info)
 {
 	int rel_msg = 0;
-	int rel_type = type;
-	int rel_code = code;
+	u8 rel_type = type;
+	u8 rel_code = code;
 	__u32 rel_info = ntohl(info);
 	int err;
 	struct sk_buff *skb2;
@@ -586,11 +586,11 @@ out:
 
 static int
 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-	   int type, int code, int offset, __be32 info)
+	   u8 type, u8 code, int offset, __be32 info)
 {
 	int rel_msg = 0;
-	int rel_type = type;
-	int rel_code = code;
+	u8 rel_type = type;
+	u8 rel_code = code;
 	__u32 rel_info = ntohl(info);
 	int err;
 
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 3a0b3be7ece5..79c172f1ff01 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -51,7 +51,7 @@
 #include <linux/mutex.h>
 
 static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-				int type, int code, int offset, __be32 info)
+				u8 type, u8 code, int offset, __be32 info)
 {
 	__be32 spi;
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f995e19c87a9..f797e8c6f3b3 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -54,7 +54,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
 	return data + padlen;
 }
 
-static inline void mip6_param_prob(struct sk_buff *skb, int code, int pos)
+static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
 {
 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
 }
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8b0b6f948063..d6c3c1c34b2d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -310,7 +310,7 @@ out:
 
 static void rawv6_err(struct sock *sk, struct sk_buff *skb,
 	       struct inet6_skb_parm *opt,
-	       int type, int code, int offset, __be32 info)
+	       u8 type, u8 code, int offset, __be32 info)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -343,7 +343,7 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
 }
 
 void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
-		int type, int code, int inner_offset, __be32 info)
+		u8 type, u8 code, int inner_offset, __be32 info)
 {
 	struct sock *sk;
 	int hash;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 658293ea05ba..1473ee0a1f51 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1865,7 +1865,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
  *	Drop the packet on the floor
  */
 
-static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes)
+static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
 {
 	int type;
 	struct dst_entry *dst = skb_dst(skb);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 53b6a4192b16..58810c65b635 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -317,7 +317,7 @@ failure:
 }
 
 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		int type, int code, int offset, __be32 info)
+		u8 type, u8 code, int offset, __be32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 669f280989c3..633ad789effc 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -124,7 +124,7 @@ drop:
 }
 
 static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			int type, int code, int offset, __be32 info)
+			u8 type, u8 code, int offset, __be32 info)
 {
 	struct xfrm6_tunnel *handler;
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 023beda6b224..33b59bd92c4d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -312,7 +312,7 @@ csum_copy_err:
 }
 
 void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		    int type, int code, int offset, __be32 info,
+		    u8 type, u8 code, int offset, __be32 info,
 		    struct udp_table *udptable)
 {
 	struct ipv6_pinfo *np;
@@ -346,8 +346,8 @@ out:
 }
 
 static __inline__ void udpv6_err(struct sk_buff *skb,
-				 struct inet6_skb_parm *opt, int type,
-				 int code, int offset, __be32 info )
+				 struct inet6_skb_parm *opt, u8 type,
+				 u8 code, int offset, __be32 info )
 {
 	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 }
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 23779208c334..6bb303471e20 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -9,7 +9,7 @@
 
 extern int	__udp6_lib_rcv(struct sk_buff *, struct udp_table *, int );
 extern void	__udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
-			       int , int , int , __be32 , struct udp_table *);
+			       u8 , u8 , int , __be32 , struct udp_table *);
 
 extern int	udp_v6_get_port(struct sock *sk, unsigned short snum);
 
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index ba162a824585..4818c48688f2 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -20,7 +20,7 @@ static int udplitev6_rcv(struct sk_buff *skb)
 
 static void udplitev6_err(struct sk_buff *skb,
 			  struct inet6_skb_parm *opt,
-			  int type, int code, int offset, __be32 info)
+			  u8 type, u8 code, int offset, __be32 info)
 {
 	__udp6_lib_err(skb, opt, type, code, offset, info, &udplite_table);
 }
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 80193db224d9..81a95c00e503 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -262,7 +262,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			    int type, int code, int offset, __be32 info)
+			    u8 type, u8 code, int offset, __be32 info)
 {
 	/* xfrm6_tunnel native err handling */
 	switch (type) {
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 5922febe25c4..cb762c8723ea 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -913,9 +913,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
 	/* Clean up the original one to keep it in listen state */
 	irttp_listen(self->tsap);
 
-	/* Wow ! What is that ? Jean II */
-	skb->sk = NULL;
-	skb->destructor = NULL;
 	kfree_skb(skb);
 	sk->sk_ack_backlog--;
 
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 67c99d20857f..7ba96618660e 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -196,6 +196,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
 	/* Don't forget to refcount it - see ircomm_tty_do_softint() */
 	skb_get(skb);
 
+	skb_orphan(skb);
 	skb->destructor = ircomm_lmp_flow_control;
 
 	if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5f72b94b4918..7508f11c5b39 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -335,7 +335,8 @@ begin:
 	h = __nf_conntrack_find(net, tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
-		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		if (unlikely(nf_ct_is_dying(ct) ||
+			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
 		else {
 			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
@@ -425,7 +426,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* Remove from unconfirmed list */
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 
-	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
@@ -433,8 +433,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	add_timer(&ct->timeout);
 	atomic_inc(&ct->ct_general.use);
 	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+	/* Since the lookup is lockless, hash insertion must be done after
+	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
+	 * guarantee that no other CPU can find the conntrack before the above
+	 * stores are visible.
+	 */
+	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	NF_CT_STAT_INC(net, insert);
 	spin_unlock_bh(&nf_conntrack_lock);
+
 	help = nfct_help(ct);
 	if (help && help->helper)
 		nf_conntrack_event_cache(IPCT_HELPER, ct);
@@ -503,7 +511,8 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 		cnt++;
 	}
 
-	if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+	if (ct && unlikely(nf_ct_is_dying(ct) ||
+			   !atomic_inc_not_zero(&ct->ct_general.use)))
 		ct = NULL;
 	if (ct || cnt >= NF_CT_EVICTION_RANGE)
 		break;
@@ -1267,13 +1276,19 @@ err_cache:
 	return ret;
 }
 
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
+#define DYING_NULLS_VAL		((1<<30)+1)
+
 static int nf_conntrack_init_net(struct net *net)
 {
 	int ret;
 
 	atomic_set(&net->ct.count, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
+	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
+	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;
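The two lookup hunks above add an nf_ct_is_dying() test in front of atomic_inc_not_zero(), the usual rule for lockless, RCU-protected lookups: only take a reference when the object is neither being torn down nor already at refcount zero, otherwise treat the lookup as a miss. A generic sketch of that pattern, not tied to conntrack's types (struct obj and obj_get_live() are illustrative; header names follow current kernels):

#include <linux/atomic.h>
#include <linux/bitops.h>

struct obj {
	atomic_t	use;
	unsigned long	flags;		/* bit 0 set while dying (illustrative) */
};

/* Returns the object with a reference held, or NULL if the caller
 * should treat the lookup as a miss and retry under RCU. */
static struct obj *obj_get_live(struct obj *candidate)
{
	if (!candidate)
		return NULL;
	if (test_bit(0, &candidate->flags) ||		/* already dying */
	    !atomic_inc_not_zero(&candidate->use))	/* refcount hit zero */
		return NULL;
	return candidate;
}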
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 2fefe147750a..4e620305f28c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -47,7 +47,6 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
 	mutex_lock(&nf_log_mutex);
 
 	if (pf == NFPROTO_UNSPEC) {
-		int i;
 		for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
 			list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
 	} else {
@@ -216,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
 #endif /* PROC_FS */
 
 #ifdef CONFIG_SYSCTL
-struct ctl_path nf_log_sysctl_path[] = {
+static struct ctl_path nf_log_sysctl_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "netfilter", .ctl_name = NET_NETFILTER, },
 	{ .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, },
@@ -228,19 +227,26 @@ static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
 static struct ctl_table_header *nf_log_dir_header;
 
 static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
-			 void *buffer, size_t *lenp, loff_t *ppos)
+			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	const struct nf_logger *logger;
+	char buf[NFLOGGER_NAME_LEN];
+	size_t size = *lenp;
 	int r = 0;
 	int tindex = (unsigned long)table->extra1;
 
 	if (write) {
-		if (!strcmp(buffer, "NONE")) {
+		if (size > sizeof(buf))
+			size = sizeof(buf);
+		if (copy_from_user(buf, buffer, size))
+			return -EFAULT;
+
+		if (!strcmp(buf, "NONE")) {
 			nf_log_unbind_pf(tindex);
 			return 0;
 		}
 		mutex_lock(&nf_log_mutex);
-		logger = __find_logger(tindex, buffer);
+		logger = __find_logger(tindex, buf);
 		if (logger == NULL) {
 			mutex_unlock(&nf_log_mutex);
 			return -ENOENT;
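The sysctl hunk above fixes the handler to treat buffer as a userspace pointer: the string has to be copied into a bounded kernel buffer before it can be compared or parsed. The same pattern in isolation, with an arbitrary 32-byte buffer and an example comparison (illustrative sketch, not the patch's code; matches_none() is a made-up helper):

#include <linux/uaccess.h>
#include <linux/string.h>

static int matches_none(const char __user *ubuf, size_t len)
{
	char buf[32];

	if (len > sizeof(buf) - 1)
		len = sizeof(buf) - 1;		/* bound the copy */
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;
	buf[len] = '\0';			/* ensure termination */

	return strcmp(buf, "NONE") == 0;
}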
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 498b45101df7..f28f6a5fc02d 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -40,12 +40,12 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
 static u32 hash_v4(const struct sk_buff *skb)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	u32 ipaddr;
+	__be32 ipaddr;
 
 	/* packets in either direction go into same queue */
 	ipaddr = iph->saddr ^ iph->daddr;
 
-	return jhash_2words(ipaddr, iph->protocol, jhash_initval);
+	return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
 }
 
 static unsigned int
@@ -63,14 +63,14 @@ nfqueue_tg4_v1(struct sk_buff *skb, const struct xt_target_param *par)
 static u32 hash_v6(const struct sk_buff *skb)
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	u32 addr[4];
+	__be32 addr[4];
 
 	addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
 	addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
 	addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
 	addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
 
-	return jhash2(addr, ARRAY_SIZE(addr), jhash_initval);
+	return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
 }
 
 static unsigned int
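The xt_NFQUEUE changes are sparse endianness annotations: the folded addresses are big-endian (__be32), so feeding them to jhash needs an explicit (__force u32) cast rather than silently mixing byte-order-typed and plain integers. A small standalone example of the same idiom (hash_addr_pair() is illustrative, not from this patch):

#include <linux/jhash.h>
#include <linux/types.h>

/* Fold two big-endian addresses into one hash word; the __force cast
 * tells sparse the byte order is intentionally discarded here. */
static u32 hash_addr_pair(__be32 a, __be32 b, u32 initval)
{
	__be32 folded = a ^ b;	/* same value for either direction */

	return jhash_2words((__force u32)folded, 0, initval);
}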
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 69a639f35403..225ee3ecd69d 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -15,14 +15,14 @@
 #include <net/netfilter/nf_conntrack.h>
 #include <linux/netfilter/xt_cluster.h>
 
-static inline u_int32_t nf_ct_orig_ipv4_src(const struct nf_conn *ct)
+static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
 {
-	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+	return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
 }
 
-static inline const void *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
+static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
 {
-	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
+	return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
 }
 
 static inline u_int32_t
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 01dd07b764ec..98fc190e8f0e 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -54,6 +54,7 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
 	if (q->master == NULL)
 		return -ENOMEM;
 
+	q->master->quota = q->quota;
 	return true;
 }
 
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 220a1d588ee0..4fc6a917f6de 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -66,7 +66,7 @@ xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 		if (info->flags & XT_RATEEST_MATCH_BPS)
 			ret &= bps1 == bps2;
 		if (info->flags & XT_RATEEST_MATCH_PPS)
-			ret &= pps2 == pps2;
+			ret &= pps1 == pps2;
 		break;
 	}
 
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index a63de3f7f185..6a4b19094143 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -133,7 +133,7 @@ static struct notifier_block sctp_inet6addr_notifier = {
 
 /* ICMP error handler. */
 SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			     int type, int code, int offset, __be32 info)
+			     u8 type, u8 code, int offset, __be32 info)
 {
 	struct inet6_dev *idev;
 	struct sock *sk;