aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/filesystems/proc.txt38
-rw-r--r--arch/alpha/kernel/osf_sys.c1
-rw-r--r--arch/alpha/kernel/pci.c22
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/arm/mach-lh7a40x/arch-kev7a400.c5
-rw-r--r--arch/arm/mm/Kconfig6
-rw-r--r--arch/arm/mm/proc-arm1020.S2
-rw-r--r--arch/arm/mm/proc-arm1020e.S2
-rw-r--r--arch/arm/mm/proc-arm1022.S2
-rw-r--r--arch/arm/mm/proc-arm1026.S2
-rw-r--r--arch/arm/mm/proc-arm6_7.S4
-rw-r--r--arch/arm/mm/proc-arm720.S2
-rw-r--r--arch/arm/mm/proc-arm740.S1
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S1
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm922.S2
-rw-r--r--arch/arm/mm/proc-arm925.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-arm940.S1
-rw-r--r--arch/arm/mm/proc-arm946.S1
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S1
-rw-r--r--arch/arm/mm/proc-feroceon.S2
-rw-r--r--arch/arm/mm/proc-sa110.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-xsc3.S1
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/ia64/Kconfig11
-rw-r--r--arch/ia64/kernel/crash.c4
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/ia64/kernel/mca.c77
-rw-r--r--arch/ia64/kernel/perfmon.c4
-rw-r--r--arch/ia64/sn/kernel/Makefile7
-rw-r--r--arch/ia64/sn/kernel/huberror.c4
-rw-r--r--arch/ia64/sn/pci/tioce_provider.c6
-rw-r--r--drivers/base/bus.c3
-rw-r--r--drivers/base/power/main.c15
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/hid/hid-core.c19
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-input-quirks.c24
-rw-r--r--drivers/hid/usbhid/Kconfig12
-rw-r--r--drivers/hid/usbhid/Makefile3
-rw-r--r--drivers/hid/usbhid/hid-core.c69
-rw-r--r--drivers/hid/usbhid/hid-ff.c3
-rw-r--r--drivers/hid/usbhid/hid-lg2ff.c114
-rw-r--r--drivers/hid/usbhid/hid-quirks.c85
-rw-r--r--drivers/hid/usbhid/hiddev.c286
-rw-r--r--drivers/hid/usbhid/usbhid.h3
-rw-r--r--drivers/i2c/algos/Kconfig39
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c126
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.h26
-rw-r--r--drivers/i2c/busses/Kconfig73
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-au1550.c1
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c475
-rw-r--r--drivers/i2c/busses/i2c-davinci.c9
-rw-r--r--drivers/i2c/busses/i2c-gpio.c1
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c197
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c1
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c1
-rw-r--r--drivers/i2c/busses/i2c-mpc.c3
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-omap.c1
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c53
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c298
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c7
-rw-r--r--drivers/i2c/busses/i2c-pnx.c45
-rw-r--r--drivers/i2c/busses/i2c-powermac.c3
-rw-r--r--drivers/i2c/busses/i2c-pxa.c3
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c5
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c577
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c500
-rw-r--r--drivers/i2c/busses/i2c-simtec.c3
-rw-r--r--drivers/i2c/busses/i2c-versatile.c1
-rw-r--r--drivers/i2c/busses/scx200_acb.c2
-rw-r--r--drivers/i2c/chips/isp1301_omap.c28
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/i2c/i2c-dev.c329
-rw-r--r--drivers/leds/Kconfig1
-rw-r--r--drivers/mfd/htc-pasic3.c3
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/sgi-xp/Makefile11
-rw-r--r--drivers/misc/sgi-xp/xp.h (renamed from include/asm-ia64/sn/xp.h)94
-rw-r--r--drivers/misc/sgi-xp/xp_main.c (renamed from arch/ia64/sn/kernel/xp_main.c)141
-rw-r--r--drivers/misc/sgi-xp/xp_nofault.S (renamed from arch/ia64/sn/kernel/xp_nofault.S)3
-rw-r--r--drivers/misc/sgi-xp/xpc.h (renamed from include/asm-ia64/sn/xpc.h)500
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c (renamed from arch/ia64/sn/kernel/xpc_channel.c)528
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c (renamed from arch/ia64/sn/kernel/xpc_main.c)432
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c (renamed from arch/ia64/sn/kernel/xpc_partition.c)409
-rw-r--r--drivers/misc/sgi-xp/xpnet.c (renamed from arch/ia64/sn/kernel/xpnet.c)139
-rw-r--r--drivers/net/hamradio/dmascc.c3
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig8
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig15
-rw-r--r--drivers/pci/setup-bus.c30
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c10
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/dcache.c114
-rw-r--r--fs/dlm/Makefile1
-rw-r--r--fs/dlm/config.c50
-rw-r--r--fs/dlm/config.h3
-rw-r--r--fs/dlm/dlm_internal.h8
-rw-r--r--fs/dlm/lock.c5
-rw-r--r--fs/dlm/lock.h1
-rw-r--r--fs/dlm/main.c7
-rw-r--r--fs/dlm/member.c34
-rw-r--r--fs/dlm/plock.c (renamed from fs/gfs2/locking/dlm/plock.c)169
-rw-r--r--fs/dlm/recoverd.c1
-rw-r--r--fs/gfs2/locking/dlm/Makefile2
-rw-r--r--fs/gfs2/locking/dlm/lock_dlm.h12
-rw-r--r--fs/gfs2/locking/dlm/main.c8
-rw-r--r--fs/gfs2/locking/dlm/mount.c21
-rw-r--r--fs/internal.h11
-rw-r--r--fs/namespace.c331
-rw-r--r--fs/pipe.c3
-rw-r--r--fs/pnode.c60
-rw-r--r--fs/pnode.h2
-rw-r--r--fs/proc/base.c125
-rw-r--r--fs/read_write.c6
-rw-r--r--fs/seq_file.c113
-rw-r--r--fs/super.c1
-rw-r--r--fs/udf/Makefile2
-rw-r--r--fs/udf/balloc.c13
-rw-r--r--fs/udf/crc.c172
-rw-r--r--fs/udf/dir.c83
-rw-r--r--fs/udf/ecma_167.h13
-rw-r--r--fs/udf/file.c47
-rw-r--r--fs/udf/ialloc.c13
-rw-r--r--fs/udf/inode.c208
-rw-r--r--fs/udf/lowlevel.c1
-rw-r--r--fs/udf/misc.c26
-rw-r--r--fs/udf/namei.c218
-rw-r--r--fs/udf/partition.c67
-rw-r--r--fs/udf/super.c1262
-rw-r--r--fs/udf/symlink.c1
-rw-r--r--fs/udf/truncate.c81
-rw-r--r--fs/udf/udf_i.h30
-rw-r--r--fs/udf/udf_sb.h109
-rw-r--r--fs/udf/udfdecl.h67
-rw-r--r--fs/udf/udfend.h22
-rw-r--r--fs/udf/udftime.c35
-rw-r--r--fs/udf/unicode.c62
-rw-r--r--fs/xattr.c1
-rw-r--r--include/asm-ia64/mca.h1
-rw-r--r--include/asm-sh/i2c-sh7760.h22
-rw-r--r--include/linux/Kbuild6
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/dlm.h7
-rw-r--r--include/linux/dlm_device.h11
-rw-r--r--include/linux/dlm_plock.h50
-rw-r--r--include/linux/dlmconstants.h4
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/hid.h17
-rw-r--r--include/linux/hidraw.h1
-rw-r--r--include/linux/i2c-algo-pca.h37
-rw-r--r--include/linux/i2c-pca-platform.h12
-rw-r--r--include/linux/lock_dlm_plock.h41
-rw-r--r--include/linux/mnt_namespace.h12
-rw-r--r--include/linux/mount.h4
-rw-r--r--include/linux/security.h52
-rw-r--r--include/linux/seq_file.h6
-rw-r--r--include/linux/udf_fs.h51
-rw-r--r--include/linux/udf_fs_i.h31
-rw-r--r--include/linux/udf_fs_sb.h117
-rw-r--r--include/net/xfrm.h3
-rw-r--r--kernel/exit.c27
-rw-r--r--kernel/sched.c1
-rw-r--r--kernel/time.c1
-rw-r--r--mm/slub.c4
-rw-r--r--net/core/net-sysfs.c2
-rw-r--r--net/dccp/probe.c17
-rw-r--r--net/ipv4/icmp.c10
-rw-r--r--net/ipv4/ip_options.c12
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/rose/rose_route.c2
-rw-r--r--net/socket.c3
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--security/dummy.c10
-rw-r--r--security/security.c20
-rw-r--r--security/selinux/avc.c9
-rw-r--r--security/selinux/hooks.c23
-rw-r--r--security/selinux/netif.c2
-rw-r--r--security/smack/smack_lsm.c4
194 files changed, 5998 insertions, 4318 deletions
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 518ebe609e2b..2a99116edc47 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -43,6 +43,7 @@ Table of Contents
43 2.13 /proc/<pid>/oom_score - Display current oom-killer score 43 2.13 /proc/<pid>/oom_score - Display current oom-killer score
44 2.14 /proc/<pid>/io - Display the IO accounting fields 44 2.14 /proc/<pid>/io - Display the IO accounting fields
45 2.15 /proc/<pid>/coredump_filter - Core dump filtering settings 45 2.15 /proc/<pid>/coredump_filter - Core dump filtering settings
46 2.16 /proc/<pid>/mountinfo - Information about mounts
46 47
47------------------------------------------------------------------------------ 48------------------------------------------------------------------------------
48Preface 49Preface
@@ -2348,4 +2349,41 @@ For example:
2348 $ echo 0x7 > /proc/self/coredump_filter 2349 $ echo 0x7 > /proc/self/coredump_filter
2349 $ ./some_program 2350 $ ./some_program
2350 2351
23522.16 /proc/<pid>/mountinfo - Information about mounts
2353--------------------------------------------------------
2354
2355This file contains lines of the form:
2356
235736 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
2358(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
2359
2360(1) mount ID: unique identifier of the mount (may be reused after umount)
2361(2) parent ID: ID of parent (or of self for the top of the mount tree)
2362(3) major:minor: value of st_dev for files on filesystem
2363(4) root: root of the mount within the filesystem
2364(5) mount point: mount point relative to the process's root
2365(6) mount options: per mount options
2366(7) optional fields: zero or more fields of the form "tag[:value]"
2367(8) separator: marks the end of the optional fields
2368(9) filesystem type: name of filesystem of the form "type[.subtype]"
2369(10) mount source: filesystem specific information or "none"
2370(11) super options: per super block options
2371
2372Parsers should ignore all unrecognised optional fields. Currently the
2373possible optional fields are:
2374
2375shared:X mount is shared in peer group X
2376master:X mount is slave to peer group X
2377propagate_from:X mount is slave and receives propagation from peer group X (*)
2378unbindable mount is unbindable
2379
2380(*) X is the closest dominant peer group under the process's root. If
2381X is the immediate master of the mount, or if there's no dominant peer
2382group under the same root, then only the "master:X" field is present
2383and not the "propagate_from:X" field.
2384
2385For more information on mount propagation see:
2386
2387 Documentation/filesystems/sharedsubtree.txt
2388
2351------------------------------------------------------------------------------ 2389------------------------------------------------------------------------------
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 8c71daf94a59..9fee37e2596f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -75,6 +75,7 @@ osf_set_program_attributes(unsigned long text_start, unsigned long text_len,
75 lock_kernel(); 75 lock_kernel();
76 mm = current->mm; 76 mm = current->mm;
77 mm->end_code = bss_start + bss_len; 77 mm->end_code = bss_start + bss_len;
78 mm->start_brk = bss_start + bss_len;
78 mm->brk = bss_start + bss_len; 79 mm->brk = bss_start + bss_len;
79#if 0 80#if 0
80 printk("set_program_attributes(%lx %lx %lx %lx)\n", 81 printk("set_program_attributes(%lx %lx %lx %lx)\n",
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index c107cc08daf4..78357798b6fd 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -71,25 +71,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_i
71static void __init 71static void __init
72quirk_cypress(struct pci_dev *dev) 72quirk_cypress(struct pci_dev *dev)
73{ 73{
74 /* The Notorious Cy82C693 chip. */
75
76 /* The Cypress IDE controller doesn't support native mode, but it
77 has programmable addresses of IDE command/control registers.
78 This violates PCI specifications, confuses the IDE subsystem and
79 causes resource conflicts between the primary HD_CMD register and
80 the floppy controller. Ugh. Fix that. */
81 if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
82 dev->resource[0].flags = 0;
83 dev->resource[1].flags = 0;
84 }
85
86 /* The Cypress bridge responds on the PCI bus in the address range 74 /* The Cypress bridge responds on the PCI bus in the address range
87 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no 75 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no
88 way to turn this off. The bridge also supports several extended 76 way to turn this off. The bridge also supports several extended
89 BIOS ranges (disabled after power-up), and some consoles do turn 77 BIOS ranges (disabled after power-up), and some consoles do turn
90 them on. So if we use a large direct-map window, or a large SG 78 them on. So if we use a large direct-map window, or a large SG
91 window, we must avoid the entire 0xfff00000-0xffffffff region. */ 79 window, we must avoid the entire 0xfff00000-0xffffffff region. */
92 else if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) { 80 if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
93 if (__direct_map_base + __direct_map_size >= 0xfff00000UL) 81 if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
94 __direct_map_size = 0xfff00000UL - __direct_map_base; 82 __direct_map_size = 0xfff00000UL - __direct_map_base;
95 else { 83 else {
@@ -391,7 +379,7 @@ pcibios_set_master(struct pci_dev *dev)
391 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); 379 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
392} 380}
393 381
394static void __init 382void __init
395pcibios_claim_one_bus(struct pci_bus *b) 383pcibios_claim_one_bus(struct pci_bus *b)
396{ 384{
397 struct pci_dev *dev; 385 struct pci_dev *dev;
@@ -405,7 +393,8 @@ pcibios_claim_one_bus(struct pci_bus *b)
405 393
406 if (r->parent || !r->start || !r->flags) 394 if (r->parent || !r->start || !r->flags)
407 continue; 395 continue;
408 pci_claim_resource(dev, i); 396 if (pci_probe_only || (r->flags & IORESOURCE_PCI_FIXED))
397 pci_claim_resource(dev, i);
409 } 398 }
410 } 399 }
411 400
@@ -444,8 +433,7 @@ common_init_pci(void)
444 } 433 }
445 } 434 }
446 435
447 if (pci_probe_only) 436 pcibios_claim_console_setup();
448 pcibios_claim_console_setup();
449 437
450 pci_assign_unassigned_resources(); 438 pci_assign_unassigned_resources();
451 pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq); 439 pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 920196bcbb61..a7f23b5ab814 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -187,6 +187,7 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
187} 187}
188 188
189extern void free_reserved_mem(void *, void *); 189extern void free_reserved_mem(void *, void *);
190extern void pcibios_claim_one_bus(struct pci_bus *);
190 191
191static struct resource irongate_mem = { 192static struct resource irongate_mem = {
192 .name = "Irongate PCI MEM", 193 .name = "Irongate PCI MEM",
@@ -205,6 +206,7 @@ nautilus_init_pci(void)
205 /* Scan our single hose. */ 206 /* Scan our single hose. */
206 bus = pci_scan_bus(0, alpha_mv.pci_ops, hose); 207 bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
207 hose->bus = bus; 208 hose->bus = bus;
209 pcibios_claim_one_bus(bus);
208 210
209 irongate = pci_get_bus_and_slot(0, 0); 211 irongate = pci_get_bus_and_slot(0, 0);
210 bus->self = irongate; 212 bus->self = irongate;
diff --git a/arch/arm/mach-lh7a40x/arch-kev7a400.c b/arch/arm/mach-lh7a40x/arch-kev7a400.c
index 6d26661d99f6..2ef7d0097b38 100644
--- a/arch/arm/mach-lh7a40x/arch-kev7a400.c
+++ b/arch/arm/mach-lh7a40x/arch-kev7a400.c
@@ -75,10 +75,9 @@ static void kev7a400_cpld_handler (unsigned int irq, struct irq_desc *desc)
75{ 75{
76 u32 mask = CPLD_LATCHED_INTS; 76 u32 mask = CPLD_LATCHED_INTS;
77 irq = IRQ_KEV7A400_CPLD; 77 irq = IRQ_KEV7A400_CPLD;
78 for (; mask; mask >>= 1, ++irq) { 78 for (; mask; mask >>= 1, ++irq)
79 if (mask & 1) 79 if (mask & 1)
80 desc[irq].handle (irq, desc); 80 desc_handle_irq(irq, desc);
81 }
82} 81}
83 82
84void __init lh7a40x_init_board_irq (void) 83void __init lh7a40x_init_board_irq (void)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 746cbb7c8e95..1b8229d9c9d5 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -32,6 +32,7 @@ config CPU_ARM7TDMI
32 depends on !MMU 32 depends on !MMU
33 select CPU_32v4T 33 select CPU_32v4T
34 select CPU_ABRT_LV4T 34 select CPU_ABRT_LV4T
35 select CPU_PABRT_NOIFAR
35 select CPU_CACHE_V4 36 select CPU_CACHE_V4
36 help 37 help
37 A 32-bit RISC microprocessor based on the ARM7 processor core 38 A 32-bit RISC microprocessor based on the ARM7 processor core
@@ -85,6 +86,7 @@ config CPU_ARM740T
85 depends on !MMU 86 depends on !MMU
86 select CPU_32v4T 87 select CPU_32v4T
87 select CPU_ABRT_LV4T 88 select CPU_ABRT_LV4T
89 select CPU_PABRT_NOIFAR
88 select CPU_CACHE_V3 # although the core is v4t 90 select CPU_CACHE_V3 # although the core is v4t
89 select CPU_CP15_MPU 91 select CPU_CP15_MPU
90 help 92 help
@@ -101,6 +103,7 @@ config CPU_ARM9TDMI
101 depends on !MMU 103 depends on !MMU
102 select CPU_32v4T 104 select CPU_32v4T
103 select CPU_ABRT_NOMMU 105 select CPU_ABRT_NOMMU
106 select CPU_PABRT_NOIFAR
104 select CPU_CACHE_V4 107 select CPU_CACHE_V4
105 help 108 help
106 A 32-bit RISC microprocessor based on the ARM9 processor core 109 A 32-bit RISC microprocessor based on the ARM9 processor core
@@ -200,6 +203,7 @@ config CPU_ARM940T
200 depends on !MMU 203 depends on !MMU
201 select CPU_32v4T 204 select CPU_32v4T
202 select CPU_ABRT_NOMMU 205 select CPU_ABRT_NOMMU
206 select CPU_PABRT_NOIFAR
203 select CPU_CACHE_VIVT 207 select CPU_CACHE_VIVT
204 select CPU_CP15_MPU 208 select CPU_CP15_MPU
205 help 209 help
@@ -217,6 +221,7 @@ config CPU_ARM946E
217 depends on !MMU 221 depends on !MMU
218 select CPU_32v5 222 select CPU_32v5
219 select CPU_ABRT_NOMMU 223 select CPU_ABRT_NOMMU
224 select CPU_PABRT_NOIFAR
220 select CPU_CACHE_VIVT 225 select CPU_CACHE_VIVT
221 select CPU_CP15_MPU 226 select CPU_CP15_MPU
222 help 227 help
@@ -351,6 +356,7 @@ config CPU_XSC3
351 default y 356 default y
352 select CPU_32v5 357 select CPU_32v5
353 select CPU_ABRT_EV5T 358 select CPU_ABRT_EV5T
359 select CPU_PABRT_NOIFAR
354 select CPU_CACHE_VIVT 360 select CPU_CACHE_VIVT
355 select CPU_CP15_MMU 361 select CPU_CP15_MMU
356 select CPU_TLB_V4WBI if MMU 362 select CPU_TLB_V4WBI if MMU
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 32fd7ea533f2..5673f4d6113b 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -471,6 +471,7 @@ arm1020_crval:
471 .type arm1020_processor_functions, #object 471 .type arm1020_processor_functions, #object
472arm1020_processor_functions: 472arm1020_processor_functions:
473 .word v4t_early_abort 473 .word v4t_early_abort
474 .word pabort_noifar
474 .word cpu_arm1020_proc_init 475 .word cpu_arm1020_proc_init
475 .word cpu_arm1020_proc_fin 476 .word cpu_arm1020_proc_fin
476 .word cpu_arm1020_reset 477 .word cpu_arm1020_reset
@@ -478,7 +479,6 @@ arm1020_processor_functions:
478 .word cpu_arm1020_dcache_clean_area 479 .word cpu_arm1020_dcache_clean_area
479 .word cpu_arm1020_switch_mm 480 .word cpu_arm1020_switch_mm
480 .word cpu_arm1020_set_pte_ext 481 .word cpu_arm1020_set_pte_ext
481 .word pabort_noifar
482 .size arm1020_processor_functions, . - arm1020_processor_functions 482 .size arm1020_processor_functions, . - arm1020_processor_functions
483 483
484 .section ".rodata" 484 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index fe2b0ae70274..4343fdb0e9e5 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -452,6 +452,7 @@ arm1020e_crval:
452 .type arm1020e_processor_functions, #object 452 .type arm1020e_processor_functions, #object
453arm1020e_processor_functions: 453arm1020e_processor_functions:
454 .word v4t_early_abort 454 .word v4t_early_abort
455 .word pabort_noifar
455 .word cpu_arm1020e_proc_init 456 .word cpu_arm1020e_proc_init
456 .word cpu_arm1020e_proc_fin 457 .word cpu_arm1020e_proc_fin
457 .word cpu_arm1020e_reset 458 .word cpu_arm1020e_reset
@@ -459,7 +460,6 @@ arm1020e_processor_functions:
459 .word cpu_arm1020e_dcache_clean_area 460 .word cpu_arm1020e_dcache_clean_area
460 .word cpu_arm1020e_switch_mm 461 .word cpu_arm1020e_switch_mm
461 .word cpu_arm1020e_set_pte_ext 462 .word cpu_arm1020e_set_pte_ext
462 .word pabort_noifar
463 .size arm1020e_processor_functions, . - arm1020e_processor_functions 463 .size arm1020e_processor_functions, . - arm1020e_processor_functions
464 464
465 .section ".rodata" 465 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 06dde678e19d..2a4ea1659e96 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -435,6 +435,7 @@ arm1022_crval:
435 .type arm1022_processor_functions, #object 435 .type arm1022_processor_functions, #object
436arm1022_processor_functions: 436arm1022_processor_functions:
437 .word v4t_early_abort 437 .word v4t_early_abort
438 .word pabort_noifar
438 .word cpu_arm1022_proc_init 439 .word cpu_arm1022_proc_init
439 .word cpu_arm1022_proc_fin 440 .word cpu_arm1022_proc_fin
440 .word cpu_arm1022_reset 441 .word cpu_arm1022_reset
@@ -442,7 +443,6 @@ arm1022_processor_functions:
442 .word cpu_arm1022_dcache_clean_area 443 .word cpu_arm1022_dcache_clean_area
443 .word cpu_arm1022_switch_mm 444 .word cpu_arm1022_switch_mm
444 .word cpu_arm1022_set_pte_ext 445 .word cpu_arm1022_set_pte_ext
445 .word pabort_noifar
446 .size arm1022_processor_functions, . - arm1022_processor_functions 446 .size arm1022_processor_functions, . - arm1022_processor_functions
447 447
448 .section ".rodata" 448 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index f5506e6e681e..77a1babd421c 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -430,6 +430,7 @@ arm1026_crval:
430 .type arm1026_processor_functions, #object 430 .type arm1026_processor_functions, #object
431arm1026_processor_functions: 431arm1026_processor_functions:
432 .word v5t_early_abort 432 .word v5t_early_abort
433 .word pabort_noifar
433 .word cpu_arm1026_proc_init 434 .word cpu_arm1026_proc_init
434 .word cpu_arm1026_proc_fin 435 .word cpu_arm1026_proc_fin
435 .word cpu_arm1026_reset 436 .word cpu_arm1026_reset
@@ -437,7 +438,6 @@ arm1026_processor_functions:
437 .word cpu_arm1026_dcache_clean_area 438 .word cpu_arm1026_dcache_clean_area
438 .word cpu_arm1026_switch_mm 439 .word cpu_arm1026_switch_mm
439 .word cpu_arm1026_set_pte_ext 440 .word cpu_arm1026_set_pte_ext
440 .word pabort_noifar
441 .size arm1026_processor_functions, . - arm1026_processor_functions 441 .size arm1026_processor_functions, . - arm1026_processor_functions
442 442
443 .section .rodata 443 .section .rodata
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 14b6a95c8d45..c371fc87776e 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -293,6 +293,7 @@ __arm7_setup: mov r0, #0
293 .type arm6_processor_functions, #object 293 .type arm6_processor_functions, #object
294ENTRY(arm6_processor_functions) 294ENTRY(arm6_processor_functions)
295 .word cpu_arm6_data_abort 295 .word cpu_arm6_data_abort
296 .word pabort_noifar
296 .word cpu_arm6_proc_init 297 .word cpu_arm6_proc_init
297 .word cpu_arm6_proc_fin 298 .word cpu_arm6_proc_fin
298 .word cpu_arm6_reset 299 .word cpu_arm6_reset
@@ -300,7 +301,6 @@ ENTRY(arm6_processor_functions)
300 .word cpu_arm6_dcache_clean_area 301 .word cpu_arm6_dcache_clean_area
301 .word cpu_arm6_switch_mm 302 .word cpu_arm6_switch_mm
302 .word cpu_arm6_set_pte_ext 303 .word cpu_arm6_set_pte_ext
303 .word pabort_noifar
304 .size arm6_processor_functions, . - arm6_processor_functions 304 .size arm6_processor_functions, . - arm6_processor_functions
305 305
306/* 306/*
@@ -310,6 +310,7 @@ ENTRY(arm6_processor_functions)
310 .type arm7_processor_functions, #object 310 .type arm7_processor_functions, #object
311ENTRY(arm7_processor_functions) 311ENTRY(arm7_processor_functions)
312 .word cpu_arm7_data_abort 312 .word cpu_arm7_data_abort
313 .word pabort_noifar
313 .word cpu_arm7_proc_init 314 .word cpu_arm7_proc_init
314 .word cpu_arm7_proc_fin 315 .word cpu_arm7_proc_fin
315 .word cpu_arm7_reset 316 .word cpu_arm7_reset
@@ -317,7 +318,6 @@ ENTRY(arm7_processor_functions)
317 .word cpu_arm7_dcache_clean_area 318 .word cpu_arm7_dcache_clean_area
318 .word cpu_arm7_switch_mm 319 .word cpu_arm7_switch_mm
319 .word cpu_arm7_set_pte_ext 320 .word cpu_arm7_set_pte_ext
320 .word pabort_noifar
321 .size arm7_processor_functions, . - arm7_processor_functions 321 .size arm7_processor_functions, . - arm7_processor_functions
322 322
323 .section ".rodata" 323 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index ca5e7aac2da7..d64f8e6f75ab 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -198,6 +198,7 @@ arm720_crval:
198 .type arm720_processor_functions, #object 198 .type arm720_processor_functions, #object
199ENTRY(arm720_processor_functions) 199ENTRY(arm720_processor_functions)
200 .word v4t_late_abort 200 .word v4t_late_abort
201 .word pabort_noifar
201 .word cpu_arm720_proc_init 202 .word cpu_arm720_proc_init
202 .word cpu_arm720_proc_fin 203 .word cpu_arm720_proc_fin
203 .word cpu_arm720_reset 204 .word cpu_arm720_reset
@@ -205,7 +206,6 @@ ENTRY(arm720_processor_functions)
205 .word cpu_arm720_dcache_clean_area 206 .word cpu_arm720_dcache_clean_area
206 .word cpu_arm720_switch_mm 207 .word cpu_arm720_switch_mm
207 .word cpu_arm720_set_pte_ext 208 .word cpu_arm720_set_pte_ext
208 .word pabort_noifar
209 .size arm720_processor_functions, . - arm720_processor_functions 209 .size arm720_processor_functions, . - arm720_processor_functions
210 210
211 .section ".rodata" 211 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 7069f495cf9b..3a57376c8bc9 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -126,6 +126,7 @@ __arm740_setup:
126 .type arm740_processor_functions, #object 126 .type arm740_processor_functions, #object
127ENTRY(arm740_processor_functions) 127ENTRY(arm740_processor_functions)
128 .word v4t_late_abort 128 .word v4t_late_abort
129 .word pabort_noifar
129 .word cpu_arm740_proc_init 130 .word cpu_arm740_proc_init
130 .word cpu_arm740_proc_fin 131 .word cpu_arm740_proc_fin
131 .word cpu_arm740_reset 132 .word cpu_arm740_reset
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index d091c2571823..7b3ecdeb5370 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -64,6 +64,7 @@ __arm7tdmi_setup:
64 .type arm7tdmi_processor_functions, #object 64 .type arm7tdmi_processor_functions, #object
65ENTRY(arm7tdmi_processor_functions) 65ENTRY(arm7tdmi_processor_functions)
66 .word v4t_late_abort 66 .word v4t_late_abort
67 .word pabort_noifar
67 .word cpu_arm7tdmi_proc_init 68 .word cpu_arm7tdmi_proc_init
68 .word cpu_arm7tdmi_proc_fin 69 .word cpu_arm7tdmi_proc_fin
69 .word cpu_arm7tdmi_reset 70 .word cpu_arm7tdmi_reset
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 0170d4f466ea..28cdb060df45 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -417,6 +417,7 @@ arm920_crval:
417 .type arm920_processor_functions, #object 417 .type arm920_processor_functions, #object
418arm920_processor_functions: 418arm920_processor_functions:
419 .word v4t_early_abort 419 .word v4t_early_abort
420 .word pabort_noifar
420 .word cpu_arm920_proc_init 421 .word cpu_arm920_proc_init
421 .word cpu_arm920_proc_fin 422 .word cpu_arm920_proc_fin
422 .word cpu_arm920_reset 423 .word cpu_arm920_reset
@@ -424,7 +425,6 @@ arm920_processor_functions:
424 .word cpu_arm920_dcache_clean_area 425 .word cpu_arm920_dcache_clean_area
425 .word cpu_arm920_switch_mm 426 .word cpu_arm920_switch_mm
426 .word cpu_arm920_set_pte_ext 427 .word cpu_arm920_set_pte_ext
427 .word pabort_noifar
428 .size arm920_processor_functions, . - arm920_processor_functions 428 .size arm920_processor_functions, . - arm920_processor_functions
429 429
430 .section ".rodata" 430 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index b7952493d404..94ddcb4a4b76 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -421,6 +421,7 @@ arm922_crval:
421 .type arm922_processor_functions, #object 421 .type arm922_processor_functions, #object
422arm922_processor_functions: 422arm922_processor_functions:
423 .word v4t_early_abort 423 .word v4t_early_abort
424 .word pabort_noifar
424 .word cpu_arm922_proc_init 425 .word cpu_arm922_proc_init
425 .word cpu_arm922_proc_fin 426 .word cpu_arm922_proc_fin
426 .word cpu_arm922_reset 427 .word cpu_arm922_reset
@@ -428,7 +429,6 @@ arm922_processor_functions:
428 .word cpu_arm922_dcache_clean_area 429 .word cpu_arm922_dcache_clean_area
429 .word cpu_arm922_switch_mm 430 .word cpu_arm922_switch_mm
430 .word cpu_arm922_set_pte_ext 431 .word cpu_arm922_set_pte_ext
431 .word pabort_noifar
432 .size arm922_processor_functions, . - arm922_processor_functions 432 .size arm922_processor_functions, . - arm922_processor_functions
433 433
434 .section ".rodata" 434 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index e2988eba4cf6..065087afb772 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -484,6 +484,7 @@ arm925_crval:
484 .type arm925_processor_functions, #object 484 .type arm925_processor_functions, #object
485arm925_processor_functions: 485arm925_processor_functions:
486 .word v4t_early_abort 486 .word v4t_early_abort
487 .word pabort_noifar
487 .word cpu_arm925_proc_init 488 .word cpu_arm925_proc_init
488 .word cpu_arm925_proc_fin 489 .word cpu_arm925_proc_fin
489 .word cpu_arm925_reset 490 .word cpu_arm925_reset
@@ -491,7 +492,6 @@ arm925_processor_functions:
491 .word cpu_arm925_dcache_clean_area 492 .word cpu_arm925_dcache_clean_area
492 .word cpu_arm925_switch_mm 493 .word cpu_arm925_switch_mm
493 .word cpu_arm925_set_pte_ext 494 .word cpu_arm925_set_pte_ext
494 .word pabort_noifar
495 .size arm925_processor_functions, . - arm925_processor_functions 495 .size arm925_processor_functions, . - arm925_processor_functions
496 496
497 .section ".rodata" 497 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 62f7d1dfe016..997db8472b5c 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -437,6 +437,7 @@ arm926_crval:
437 .type arm926_processor_functions, #object 437 .type arm926_processor_functions, #object
438arm926_processor_functions: 438arm926_processor_functions:
439 .word v5tj_early_abort 439 .word v5tj_early_abort
440 .word pabort_noifar
440 .word cpu_arm926_proc_init 441 .word cpu_arm926_proc_init
441 .word cpu_arm926_proc_fin 442 .word cpu_arm926_proc_fin
442 .word cpu_arm926_reset 443 .word cpu_arm926_reset
@@ -444,7 +445,6 @@ arm926_processor_functions:
444 .word cpu_arm926_dcache_clean_area 445 .word cpu_arm926_dcache_clean_area
445 .word cpu_arm926_switch_mm 446 .word cpu_arm926_switch_mm
446 .word cpu_arm926_set_pte_ext 447 .word cpu_arm926_set_pte_ext
447 .word pabort_noifar
448 .size arm926_processor_functions, . - arm926_processor_functions 448 .size arm926_processor_functions, . - arm926_processor_functions
449 449
450 .section ".rodata" 450 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 786c593778f0..44ead902bd54 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -321,6 +321,7 @@ __arm940_setup:
321 .type arm940_processor_functions, #object 321 .type arm940_processor_functions, #object
322ENTRY(arm940_processor_functions) 322ENTRY(arm940_processor_functions)
323 .word nommu_early_abort 323 .word nommu_early_abort
324 .word pabort_noifar
324 .word cpu_arm940_proc_init 325 .word cpu_arm940_proc_init
325 .word cpu_arm940_proc_fin 326 .word cpu_arm940_proc_fin
326 .word cpu_arm940_reset 327 .word cpu_arm940_reset
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index a60c1421d450..2218b0c01330 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -376,6 +376,7 @@ __arm946_setup:
376 .type arm946_processor_functions, #object 376 .type arm946_processor_functions, #object
377ENTRY(arm946_processor_functions) 377ENTRY(arm946_processor_functions)
378 .word nommu_early_abort 378 .word nommu_early_abort
379 .word pabort_noifar
379 .word cpu_arm946_proc_init 380 .word cpu_arm946_proc_init
380 .word cpu_arm946_proc_fin 381 .word cpu_arm946_proc_fin
381 .word cpu_arm946_reset 382 .word cpu_arm946_reset
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 4848eeac86b6..c85c1f50e396 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -64,6 +64,7 @@ __arm9tdmi_setup:
64 .type arm9tdmi_processor_functions, #object 64 .type arm9tdmi_processor_functions, #object
65ENTRY(arm9tdmi_processor_functions) 65ENTRY(arm9tdmi_processor_functions)
66 .word nommu_early_abort 66 .word nommu_early_abort
67 .word pabort_noifar
67 .word cpu_arm9tdmi_proc_init 68 .word cpu_arm9tdmi_proc_init
68 .word cpu_arm9tdmi_proc_fin 69 .word cpu_arm9tdmi_proc_fin
69 .word cpu_arm9tdmi_reset 70 .word cpu_arm9tdmi_reset
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 2f169b28e938..90e7594e29b1 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -423,6 +423,7 @@ feroceon_crval:
423 .type feroceon_processor_functions, #object 423 .type feroceon_processor_functions, #object
424feroceon_processor_functions: 424feroceon_processor_functions:
425 .word v5t_early_abort 425 .word v5t_early_abort
426 .word pabort_noifar
426 .word cpu_feroceon_proc_init 427 .word cpu_feroceon_proc_init
427 .word cpu_feroceon_proc_fin 428 .word cpu_feroceon_proc_fin
428 .word cpu_feroceon_reset 429 .word cpu_feroceon_reset
@@ -430,7 +431,6 @@ feroceon_processor_functions:
430 .word cpu_feroceon_dcache_clean_area 431 .word cpu_feroceon_dcache_clean_area
431 .word cpu_feroceon_switch_mm 432 .word cpu_feroceon_switch_mm
432 .word cpu_feroceon_set_pte_ext 433 .word cpu_feroceon_set_pte_ext
433 .word pabort_noifar
434 .size feroceon_processor_functions, . - feroceon_processor_functions 434 .size feroceon_processor_functions, . - feroceon_processor_functions
435 435
436 .section ".rodata" 436 .section ".rodata"
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 4db3d6299a2b..9818195dbf11 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -216,6 +216,7 @@ sa110_crval:
216 .type sa110_processor_functions, #object 216 .type sa110_processor_functions, #object
217ENTRY(sa110_processor_functions) 217ENTRY(sa110_processor_functions)
218 .word v4_early_abort 218 .word v4_early_abort
219 .word pabort_noifar
219 .word cpu_sa110_proc_init 220 .word cpu_sa110_proc_init
220 .word cpu_sa110_proc_fin 221 .word cpu_sa110_proc_fin
221 .word cpu_sa110_reset 222 .word cpu_sa110_reset
@@ -223,7 +224,6 @@ ENTRY(sa110_processor_functions)
223 .word cpu_sa110_dcache_clean_area 224 .word cpu_sa110_dcache_clean_area
224 .word cpu_sa110_switch_mm 225 .word cpu_sa110_switch_mm
225 .word cpu_sa110_set_pte_ext 226 .word cpu_sa110_set_pte_ext
226 .word pabort_noifar
227 .size sa110_processor_functions, . - sa110_processor_functions 227 .size sa110_processor_functions, . - sa110_processor_functions
228 228
229 .section ".rodata" 229 .section ".rodata"
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3cdef043760f..c5fe27ad2892 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -231,6 +231,7 @@ sa1100_crval:
231 .type sa1100_processor_functions, #object 231 .type sa1100_processor_functions, #object
232ENTRY(sa1100_processor_functions) 232ENTRY(sa1100_processor_functions)
233 .word v4_early_abort 233 .word v4_early_abort
234 .word pabort_noifar
234 .word cpu_sa1100_proc_init 235 .word cpu_sa1100_proc_init
235 .word cpu_sa1100_proc_fin 236 .word cpu_sa1100_proc_fin
236 .word cpu_sa1100_reset 237 .word cpu_sa1100_reset
@@ -238,7 +239,6 @@ ENTRY(sa1100_processor_functions)
238 .word cpu_sa1100_dcache_clean_area 239 .word cpu_sa1100_dcache_clean_area
239 .word cpu_sa1100_switch_mm 240 .word cpu_sa1100_switch_mm
240 .word cpu_sa1100_set_pte_ext 241 .word cpu_sa1100_set_pte_ext
241 .word pabort_noifar
242 .size sa1100_processor_functions, . - sa1100_processor_functions 242 .size sa1100_processor_functions, . - sa1100_processor_functions
243 243
244 .section ".rodata" 244 .section ".rodata"
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bf760ea2f789..5702ec58b2a2 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -219,6 +219,7 @@ v6_crval:
219 .type v6_processor_functions, #object 219 .type v6_processor_functions, #object
220ENTRY(v6_processor_functions) 220ENTRY(v6_processor_functions)
221 .word v6_early_abort 221 .word v6_early_abort
222 .word pabort_noifar
222 .word cpu_v6_proc_init 223 .word cpu_v6_proc_init
223 .word cpu_v6_proc_fin 224 .word cpu_v6_proc_fin
224 .word cpu_v6_reset 225 .word cpu_v6_reset
@@ -226,7 +227,6 @@ ENTRY(v6_processor_functions)
226 .word cpu_v6_dcache_clean_area 227 .word cpu_v6_dcache_clean_area
227 .word cpu_v6_switch_mm 228 .word cpu_v6_switch_mm
228 .word cpu_v6_set_pte_ext 229 .word cpu_v6_set_pte_ext
229 .word pabort_noifar
230 .size v6_processor_functions, . - v6_processor_functions 230 .size v6_processor_functions, . - v6_processor_functions
231 231
232 .type cpu_arch_name, #object 232 .type cpu_arch_name, #object
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a1d7331cd64c..b49f9a4c82c8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -205,6 +205,7 @@ __v7_setup_stack:
205 .type v7_processor_functions, #object 205 .type v7_processor_functions, #object
206ENTRY(v7_processor_functions) 206ENTRY(v7_processor_functions)
207 .word v7_early_abort 207 .word v7_early_abort
208 .word pabort_ifar
208 .word cpu_v7_proc_init 209 .word cpu_v7_proc_init
209 .word cpu_v7_proc_fin 210 .word cpu_v7_proc_fin
210 .word cpu_v7_reset 211 .word cpu_v7_reset
@@ -212,7 +213,6 @@ ENTRY(v7_processor_functions)
212 .word cpu_v7_dcache_clean_area 213 .word cpu_v7_dcache_clean_area
213 .word cpu_v7_switch_mm 214 .word cpu_v7_switch_mm
214 .word cpu_v7_set_pte_ext 215 .word cpu_v7_set_pte_ext
215 .word pabort_ifar
216 .size v7_processor_functions, . - v7_processor_functions 216 .size v7_processor_functions, . - v7_processor_functions
217 217
218 .type cpu_arch_name, #object 218 .type cpu_arch_name, #object
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index d95921a2ab99..3533741a76f6 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -450,6 +450,7 @@ xsc3_crval:
450 .type xsc3_processor_functions, #object 450 .type xsc3_processor_functions, #object
451ENTRY(xsc3_processor_functions) 451ENTRY(xsc3_processor_functions)
452 .word v5t_early_abort 452 .word v5t_early_abort
453 .word pabort_noifar
453 .word cpu_xsc3_proc_init 454 .word cpu_xsc3_proc_init
454 .word cpu_xsc3_proc_fin 455 .word cpu_xsc3_proc_fin
455 .word cpu_xsc3_reset 456 .word cpu_xsc3_reset
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 1a6d89823dff..2dd85273976f 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -527,6 +527,7 @@ xscale_crval:
527 .type xscale_processor_functions, #object 527 .type xscale_processor_functions, #object
528ENTRY(xscale_processor_functions) 528ENTRY(xscale_processor_functions)
529 .word v5t_early_abort 529 .word v5t_early_abort
530 .word pabort_noifar
530 .word cpu_xscale_proc_init 531 .word cpu_xscale_proc_init
531 .word cpu_xscale_proc_fin 532 .word cpu_xscale_proc_fin
532 .word cpu_xscale_reset 533 .word cpu_xscale_reset
@@ -534,7 +535,6 @@ ENTRY(xscale_processor_functions)
534 .word cpu_xscale_dcache_clean_area 535 .word cpu_xscale_dcache_clean_area
535 .word cpu_xscale_switch_mm 536 .word cpu_xscale_switch_mm
536 .word cpu_xscale_set_pte_ext 537 .word cpu_xscale_set_pte_ext
537 .word pabort_noifar
538 .size xscale_processor_functions, . - xscale_processor_functions 538 .size xscale_processor_functions, . - xscale_processor_functions
539 539
540 .section ".rodata" 540 .section ".rodata"
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ed21737a00c5..cd13e138bd03 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -266,17 +266,6 @@ config IOSAPIC
266 depends on !IA64_HP_SIM 266 depends on !IA64_HP_SIM
267 default y 267 default y
268 268
269config IA64_SGI_SN_XP
270 tristate "Support communication between SGI SSIs"
271 depends on IA64_GENERIC || IA64_SGI_SN2
272 select IA64_UNCACHED_ALLOCATOR
273 help
274 An SGI machine can be divided into multiple Single System
275 Images which act independently of each other and have
276 hardware based memory protection from the others. Enabling
277 this feature will allow for direct communication between SSIs
278 based on a network adapter and DMA messaging.
279
280config FORCE_MAX_ZONEORDER 269config FORCE_MAX_ZONEORDER
281 int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE 270 int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
282 range 11 17 if !HUGETLB_PAGE 271 range 11 17 if !HUGETLB_PAGE
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 90ef338cf46f..f065093f8e9b 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -194,8 +194,8 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
194 unw_init_running(kdump_cpu_freeze, NULL); 194 unw_init_running(kdump_cpu_freeze, NULL);
195 break; 195 break;
196 case DIE_MCA_MONARCH_LEAVE: 196 case DIE_MCA_MONARCH_LEAVE:
197 /* die_register->signr indicate if MCA is recoverable */ 197 /* *(nd->data) indicate if MCA is recoverable */
198 if (kdump_on_fatal_mca && !args->signr) { 198 if (kdump_on_fatal_mca && !(*(nd->data))) {
199 atomic_set(&kdump_in_progress, 1); 199 atomic_set(&kdump_in_progress, 1);
200 *(nd->monarch_cpu) = -1; 200 *(nd->monarch_cpu) = -1;
201 machine_kdump_on_init(); 201 machine_kdump_on_init();
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index b0be4a280174..e49ad8c5dc69 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -570,6 +570,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
570 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value 570 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
571.ret3: 571.ret3:
572(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk 572(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
573(pUStk) rsm psr.i // disable interrupts
573 br.cond.sptk .work_pending_syscall_end 574 br.cond.sptk .work_pending_syscall_end
574 575
575strace_error: 576strace_error:
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index e51bced3b0fa..705176b434b3 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -109,6 +109,20 @@
109# define IA64_MCA_DEBUG(fmt...) 109# define IA64_MCA_DEBUG(fmt...)
110#endif 110#endif
111 111
112#define NOTIFY_INIT(event, regs, arg, spin) \
113do { \
114 if ((notify_die((event), "INIT", (regs), (arg), 0, 0) \
115 == NOTIFY_STOP) && ((spin) == 1)) \
116 ia64_mca_spin(__func__); \
117} while (0)
118
119#define NOTIFY_MCA(event, regs, arg, spin) \
120do { \
121 if ((notify_die((event), "MCA", (regs), (arg), 0, 0) \
122 == NOTIFY_STOP) && ((spin) == 1)) \
123 ia64_mca_spin(__func__); \
124} while (0)
125
112/* Used by mca_asm.S */ 126/* Used by mca_asm.S */
113DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ 127DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
114DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ 128DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -766,9 +780,8 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
766 780
767 /* Mask all interrupts */ 781 /* Mask all interrupts */
768 local_irq_save(flags); 782 local_irq_save(flags);
769 if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(), 783
770 (long)&nd, 0, 0) == NOTIFY_STOP) 784 NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
771 ia64_mca_spin(__func__);
772 785
773 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; 786 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
774 /* Register with the SAL monarch that the slave has 787 /* Register with the SAL monarch that the slave has
@@ -776,17 +789,13 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
776 */ 789 */
777 ia64_sal_mc_rendez(); 790 ia64_sal_mc_rendez();
778 791
779 if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(), 792 NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
780 (long)&nd, 0, 0) == NOTIFY_STOP)
781 ia64_mca_spin(__func__);
782 793
783 /* Wait for the monarch cpu to exit. */ 794 /* Wait for the monarch cpu to exit. */
784 while (monarch_cpu != -1) 795 while (monarch_cpu != -1)
785 cpu_relax(); /* spin until monarch leaves */ 796 cpu_relax(); /* spin until monarch leaves */
786 797
787 if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(), 798 NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
788 (long)&nd, 0, 0) == NOTIFY_STOP)
789 ia64_mca_spin(__func__);
790 799
791 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 800 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
792 /* Enable all interrupts */ 801 /* Enable all interrupts */
@@ -1256,7 +1265,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1256 int recover, cpu = smp_processor_id(); 1265 int recover, cpu = smp_processor_id();
1257 struct task_struct *previous_current; 1266 struct task_struct *previous_current;
1258 struct ia64_mca_notify_die nd = 1267 struct ia64_mca_notify_die nd =
1259 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1268 { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
1260 static atomic_t mca_count; 1269 static atomic_t mca_count;
1261 static cpumask_t mca_cpu; 1270 static cpumask_t mca_cpu;
1262 1271
@@ -1272,9 +1281,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1272 1281
1273 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); 1282 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
1274 1283
1275 if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0) 1284 NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
1276 == NOTIFY_STOP)
1277 ia64_mca_spin(__func__);
1278 1285
1279 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA; 1286 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
1280 if (sos->monarch) { 1287 if (sos->monarch) {
@@ -1288,13 +1295,12 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1288 * does not work. 1295 * does not work.
1289 */ 1296 */
1290 ia64_mca_wakeup_all(); 1297 ia64_mca_wakeup_all();
1291 if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
1292 == NOTIFY_STOP)
1293 ia64_mca_spin(__func__);
1294 } else { 1298 } else {
1295 while (cpu_isset(cpu, mca_cpu)) 1299 while (cpu_isset(cpu, mca_cpu))
1296 cpu_relax(); /* spin until monarch wakes us */ 1300 cpu_relax(); /* spin until monarch wakes us */
1297 } 1301 }
1302
1303 NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
1298 1304
1299 /* Get the MCA error record and log it */ 1305 /* Get the MCA error record and log it */
1300 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); 1306 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1320,9 +1326,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1320 mca_insert_tr(0x2); /*Reload dynamic itrs*/ 1326 mca_insert_tr(0x2); /*Reload dynamic itrs*/
1321 } 1327 }
1322 1328
1323 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) 1329 NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
1324 == NOTIFY_STOP)
1325 ia64_mca_spin(__func__);
1326 1330
1327 if (atomic_dec_return(&mca_count) > 0) { 1331 if (atomic_dec_return(&mca_count) > 0) {
1328 int i; 1332 int i;
@@ -1643,7 +1647,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1643 struct ia64_mca_notify_die nd = 1647 struct ia64_mca_notify_die nd =
1644 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1648 { .sos = sos, .monarch_cpu = &monarch_cpu };
1645 1649
1646 (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0); 1650 NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
1647 1651
1648 mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n", 1652 mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
1649 sos->proc_state_param, cpu, sos->monarch); 1653 sos->proc_state_param, cpu, sos->monarch);
@@ -1680,17 +1684,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1680 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; 1684 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
1681 while (monarch_cpu == -1) 1685 while (monarch_cpu == -1)
1682 cpu_relax(); /* spin until monarch enters */ 1686 cpu_relax(); /* spin until monarch enters */
1683 if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0) 1687
1684 == NOTIFY_STOP) 1688 NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
1685 ia64_mca_spin(__func__); 1689 NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
1686 if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0) 1690
1687 == NOTIFY_STOP)
1688 ia64_mca_spin(__func__);
1689 while (monarch_cpu != -1) 1691 while (monarch_cpu != -1)
1690 cpu_relax(); /* spin until monarch leaves */ 1692 cpu_relax(); /* spin until monarch leaves */
1691 if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0) 1693
1692 == NOTIFY_STOP) 1694 NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
1693 ia64_mca_spin(__func__); 1695
1694 mprintk("Slave on cpu %d returning to normal service.\n", cpu); 1696 mprintk("Slave on cpu %d returning to normal service.\n", cpu);
1695 set_curr_task(cpu, previous_current); 1697 set_curr_task(cpu, previous_current);
1696 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 1698 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1699,9 +1701,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1699 } 1701 }
1700 1702
1701 monarch_cpu = cpu; 1703 monarch_cpu = cpu;
1702 if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0) 1704 NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
1703 == NOTIFY_STOP)
1704 ia64_mca_spin(__func__);
1705 1705
1706 /* 1706 /*
1707 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be 1707 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1716,12 +1716,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1716 * to default_monarch_init_process() above and just print all the 1716 * to default_monarch_init_process() above and just print all the
1717 * tasks. 1717 * tasks.
1718 */ 1718 */
1719 if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0) 1719 NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
1720 == NOTIFY_STOP) 1720 NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
1721 ia64_mca_spin(__func__); 1721
1722 if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
1723 == NOTIFY_STOP)
1724 ia64_mca_spin(__func__);
1725 mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); 1722 mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
1726 atomic_dec(&monarchs); 1723 atomic_dec(&monarchs);
1727 set_curr_task(cpu, previous_current); 1724 set_curr_task(cpu, previous_current);
@@ -1953,7 +1950,7 @@ ia64_mca_init(void)
1953 printk(KERN_INFO "Increasing MCA rendezvous timeout from " 1950 printk(KERN_INFO "Increasing MCA rendezvous timeout from "
1954 "%ld to %ld milliseconds\n", timeout, isrv.v0); 1951 "%ld to %ld milliseconds\n", timeout, isrv.v0);
1955 timeout = isrv.v0; 1952 timeout = isrv.v0;
1956 (void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0); 1953 NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
1957 continue; 1954 continue;
1958 } 1955 }
1959 printk(KERN_ERR "Failed to register rendezvous interrupt " 1956 printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d1d24f4598da..c8e403752a0c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5511,7 +5511,7 @@ stop_monitoring:
5511} 5511}
5512 5512
5513static int 5513static int
5514pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs) 5514pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5515{ 5515{
5516 struct task_struct *task; 5516 struct task_struct *task;
5517 pfm_context_t *ctx; 5517 pfm_context_t *ctx;
@@ -5591,7 +5591,7 @@ pfm_interrupt_handler(int irq, void *arg)
5591 5591
5592 start_cycles = ia64_get_itc(); 5592 start_cycles = ia64_get_itc();
5593 5593
5594 ret = pfm_do_interrupt_handler(irq, arg, regs); 5594 ret = pfm_do_interrupt_handler(arg, regs);
5595 5595
5596 total_cycles = ia64_get_itc(); 5596 total_cycles = ia64_get_itc();
5597 5597
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 688a3c27e0f6..0591038735af 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,7 +4,7 @@
4# License. See the file "COPYING" in the main directory of this archive 4# License. See the file "COPYING" in the main directory of this archive
5# for more details. 5# for more details.
6# 6#
7# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All Rights Reserved. 7# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved.
8# 8#
9 9
10EXTRA_CFLAGS += -Iarch/ia64/sn/include 10EXTRA_CFLAGS += -Iarch/ia64/sn/include
@@ -15,9 +15,4 @@ obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
15 sn2/ 15 sn2/
16obj-$(CONFIG_IA64_GENERIC) += machvec.o 16obj-$(CONFIG_IA64_GENERIC) += machvec.o
17obj-$(CONFIG_SGI_TIOCX) += tiocx.o 17obj-$(CONFIG_SGI_TIOCX) += tiocx.o
18obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
19xp-y := xp_main.o xp_nofault.o
20obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
21xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
22obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
23obj-$(CONFIG_PCI_MSI) += msi_sn.o 18obj-$(CONFIG_PCI_MSI) += msi_sn.o
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 0101c7924a4d..08b0d9bb62ec 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -187,8 +187,8 @@ void hub_error_init(struct hubdev_info *hubdev_info)
187{ 187{
188 188
189 if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED, 189 if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
190 "SN_hub_error", (void *)hubdev_info)) { 190 "SN_hub_error", hubdev_info)) {
191 printk("hub_error_init: Failed to request_irq for 0x%p\n", 191 printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
192 hubdev_info); 192 hubdev_info);
193 return; 193 return;
194 } 194 }
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 9b3c11373022..94e584527f48 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -655,7 +655,8 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
655 * 655 *
656 * Simply call tioce_do_dma_map() to create a map with the barrier bit set 656 * Simply call tioce_do_dma_map() to create a map with the barrier bit set
657 * in the address. 657 * in the address.
658 */ static u64 658 */
659static u64
659tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) 660tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
660{ 661{
661 return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags); 662 return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
@@ -668,7 +669,8 @@ tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma
668 * 669 *
669 * Handle a CE error interrupt. Simply a wrapper around a SAL call which 670 * Handle a CE error interrupt. Simply a wrapper around a SAL call which
670 * defers processing to the SGI prom. 671 * defers processing to the SGI prom.
671 */ static irqreturn_t 672 */
673static irqreturn_t
672tioce_error_intr_handler(int irq, void *arg) 674tioce_error_intr_handler(int irq, void *arg)
673{ 675{
674 struct tioce_common *soft = arg; 676 struct tioce_common *soft = arg;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index be1cc5143354..ef522ae55480 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -530,7 +530,8 @@ void bus_remove_device(struct device *dev)
530 sysfs_remove_link(&dev->bus->p->devices_kset->kobj, 530 sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
531 dev->bus_id); 531 dev->bus_id);
532 device_remove_attrs(dev->bus, dev); 532 device_remove_attrs(dev->bus, dev);
533 klist_del(&dev->knode_bus); 533 if (klist_node_attached(&dev->knode_bus))
534 klist_del(&dev->knode_bus);
534 535
535 pr_debug("bus: '%s': remove device %s\n", 536 pr_debug("bus: '%s': remove device %s\n",
536 dev->bus->name, dev->bus_id); 537 dev->bus->name, dev->bus_id);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c4568b82875b..7b76fd3b93a4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -62,7 +62,7 @@ static bool all_sleeping;
62 */ 62 */
63int device_pm_add(struct device *dev) 63int device_pm_add(struct device *dev)
64{ 64{
65 int error = 0; 65 int error;
66 66
67 pr_debug("PM: Adding info for %s:%s\n", 67 pr_debug("PM: Adding info for %s:%s\n",
68 dev->bus ? dev->bus->name : "No Bus", 68 dev->bus ? dev->bus->name : "No Bus",
@@ -70,18 +70,15 @@ int device_pm_add(struct device *dev)
70 mutex_lock(&dpm_list_mtx); 70 mutex_lock(&dpm_list_mtx);
71 if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) { 71 if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) {
72 if (dev->parent->power.sleeping) 72 if (dev->parent->power.sleeping)
73 dev_warn(dev, 73 dev_warn(dev, "parent %s is sleeping\n",
74 "parent %s is sleeping, will not add\n",
75 dev->parent->bus_id); 74 dev->parent->bus_id);
76 else 75 else
77 dev_warn(dev, "devices are sleeping, will not add\n"); 76 dev_warn(dev, "all devices are sleeping\n");
78 WARN_ON(true); 77 WARN_ON(true);
79 error = -EBUSY;
80 } else {
81 error = dpm_sysfs_add(dev);
82 if (!error)
83 list_add_tail(&dev->power.entry, &dpm_active);
84 } 78 }
79 error = dpm_sysfs_add(dev);
80 if (!error)
81 list_add_tail(&dev->power.entry, &dpm_active);
85 mutex_unlock(&dpm_list_mtx); 82 mutex_unlock(&dpm_list_mtx);
86 return error; 83 return error;
87} 84}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 85364804364f..7bd76639544c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -108,7 +108,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
108#ifndef CONFIG_BLK_DEV_XIP 108#ifndef CONFIG_BLK_DEV_XIP
109 gfp_flags |= __GFP_HIGHMEM; 109 gfp_flags |= __GFP_HIGHMEM;
110#endif 110#endif
111 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO); 111 page = alloc_page(gfp_flags);
112 if (!page) 112 if (!page)
113 return NULL; 113 return NULL;
114 114
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f0b00ec1e47e..e03c67dd3e63 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -44,8 +44,8 @@
44 44
45#ifdef CONFIG_HID_DEBUG 45#ifdef CONFIG_HID_DEBUG
46int hid_debug = 0; 46int hid_debug = 0;
47module_param_named(debug, hid_debug, bool, 0600); 47module_param_named(debug, hid_debug, int, 0600);
48MODULE_PARM_DESC(debug, "Turn HID debugging mode on and off"); 48MODULE_PARM_DESC(debug, "HID debugging (0=off, 1=probing info, 2=continuous data dumping)");
49EXPORT_SYMBOL_GPL(hid_debug); 49EXPORT_SYMBOL_GPL(hid_debug);
50#endif 50#endif
51 51
@@ -97,7 +97,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
97 field->index = report->maxfield++; 97 field->index = report->maxfield++;
98 report->field[field->index] = field; 98 report->field[field->index] = field;
99 field->usage = (struct hid_usage *)(field + 1); 99 field->usage = (struct hid_usage *)(field + 1);
100 field->value = (unsigned *)(field->usage + usages); 100 field->value = (s32 *)(field->usage + usages);
101 field->report = report; 101 field->report = report;
102 102
103 return field; 103 return field;
@@ -830,7 +830,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field, s
830 * reporting to the layer). 830 * reporting to the layer).
831 */ 831 */
832 832
833void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data, int interrupt) 833static void hid_input_field(struct hid_device *hid, struct hid_field *field,
834 __u8 *data, int interrupt)
834{ 835{
835 unsigned n; 836 unsigned n;
836 unsigned count = field->report_count; 837 unsigned count = field->report_count;
@@ -876,7 +877,6 @@ void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data
876exit: 877exit:
877 kfree(value); 878 kfree(value);
878} 879}
879EXPORT_SYMBOL_GPL(hid_input_field);
880 880
881/* 881/*
882 * Output the field into the report. 882 * Output the field into the report.
@@ -988,8 +988,13 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
988 988
989 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 989 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
990 hid->hiddev_report_event(hid, report); 990 hid->hiddev_report_event(hid, report);
991 if (hid->claimed & HID_CLAIMED_HIDRAW) 991 if (hid->claimed & HID_CLAIMED_HIDRAW) {
992 hidraw_report_event(hid, data, size); 992 /* numbered reports need to be passed with the report num */
993 if (report_enum->numbered)
994 hidraw_report_event(hid, data - 1, size + 1);
995 else
996 hidraw_report_event(hid, data, size);
997 }
993 998
994 for (n = 0; n < report->maxfield; n++) 999 for (n = 0; n < report->maxfield; n++)
995 hid_input_field(hid, report->field[n], data, interrupt); 1000 hid_input_field(hid, report->field[n], data, interrupt);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 5c24fe46d8eb..f88714b06000 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -498,7 +498,7 @@ void hid_dump_device(struct hid_device *device) {
498EXPORT_SYMBOL_GPL(hid_dump_device); 498EXPORT_SYMBOL_GPL(hid_dump_device);
499 499
500void hid_dump_input(struct hid_usage *usage, __s32 value) { 500void hid_dump_input(struct hid_usage *usage, __s32 value) {
501 if (!hid_debug) 501 if (hid_debug < 2)
502 return; 502 return;
503 503
504 printk(KERN_DEBUG "hid-debug: input "); 504 printk(KERN_DEBUG "hid-debug: input ");
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index dceadd0c1419..4c2052c658f1 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -276,6 +276,21 @@ static int quirk_btc_8193(struct hid_usage *usage, struct input_dev *input,
276 return 1; 276 return 1;
277} 277}
278 278
279static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *input,
280 unsigned long **bit, int *max)
281{
282 if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
283 return 0;
284
285 switch (usage->hid & HID_USAGE) {
286 case 0x2003: map_key_clear(KEY_ZOOMIN); break;
287 case 0x2103: map_key_clear(KEY_ZOOMOUT); break;
288 default:
289 return 0;
290 }
291 return 1;
292}
293
279#define VENDOR_ID_BELKIN 0x1020 294#define VENDOR_ID_BELKIN 0x1020
280#define DEVICE_ID_BELKIN_WIRELESS_KEYBOARD 0x0006 295#define DEVICE_ID_BELKIN_WIRELESS_KEYBOARD 0x0006
281 296
@@ -306,6 +321,9 @@ static int quirk_btc_8193(struct hid_usage *usage, struct input_dev *input,
306#define VENDOR_ID_PETALYNX 0x18b1 321#define VENDOR_ID_PETALYNX 0x18b1
307#define DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 322#define DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
308 323
324#define VENDOR_ID_SUNPLUS 0x04fc
325#define DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8
326
309static const struct hid_input_blacklist { 327static const struct hid_input_blacklist {
310 __u16 idVendor; 328 __u16 idVendor;
311 __u16 idProduct; 329 __u16 idProduct;
@@ -332,8 +350,10 @@ static const struct hid_input_blacklist {
332 { VENDOR_ID_MONTEREY, DEVICE_ID_GENIUS_KB29E, quirk_cherry_genius_29e }, 350 { VENDOR_ID_MONTEREY, DEVICE_ID_GENIUS_KB29E, quirk_cherry_genius_29e },
333 351
334 { VENDOR_ID_PETALYNX, DEVICE_ID_PETALYNX_MAXTER_REMOTE, quirk_petalynx_remote }, 352 { VENDOR_ID_PETALYNX, DEVICE_ID_PETALYNX_MAXTER_REMOTE, quirk_petalynx_remote },
335 353
336 { 0, 0, 0 } 354 { VENDOR_ID_SUNPLUS, DEVICE_ID_SUNPLUS_WDESKTOP, quirk_sunplus_wdesktop },
355
356 { 0, 0, NULL }
337}; 357};
338 358
339int hidinput_mapping_quirks(struct hid_usage *usage, 359int hidinput_mapping_quirks(struct hid_usage *usage,
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 7160fa65d79b..18f09104765c 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -71,6 +71,14 @@ config LOGITECH_FF
71 Note: if you say N here, this device will still be supported, but without 71 Note: if you say N here, this device will still be supported, but without
72 force feedback. 72 force feedback.
73 73
74config LOGIRUMBLEPAD2_FF
75 bool "Logitech Rumblepad 2 support"
76 depends on HID_FF
77 select INPUT_FF_MEMLESS if USB_HID
78 help
79 Say Y here if you want to enable force feedback support for Logitech
80 Rumblepad 2 devices.
81
74config PANTHERLORD_FF 82config PANTHERLORD_FF
75 bool "PantherLord/GreenAsia based device support" 83 bool "PantherLord/GreenAsia based device support"
76 depends on HID_FF 84 depends on HID_FF
@@ -80,8 +88,8 @@ config PANTHERLORD_FF
80 or adapter and want to enable force feedback support for it. 88 or adapter and want to enable force feedback support for it.
81 89
82config THRUSTMASTER_FF 90config THRUSTMASTER_FF
83 bool "ThrustMaster devices support (EXPERIMENTAL)" 91 bool "ThrustMaster devices support"
84 depends on HID_FF && EXPERIMENTAL 92 depends on HID_FF
85 select INPUT_FF_MEMLESS if USB_HID 93 select INPUT_FF_MEMLESS if USB_HID
86 help 94 help
87 Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or 95 Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 8e6ab5b164a2..00a7b7090192 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -16,6 +16,9 @@ endif
16ifeq ($(CONFIG_LOGITECH_FF),y) 16ifeq ($(CONFIG_LOGITECH_FF),y)
17 usbhid-objs += hid-lgff.o 17 usbhid-objs += hid-lgff.o
18endif 18endif
19ifeq ($(CONFIG_LOGIRUMBLEPAD2_FF),y)
20 usbhid-objs += hid-lg2ff.o
21endif
19ifeq ($(CONFIG_PANTHERLORD_FF),y) 22ifeq ($(CONFIG_PANTHERLORD_FF),y)
20 usbhid-objs += hid-plff.o 23 usbhid-objs += hid-plff.o
21endif 24endif
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index d95979f0e028..e0d805f1b2bf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -82,6 +82,7 @@ static int hid_start_in(struct hid_device *hid)
82 82
83 spin_lock_irqsave(&usbhid->inlock, flags); 83 spin_lock_irqsave(&usbhid->inlock, flags);
84 if (hid->open > 0 && !test_bit(HID_SUSPENDED, &usbhid->iofl) && 84 if (hid->open > 0 && !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
85 !test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
85 !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) { 86 !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
86 rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC); 87 rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC);
87 if (rc != 0) 88 if (rc != 0)
@@ -155,7 +156,7 @@ static void hid_io_error(struct hid_device *hid)
155 spin_lock_irqsave(&usbhid->inlock, flags); 156 spin_lock_irqsave(&usbhid->inlock, flags);
156 157
157 /* Stop when disconnected */ 158 /* Stop when disconnected */
158 if (usb_get_intfdata(usbhid->intf) == NULL) 159 if (test_bit(HID_DISCONNECTED, &usbhid->iofl))
159 goto done; 160 goto done;
160 161
161 /* If it has been a while since the last error, we'll assume 162 /* If it has been a while since the last error, we'll assume
@@ -341,7 +342,7 @@ static void hid_irq_out(struct urb *urb)
341 if (usbhid->outhead != usbhid->outtail) { 342 if (usbhid->outhead != usbhid->outtail) {
342 if (hid_submit_out(hid)) { 343 if (hid_submit_out(hid)) {
343 clear_bit(HID_OUT_RUNNING, &usbhid->iofl); 344 clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
344 wake_up(&hid->wait); 345 wake_up(&usbhid->wait);
345 } 346 }
346 spin_unlock_irqrestore(&usbhid->outlock, flags); 347 spin_unlock_irqrestore(&usbhid->outlock, flags);
347 return; 348 return;
@@ -349,7 +350,7 @@ static void hid_irq_out(struct urb *urb)
349 350
350 clear_bit(HID_OUT_RUNNING, &usbhid->iofl); 351 clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
351 spin_unlock_irqrestore(&usbhid->outlock, flags); 352 spin_unlock_irqrestore(&usbhid->outlock, flags);
352 wake_up(&hid->wait); 353 wake_up(&usbhid->wait);
353} 354}
354 355
355/* 356/*
@@ -391,7 +392,7 @@ static void hid_ctrl(struct urb *urb)
391 if (usbhid->ctrlhead != usbhid->ctrltail) { 392 if (usbhid->ctrlhead != usbhid->ctrltail) {
392 if (hid_submit_ctrl(hid)) { 393 if (hid_submit_ctrl(hid)) {
393 clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); 394 clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
394 wake_up(&hid->wait); 395 wake_up(&usbhid->wait);
395 } 396 }
396 spin_unlock_irqrestore(&usbhid->ctrllock, flags); 397 spin_unlock_irqrestore(&usbhid->ctrllock, flags);
397 return; 398 return;
@@ -399,7 +400,7 @@ static void hid_ctrl(struct urb *urb)
399 400
400 clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); 401 clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
401 spin_unlock_irqrestore(&usbhid->ctrllock, flags); 402 spin_unlock_irqrestore(&usbhid->ctrllock, flags);
402 wake_up(&hid->wait); 403 wake_up(&usbhid->wait);
403} 404}
404 405
405void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) 406void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir)
@@ -478,8 +479,9 @@ int usbhid_wait_io(struct hid_device *hid)
478{ 479{
479 struct usbhid_device *usbhid = hid->driver_data; 480 struct usbhid_device *usbhid = hid->driver_data;
480 481
481 if (!wait_event_timeout(hid->wait, (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) && 482 if (!wait_event_timeout(usbhid->wait,
482 !test_bit(HID_OUT_RUNNING, &usbhid->iofl)), 483 (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
484 !test_bit(HID_OUT_RUNNING, &usbhid->iofl)),
483 10*HZ)) { 485 10*HZ)) {
484 dbg_hid("timeout waiting for ctrl or out queue to clear\n"); 486 dbg_hid("timeout waiting for ctrl or out queue to clear\n");
485 return -1; 487 return -1;
@@ -610,10 +612,11 @@ static void usbhid_set_leds(struct hid_device *hid)
610/* 612/*
611 * Traverse the supplied list of reports and find the longest 613 * Traverse the supplied list of reports and find the longest
612 */ 614 */
613static void hid_find_max_report(struct hid_device *hid, unsigned int type, int *max) 615static void hid_find_max_report(struct hid_device *hid, unsigned int type,
616 unsigned int *max)
614{ 617{
615 struct hid_report *report; 618 struct hid_report *report;
616 int size; 619 unsigned int size;
617 620
618 list_for_each_entry(report, &hid->report_enum[type].report_list, list) { 621 list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
619 size = ((report->size - 1) >> 3) + 1; 622 size = ((report->size - 1) >> 3) + 1;
@@ -705,9 +708,9 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
705 struct hid_descriptor *hdesc; 708 struct hid_descriptor *hdesc;
706 struct hid_device *hid; 709 struct hid_device *hid;
707 u32 quirks = 0; 710 u32 quirks = 0;
708 unsigned rsize = 0; 711 unsigned int insize = 0, rsize = 0;
709 char *rdesc; 712 char *rdesc;
710 int n, len, insize = 0; 713 int n, len;
711 struct usbhid_device *usbhid; 714 struct usbhid_device *usbhid;
712 715
713 quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), 716 quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
@@ -800,6 +803,22 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
800 goto fail; 803 goto fail;
801 } 804 }
802 805
806 hid->name[0] = 0;
807
808 if (dev->manufacturer)
809 strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
810
811 if (dev->product) {
812 if (dev->manufacturer)
813 strlcat(hid->name, " ", sizeof(hid->name));
814 strlcat(hid->name, dev->product, sizeof(hid->name));
815 }
816
817 if (!strlen(hid->name))
818 snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x",
819 le16_to_cpu(dev->descriptor.idVendor),
820 le16_to_cpu(dev->descriptor.idProduct));
821
803 for (n = 0; n < interface->desc.bNumEndpoints; n++) { 822 for (n = 0; n < interface->desc.bNumEndpoints; n++) {
804 823
805 struct usb_endpoint_descriptor *endpoint; 824 struct usb_endpoint_descriptor *endpoint;
@@ -812,6 +831,14 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
812 831
813 interval = endpoint->bInterval; 832 interval = endpoint->bInterval;
814 833
834 /* Some vendors give fullspeed interval on highspeed devides */
835 if (quirks & HID_QUIRK_FULLSPEED_INTERVAL &&
836 dev->speed == USB_SPEED_HIGH) {
837 interval = fls(endpoint->bInterval*8);
838 printk(KERN_INFO "%s: Fixing fullspeed to highspeed interval: %d -> %d\n",
839 hid->name, endpoint->bInterval, interval);
840 }
841
815 /* Change the polling interval of mice. */ 842 /* Change the polling interval of mice. */
816 if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0) 843 if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0)
817 interval = hid_mousepoll_interval; 844 interval = hid_mousepoll_interval;
@@ -844,8 +871,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
844 goto fail; 871 goto fail;
845 } 872 }
846 873
847 init_waitqueue_head(&hid->wait); 874 init_waitqueue_head(&usbhid->wait);
848
849 INIT_WORK(&usbhid->reset_work, hid_reset); 875 INIT_WORK(&usbhid->reset_work, hid_reset);
850 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); 876 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
851 877
@@ -859,22 +885,6 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
859 usbhid->intf = intf; 885 usbhid->intf = intf;
860 usbhid->ifnum = interface->desc.bInterfaceNumber; 886 usbhid->ifnum = interface->desc.bInterfaceNumber;
861 887
862 hid->name[0] = 0;
863
864 if (dev->manufacturer)
865 strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
866
867 if (dev->product) {
868 if (dev->manufacturer)
869 strlcat(hid->name, " ", sizeof(hid->name));
870 strlcat(hid->name, dev->product, sizeof(hid->name));
871 }
872
873 if (!strlen(hid->name))
874 snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x",
875 le16_to_cpu(dev->descriptor.idVendor),
876 le16_to_cpu(dev->descriptor.idProduct));
877
878 hid->bus = BUS_USB; 888 hid->bus = BUS_USB;
879 hid->vendor = le16_to_cpu(dev->descriptor.idVendor); 889 hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
880 hid->product = le16_to_cpu(dev->descriptor.idProduct); 890 hid->product = le16_to_cpu(dev->descriptor.idProduct);
@@ -932,6 +942,7 @@ static void hid_disconnect(struct usb_interface *intf)
932 942
933 spin_lock_irq(&usbhid->inlock); /* Sync with error handler */ 943 spin_lock_irq(&usbhid->inlock); /* Sync with error handler */
934 usb_set_intfdata(intf, NULL); 944 usb_set_intfdata(intf, NULL);
945 set_bit(HID_DISCONNECTED, &usbhid->iofl);
935 spin_unlock_irq(&usbhid->inlock); 946 spin_unlock_irq(&usbhid->inlock);
936 usb_kill_urb(usbhid->urbin); 947 usb_kill_urb(usbhid->urbin);
937 usb_kill_urb(usbhid->urbout); 948 usb_kill_urb(usbhid->urbout);
diff --git a/drivers/hid/usbhid/hid-ff.c b/drivers/hid/usbhid/hid-ff.c
index 4c210e16b1b4..1d0dac52f166 100644
--- a/drivers/hid/usbhid/hid-ff.c
+++ b/drivers/hid/usbhid/hid-ff.c
@@ -59,6 +59,9 @@ static struct hid_ff_initializer inits[] = {
59 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */ 59 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */
60 { 0x46d, 0xca03, hid_lgff_init }, /* Logitech MOMO force wheel */ 60 { 0x46d, 0xca03, hid_lgff_init }, /* Logitech MOMO force wheel */
61#endif 61#endif
62#ifdef CONFIG_LOGIRUMBLEPAD2_FF
63 { 0x46d, 0xc218, hid_lg2ff_init }, /* Logitech Rumblepad 2 */
64#endif
62#ifdef CONFIG_PANTHERLORD_FF 65#ifdef CONFIG_PANTHERLORD_FF
63 { 0x810, 0x0001, hid_plff_init }, /* "Twin USB Joystick" */ 66 { 0x810, 0x0001, hid_plff_init }, /* "Twin USB Joystick" */
64 { 0xe8f, 0x0003, hid_plff_init }, /* "GreenAsia Inc. USB Joystick " */ 67 { 0xe8f, 0x0003, hid_plff_init }, /* "GreenAsia Inc. USB Joystick " */
diff --git a/drivers/hid/usbhid/hid-lg2ff.c b/drivers/hid/usbhid/hid-lg2ff.c
new file mode 100644
index 000000000000..d469bd0061c9
--- /dev/null
+++ b/drivers/hid/usbhid/hid-lg2ff.c
@@ -0,0 +1,114 @@
1/*
2 * Force feedback support for Logitech Rumblepad 2
3 *
4 * Copyright (c) 2008 Anssi Hannula <anssi.hannula@gmail.com>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23
24#include <linux/input.h>
25#include <linux/usb.h>
26#include <linux/hid.h>
27#include "usbhid.h"
28
29struct lg2ff_device {
30 struct hid_report *report;
31};
32
33static int play_effect(struct input_dev *dev, void *data,
34 struct ff_effect *effect)
35{
36 struct hid_device *hid = input_get_drvdata(dev);
37 struct lg2ff_device *lg2ff = data;
38 int weak, strong;
39
40 strong = effect->u.rumble.strong_magnitude;
41 weak = effect->u.rumble.weak_magnitude;
42
43 if (weak || strong) {
44 weak = weak * 0xff / 0xffff;
45 strong = strong * 0xff / 0xffff;
46
47 lg2ff->report->field[0]->value[0] = 0x51;
48 lg2ff->report->field[0]->value[2] = weak;
49 lg2ff->report->field[0]->value[4] = strong;
50 } else {
51 lg2ff->report->field[0]->value[0] = 0xf3;
52 lg2ff->report->field[0]->value[2] = 0x00;
53 lg2ff->report->field[0]->value[4] = 0x00;
54 }
55
56 usbhid_submit_report(hid, lg2ff->report, USB_DIR_OUT);
57 return 0;
58}
59
60int hid_lg2ff_init(struct hid_device *hid)
61{
62 struct lg2ff_device *lg2ff;
63 struct hid_report *report;
64 struct hid_input *hidinput = list_entry(hid->inputs.next,
65 struct hid_input, list);
66 struct list_head *report_list =
67 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
68 struct input_dev *dev = hidinput->input;
69 int error;
70
71 if (list_empty(report_list)) {
72 printk(KERN_ERR "hid-lg2ff: no output report found\n");
73 return -ENODEV;
74 }
75
76 report = list_entry(report_list->next, struct hid_report, list);
77
78 if (report->maxfield < 1) {
79 printk(KERN_ERR "hid-lg2ff: output report is empty\n");
80 return -ENODEV;
81 }
82 if (report->field[0]->report_count < 7) {
83 printk(KERN_ERR "hid-lg2ff: not enough values in the field\n");
84 return -ENODEV;
85 }
86
87 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
88 if (!lg2ff)
89 return -ENOMEM;
90
91 set_bit(FF_RUMBLE, dev->ffbit);
92
93 error = input_ff_create_memless(dev, lg2ff, play_effect);
94 if (error) {
95 kfree(lg2ff);
96 return error;
97 }
98
99 lg2ff->report = report;
100 report->field[0]->value[0] = 0xf3;
101 report->field[0]->value[1] = 0x00;
102 report->field[0]->value[2] = 0x00;
103 report->field[0]->value[3] = 0x00;
104 report->field[0]->value[4] = 0x00;
105 report->field[0]->value[5] = 0x00;
106 report->field[0]->value[6] = 0x00;
107
108 usbhid_submit_report(hid, report, USB_DIR_OUT);
109
110 printk(KERN_INFO "Force feedback for Logitech Rumblepad 2 by "
111 "Anssi Hannula <anssi.hannula@gmail.com>\n");
112
113 return 0;
114}
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e29a057cbea2..28ddc3fdd3d1 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -32,6 +32,9 @@
32#define USB_VENDOR_ID_ADS_TECH 0x06e1 32#define USB_VENDOR_ID_ADS_TECH 0x06e1
33#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 33#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
34 34
35#define USB_VENDOR_ID_AFATECH 0x15a4
36#define USB_DEVICE_ID_AFATECH_AF9016 0x9016
37
35#define USB_VENDOR_ID_AIPTEK 0x08ca 38#define USB_VENDOR_ID_AIPTEK 0x08ca
36#define USB_DEVICE_ID_AIPTEK_01 0x0001 39#define USB_DEVICE_ID_AIPTEK_01 0x0001
37#define USB_DEVICE_ID_AIPTEK_10 0x0010 40#define USB_DEVICE_ID_AIPTEK_10 0x0010
@@ -124,6 +127,9 @@
124#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 127#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
125#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 128#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
126 129
130#define USB_VENDOR_ID_DMI 0x0c0b
131#define USB_DEVICE_ID_DMI_ENC 0x5fab
132
127#define USB_VENDOR_ID_ELO 0x04E7 133#define USB_VENDOR_ID_ELO 0x04E7
128#define USB_DEVICE_ID_ELO_TS2700 0x0020 134#define USB_DEVICE_ID_ELO_TS2700 0x0020
129 135
@@ -199,17 +205,6 @@
199#define USB_DEVICE_ID_GTCO_502 0x0502 205#define USB_DEVICE_ID_GTCO_502 0x0502
200#define USB_DEVICE_ID_GTCO_503 0x0503 206#define USB_DEVICE_ID_GTCO_503 0x0503
201#define USB_DEVICE_ID_GTCO_504 0x0504 207#define USB_DEVICE_ID_GTCO_504 0x0504
202#define USB_DEVICE_ID_GTCO_600 0x0600
203#define USB_DEVICE_ID_GTCO_601 0x0601
204#define USB_DEVICE_ID_GTCO_602 0x0602
205#define USB_DEVICE_ID_GTCO_603 0x0603
206#define USB_DEVICE_ID_GTCO_604 0x0604
207#define USB_DEVICE_ID_GTCO_605 0x0605
208#define USB_DEVICE_ID_GTCO_606 0x0606
209#define USB_DEVICE_ID_GTCO_607 0x0607
210#define USB_DEVICE_ID_GTCO_608 0x0608
211#define USB_DEVICE_ID_GTCO_609 0x0609
212#define USB_DEVICE_ID_GTCO_609 0x0609
213#define USB_DEVICE_ID_GTCO_1000 0x1000 208#define USB_DEVICE_ID_GTCO_1000 0x1000
214#define USB_DEVICE_ID_GTCO_1001 0x1001 209#define USB_DEVICE_ID_GTCO_1001 0x1001
215#define USB_DEVICE_ID_GTCO_1002 0x1002 210#define USB_DEVICE_ID_GTCO_1002 0x1002
@@ -320,6 +315,7 @@
320#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 315#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
321#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 316#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
322#define USB_DEVICE_ID_DINOVO_EDGE 0xc714 317#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
318#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
323 319
324#define USB_VENDOR_ID_MCC 0x09db 320#define USB_VENDOR_ID_MCC 0x09db
325#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 321#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -332,6 +328,7 @@
332#define USB_VENDOR_ID_MICROSOFT 0x045e 328#define USB_VENDOR_ID_MICROSOFT 0x045e
333#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b 329#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
334#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d 330#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
331#define USB_DEVICE_ID_DESKTOP_RECV_1028 0x00f9
335#define USB_DEVICE_ID_MS_NE4K 0x00db 332#define USB_DEVICE_ID_MS_NE4K 0x00db
336#define USB_DEVICE_ID_MS_LK6K 0x00f9 333#define USB_DEVICE_ID_MS_LK6K 0x00f9
337 334
@@ -377,6 +374,9 @@
377#define USB_VENDOR_ID_SUN 0x0430 374#define USB_VENDOR_ID_SUN 0x0430
378#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab 375#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
379 376
377#define USB_VENDOR_ID_SUNPLUS 0x04fc
378#define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8
379
380#define USB_VENDOR_ID_TOPMAX 0x0663 380#define USB_VENDOR_ID_TOPMAX 0x0663
381#define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103 381#define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103
382 382
@@ -435,9 +435,13 @@ static const struct hid_blacklist {
435 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, 435 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
436 436
437 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, 437 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
438 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES },
439
440 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
438 441
439 { USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM, HID_QUIRK_HIDDEV }, 442 { USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM, HID_QUIRK_HIDDEV },
440 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT }, 443 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT },
444 { USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_HIDDEV | HID_QUIRK_IGNORE_HIDINPUT },
441 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT }, 445 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT },
442 446
443 { USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193, HID_QUIRK_HWHEEL_WHEEL_INVERT }, 447 { USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193, HID_QUIRK_HWHEEL_WHEEL_INVERT },
@@ -518,16 +522,6 @@ static const struct hid_blacklist {
518 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE }, 522 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE },
519 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503, HID_QUIRK_IGNORE }, 523 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503, HID_QUIRK_IGNORE },
520 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504, HID_QUIRK_IGNORE }, 524 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504, HID_QUIRK_IGNORE },
521 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_600, HID_QUIRK_IGNORE },
522 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_601, HID_QUIRK_IGNORE },
523 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_602, HID_QUIRK_IGNORE },
524 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_603, HID_QUIRK_IGNORE },
525 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_604, HID_QUIRK_IGNORE },
526 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_605, HID_QUIRK_IGNORE },
527 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_606, HID_QUIRK_IGNORE },
528 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_607, HID_QUIRK_IGNORE },
529 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_608, HID_QUIRK_IGNORE },
530 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_609, HID_QUIRK_IGNORE },
531 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000, HID_QUIRK_IGNORE }, 525 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000, HID_QUIRK_IGNORE },
532 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001, HID_QUIRK_IGNORE }, 526 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001, HID_QUIRK_IGNORE },
533 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002, HID_QUIRK_IGNORE }, 527 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002, HID_QUIRK_IGNORE },
@@ -601,6 +595,7 @@ static const struct hid_blacklist {
601 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, 595 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
602 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, 596 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
603 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, 597 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
598 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
604 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 599 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
605 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D, HID_QUIRK_NOGET }, 600 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D, HID_QUIRK_NOGET },
606 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET }, 601 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET },
@@ -608,7 +603,7 @@ static const struct hid_blacklist {
608 { USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET }, 603 { USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET },
609 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, 604 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
610 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 605 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
611 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 606 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
612 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 607 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
613 608
614 { USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 609 { USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
@@ -719,6 +714,7 @@ static const struct hid_rdesc_blacklist {
719 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_RDESC_LOGITECH }, 714 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
720 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH }, 715 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
721 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH }, 716 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH },
717 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_DESKTOP_RECV_1028, HID_QUIRK_RDESC_MICROSOFT_RECV_1028 },
722 718
723 { USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E, HID_QUIRK_RDESC_BUTTON_CONSUMER }, 719 { USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E, HID_QUIRK_RDESC_BUTTON_CONSUMER },
724 720
@@ -728,6 +724,8 @@ static const struct hid_rdesc_blacklist {
728 724
729 { USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_RDESC_SAMSUNG_REMOTE }, 725 { USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE, HID_QUIRK_RDESC_SAMSUNG_REMOTE },
730 726
727 { USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP, HID_QUIRK_RDESC_SUNPLUS_WDESKTOP },
728
731 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX }, 729 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
732 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX }, 730 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
733 731
@@ -793,8 +791,8 @@ static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor,
793 * 791 *
794 * Returns: 0 OK, -error on failure. 792 * Returns: 0 OK, -error on failure.
795 */ 793 */
796int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, 794static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
797 const u32 quirks) 795 const u32 quirks)
798{ 796{
799 struct quirks_list_struct *q_new, *q; 797 struct quirks_list_struct *q_new, *q;
800 int list_edited = 0; 798 int list_edited = 0;
@@ -1002,6 +1000,17 @@ static void usbhid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize)
1002 } 1000 }
1003} 1001}
1004 1002
1003static void usbhid_fixup_sunplus_wdesktop(unsigned char *rdesc, int rsize)
1004{
1005 if (rsize >= 107 && rdesc[104] == 0x26
1006 && rdesc[105] == 0x80
1007 && rdesc[106] == 0x03) {
1008 printk(KERN_INFO "Fixing up Sunplus Wireless Desktop report descriptor\n");
1009 rdesc[105] = rdesc[110] = 0x03;
1010 rdesc[106] = rdesc[111] = 0x21;
1011 }
1012}
1013
1005/* 1014/*
1006 * Samsung IrDA remote controller (reports as Cypress USB Mouse). 1015 * Samsung IrDA remote controller (reports as Cypress USB Mouse).
1007 * 1016 *
@@ -1089,6 +1098,28 @@ static void usbhid_fixup_button_consumer_descriptor(unsigned char *rdesc, int rs
1089 } 1098 }
1090} 1099}
1091 1100
1101/*
1102 * Microsoft Wireless Desktop Receiver (Model 1028) has several
1103 * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
1104 */
1105static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize)
1106{
1107 if (rsize == 571 && rdesc[284] == 0x19
1108 && rdesc[286] == 0x2a
1109 && rdesc[304] == 0x19
1110 && rdesc[306] == 0x29
1111 && rdesc[352] == 0x1a
1112 && rdesc[355] == 0x2a
1113 && rdesc[557] == 0x19
1114 && rdesc[559] == 0x29) {
1115 printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
1116 rdesc[284] = rdesc[304] = rdesc[558] = 0x35;
1117 rdesc[352] = 0x36;
1118 rdesc[286] = rdesc[355] = 0x46;
1119 rdesc[306] = rdesc[559] = 0x45;
1120 }
1121}
1122
1092static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize) 1123static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize)
1093{ 1124{
1094 if ((quirks & HID_QUIRK_RDESC_CYMOTION)) 1125 if ((quirks & HID_QUIRK_RDESC_CYMOTION))
@@ -1112,6 +1143,11 @@ static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned
1112 if (quirks & HID_QUIRK_RDESC_SAMSUNG_REMOTE) 1143 if (quirks & HID_QUIRK_RDESC_SAMSUNG_REMOTE)
1113 usbhid_fixup_samsung_irda_descriptor(rdesc, rsize); 1144 usbhid_fixup_samsung_irda_descriptor(rdesc, rsize);
1114 1145
1146 if (quirks & HID_QUIRK_RDESC_MICROSOFT_RECV_1028)
1147 usbhid_fixup_microsoft_descriptor(rdesc, rsize);
1148
1149 if (quirks & HID_QUIRK_RDESC_SUNPLUS_WDESKTOP)
1150 usbhid_fixup_sunplus_wdesktop(rdesc, rsize);
1115} 1151}
1116 1152
1117/** 1153/**
@@ -1150,5 +1186,4 @@ void usbhid_fixup_report_descriptor(const u16 idVendor, const u16 idProduct,
1150 else if (paramVendor == idVendor && paramProduct == idProduct) 1186 else if (paramVendor == idVendor && paramProduct == idProduct)
1151 __usbhid_fixup_report_descriptor(quirks, rdesc, rsize); 1187 __usbhid_fixup_report_descriptor(quirks, rdesc, rsize);
1152 } 1188 }
1153
1154} 1189}
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 5fc4019956ba..95cc192bc7af 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -393,6 +393,153 @@ static unsigned int hiddev_poll(struct file *file, poll_table *wait)
393/* 393/*
394 * "ioctl" file op 394 * "ioctl" file op
395 */ 395 */
396static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg)
397{
398 struct hid_device *hid = hiddev->hid;
399 struct hiddev_report_info rinfo;
400 struct hiddev_usage_ref_multi *uref_multi = NULL;
401 struct hiddev_usage_ref *uref;
402 struct hid_report *report;
403 struct hid_field *field;
404 int i;
405
406 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
407 if (!uref_multi)
408 return -ENOMEM;
409 uref = &uref_multi->uref;
410 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
411 if (copy_from_user(uref_multi, user_arg,
412 sizeof(*uref_multi)))
413 goto fault;
414 } else {
415 if (copy_from_user(uref, user_arg, sizeof(*uref)))
416 goto fault;
417 }
418
419 switch (cmd) {
420 case HIDIOCGUCODE:
421 rinfo.report_type = uref->report_type;
422 rinfo.report_id = uref->report_id;
423 if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
424 goto inval;
425
426 if (uref->field_index >= report->maxfield)
427 goto inval;
428
429 field = report->field[uref->field_index];
430 if (uref->usage_index >= field->maxusage)
431 goto inval;
432
433 uref->usage_code = field->usage[uref->usage_index].hid;
434
435 if (copy_to_user(user_arg, uref, sizeof(*uref)))
436 goto fault;
437
438 kfree(uref_multi);
439 return 0;
440
441 default:
442 if (cmd != HIDIOCGUSAGE &&
443 cmd != HIDIOCGUSAGES &&
444 uref->report_type == HID_REPORT_TYPE_INPUT)
445 goto inval;
446
447 if (uref->report_id == HID_REPORT_ID_UNKNOWN) {
448 field = hiddev_lookup_usage(hid, uref);
449 if (field == NULL)
450 goto inval;
451 } else {
452 rinfo.report_type = uref->report_type;
453 rinfo.report_id = uref->report_id;
454 if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
455 goto inval;
456
457 if (uref->field_index >= report->maxfield)
458 goto inval;
459
460 field = report->field[uref->field_index];
461
462 if (cmd == HIDIOCGCOLLECTIONINDEX) {
463 if (uref->usage_index >= field->maxusage)
464 goto inval;
465 } else if (uref->usage_index >= field->report_count)
466 goto inval;
467
468 else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
469 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
470 uref->usage_index + uref_multi->num_values > field->report_count))
471 goto inval;
472 }
473
474 switch (cmd) {
475 case HIDIOCGUSAGE:
476 uref->value = field->value[uref->usage_index];
477 if (copy_to_user(user_arg, uref, sizeof(*uref)))
478 goto fault;
479 goto goodreturn;
480
481 case HIDIOCSUSAGE:
482 field->value[uref->usage_index] = uref->value;
483 goto goodreturn;
484
485 case HIDIOCGCOLLECTIONINDEX:
486 kfree(uref_multi);
487 return field->usage[uref->usage_index].collection_index;
488 case HIDIOCGUSAGES:
489 for (i = 0; i < uref_multi->num_values; i++)
490 uref_multi->values[i] =
491 field->value[uref->usage_index + i];
492 if (copy_to_user(user_arg, uref_multi,
493 sizeof(*uref_multi)))
494 goto fault;
495 goto goodreturn;
496 case HIDIOCSUSAGES:
497 for (i = 0; i < uref_multi->num_values; i++)
498 field->value[uref->usage_index + i] =
499 uref_multi->values[i];
500 goto goodreturn;
501 }
502
503goodreturn:
504 kfree(uref_multi);
505 return 0;
506fault:
507 kfree(uref_multi);
508 return -EFAULT;
509inval:
510 kfree(uref_multi);
511 return -EINVAL;
512 }
513}
514
515static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd, void __user *user_arg)
516{
517 struct hid_device *hid = hiddev->hid;
518 struct usb_device *dev = hid_to_usb_dev(hid);
519 int idx, len;
520 char *buf;
521
522 if (get_user(idx, (int __user *)user_arg))
523 return -EFAULT;
524
525 if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL)
526 return -ENOMEM;
527
528 if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) {
529 kfree(buf);
530 return -EINVAL;
531 }
532
533 if (copy_to_user(user_arg+sizeof(int), buf, len+1)) {
534 kfree(buf);
535 return -EFAULT;
536 }
537
538 kfree(buf);
539
540 return len;
541}
542
396static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 543static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
397{ 544{
398 struct hiddev_list *list = file->private_data; 545 struct hiddev_list *list = file->private_data;
@@ -402,8 +549,6 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
402 struct hiddev_collection_info cinfo; 549 struct hiddev_collection_info cinfo;
403 struct hiddev_report_info rinfo; 550 struct hiddev_report_info rinfo;
404 struct hiddev_field_info finfo; 551 struct hiddev_field_info finfo;
405 struct hiddev_usage_ref_multi *uref_multi = NULL;
406 struct hiddev_usage_ref *uref;
407 struct hiddev_devinfo dinfo; 552 struct hiddev_devinfo dinfo;
408 struct hid_report *report; 553 struct hid_report *report;
409 struct hid_field *field; 554 struct hid_field *field;
@@ -470,30 +615,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
470 } 615 }
471 616
472 case HIDIOCGSTRING: 617 case HIDIOCGSTRING:
473 { 618 return hiddev_ioctl_string(hiddev, cmd, user_arg);
474 int idx, len;
475 char *buf;
476
477 if (get_user(idx, (int __user *)arg))
478 return -EFAULT;
479
480 if ((buf = kmalloc(HID_STRING_SIZE, GFP_KERNEL)) == NULL)
481 return -ENOMEM;
482
483 if ((len = usb_string(dev, idx, buf, HID_STRING_SIZE-1)) < 0) {
484 kfree(buf);
485 return -EINVAL;
486 }
487
488 if (copy_to_user(user_arg+sizeof(int), buf, len+1)) {
489 kfree(buf);
490 return -EFAULT;
491 }
492
493 kfree(buf);
494
495 return len;
496 }
497 619
498 case HIDIOCINITREPORT: 620 case HIDIOCINITREPORT:
499 usbhid_init_reports(hid); 621 usbhid_init_reports(hid);
@@ -578,121 +700,13 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
578 return 0; 700 return 0;
579 701
580 case HIDIOCGUCODE: 702 case HIDIOCGUCODE:
581 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); 703 /* fall through */
582 if (!uref_multi)
583 return -ENOMEM;
584 uref = &uref_multi->uref;
585 if (copy_from_user(uref, user_arg, sizeof(*uref)))
586 goto fault;
587
588 rinfo.report_type = uref->report_type;
589 rinfo.report_id = uref->report_id;
590 if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
591 goto inval;
592
593 if (uref->field_index >= report->maxfield)
594 goto inval;
595
596 field = report->field[uref->field_index];
597 if (uref->usage_index >= field->maxusage)
598 goto inval;
599
600 uref->usage_code = field->usage[uref->usage_index].hid;
601
602 if (copy_to_user(user_arg, uref, sizeof(*uref)))
603 goto fault;
604
605 kfree(uref_multi);
606 return 0;
607
608 case HIDIOCGUSAGE: 704 case HIDIOCGUSAGE:
609 case HIDIOCSUSAGE: 705 case HIDIOCSUSAGE:
610 case HIDIOCGUSAGES: 706 case HIDIOCGUSAGES:
611 case HIDIOCSUSAGES: 707 case HIDIOCSUSAGES:
612 case HIDIOCGCOLLECTIONINDEX: 708 case HIDIOCGCOLLECTIONINDEX:
613 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); 709 return hiddev_ioctl_usage(hiddev, cmd, user_arg);
614 if (!uref_multi)
615 return -ENOMEM;
616 uref = &uref_multi->uref;
617 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
618 if (copy_from_user(uref_multi, user_arg,
619 sizeof(*uref_multi)))
620 goto fault;
621 } else {
622 if (copy_from_user(uref, user_arg, sizeof(*uref)))
623 goto fault;
624 }
625
626 if (cmd != HIDIOCGUSAGE &&
627 cmd != HIDIOCGUSAGES &&
628 uref->report_type == HID_REPORT_TYPE_INPUT)
629 goto inval;
630
631 if (uref->report_id == HID_REPORT_ID_UNKNOWN) {
632 field = hiddev_lookup_usage(hid, uref);
633 if (field == NULL)
634 goto inval;
635 } else {
636 rinfo.report_type = uref->report_type;
637 rinfo.report_id = uref->report_id;
638 if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
639 goto inval;
640
641 if (uref->field_index >= report->maxfield)
642 goto inval;
643
644 field = report->field[uref->field_index];
645
646 if (cmd == HIDIOCGCOLLECTIONINDEX) {
647 if (uref->usage_index >= field->maxusage)
648 goto inval;
649 } else if (uref->usage_index >= field->report_count)
650 goto inval;
651
652 else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
653 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
654 uref->usage_index + uref_multi->num_values > field->report_count))
655 goto inval;
656 }
657
658 switch (cmd) {
659 case HIDIOCGUSAGE:
660 uref->value = field->value[uref->usage_index];
661 if (copy_to_user(user_arg, uref, sizeof(*uref)))
662 goto fault;
663 goto goodreturn;
664
665 case HIDIOCSUSAGE:
666 field->value[uref->usage_index] = uref->value;
667 goto goodreturn;
668
669 case HIDIOCGCOLLECTIONINDEX:
670 kfree(uref_multi);
671 return field->usage[uref->usage_index].collection_index;
672 case HIDIOCGUSAGES:
673 for (i = 0; i < uref_multi->num_values; i++)
674 uref_multi->values[i] =
675 field->value[uref->usage_index + i];
676 if (copy_to_user(user_arg, uref_multi,
677 sizeof(*uref_multi)))
678 goto fault;
679 goto goodreturn;
680 case HIDIOCSUSAGES:
681 for (i = 0; i < uref_multi->num_values; i++)
682 field->value[uref->usage_index + i] =
683 uref_multi->values[i];
684 goto goodreturn;
685 }
686
687goodreturn:
688 kfree(uref_multi);
689 return 0;
690fault:
691 kfree(uref_multi);
692 return -EFAULT;
693inval:
694 kfree(uref_multi);
695 return -EINVAL;
696 710
697 case HIDIOCGCOLLECTIONINFO: 711 case HIDIOCGCOLLECTIONINFO:
698 if (copy_from_user(&cinfo, user_arg, sizeof(cinfo))) 712 if (copy_from_user(&cinfo, user_arg, sizeof(cinfo)))
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 0023f96d4294..62d2d7c925bd 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/wait.h>
31#include <linux/workqueue.h> 32#include <linux/workqueue.h>
32#include <linux/input.h> 33#include <linux/input.h>
33 34
@@ -77,7 +78,7 @@ struct usbhid_device {
77 unsigned long stop_retry; /* Time to give up, in jiffies */ 78 unsigned long stop_retry; /* Time to give up, in jiffies */
78 unsigned int retry_delay; /* Delay length in ms */ 79 unsigned int retry_delay; /* Delay length in ms */
79 struct work_struct reset_work; /* Task context for resets */ 80 struct work_struct reset_work; /* Task context for resets */
80 81 wait_queue_head_t wait; /* For sleeping */
81}; 82};
82 83
83#define hid_to_usb_dev(hid_dev) \ 84#define hid_to_usb_dev(hid_dev) \
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index 014dfa575be7..7137a17402fe 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -1,45 +1,16 @@
1# 1#
2# Character device configuration 2# I2C algorithm drivers configuration
3# 3#
4 4
5menu "I2C Algorithms"
6
7config I2C_ALGOBIT 5config I2C_ALGOBIT
8 tristate "I2C bit-banging interfaces" 6 tristate
9 help
10 This allows you to use a range of I2C adapters called bit-banging
11 adapters. Say Y if you own an I2C adapter belonging to this class
12 and then say Y to the specific driver for you adapter below.
13
14 This support is also available as a module. If so, the module
15 will be called i2c-algo-bit.
16 7
17config I2C_ALGOPCF 8config I2C_ALGOPCF
18 tristate "I2C PCF 8584 interfaces" 9 tristate
19 help
20 This allows you to use a range of I2C adapters called PCF adapters.
21 Say Y if you own an I2C adapter belonging to this class and then say
22 Y to the specific driver for you adapter below.
23
24 This support is also available as a module. If so, the module
25 will be called i2c-algo-pcf.
26 10
27config I2C_ALGOPCA 11config I2C_ALGOPCA
28 tristate "I2C PCA 9564 interfaces" 12 tristate
29 help
30 This allows you to use a range of I2C adapters called PCA adapters.
31 Say Y if you own an I2C adapter belonging to this class and then say
32 Y to the specific driver for you adapter below.
33
34 This support is also available as a module. If so, the module
35 will be called i2c-algo-pca.
36 13
37config I2C_ALGO_SGI 14config I2C_ALGO_SGI
38 tristate "I2C SGI interfaces" 15 tristate
39 depends on SGI_IP22 || SGI_IP32 || X86_VISWS 16 depends on SGI_IP22 || SGI_IP32 || X86_VISWS
40 help
41 Supports the SGI interfaces like the ones found on SGI Indy VINO
42 or SGI O2 MACE.
43
44endmenu
45
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 2a16211f12e5..e954a20b97a6 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * i2c-algo-pca.c i2c driver algorithms for PCA9564 adapters 2 * i2c-algo-pca.c i2c driver algorithms for PCA9564 adapters
3 * Copyright (C) 2004 Arcom Control Systems 3 * Copyright (C) 2004 Arcom Control Systems
4 * Copyright (C) 2008 Pengutronix
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -21,14 +22,10 @@
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/slab.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/errno.h> 26#include <linux/errno.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/i2c-algo-pca.h> 28#include <linux/i2c-algo-pca.h>
29#include "i2c-algo-pca.h"
30
31#define DRIVER "i2c-algo-pca"
32 29
33#define DEB1(fmt, args...) do { if (i2c_debug>=1) printk(fmt, ## args); } while(0) 30#define DEB1(fmt, args...) do { if (i2c_debug>=1) printk(fmt, ## args); } while(0)
34#define DEB2(fmt, args...) do { if (i2c_debug>=2) printk(fmt, ## args); } while(0) 31#define DEB2(fmt, args...) do { if (i2c_debug>=2) printk(fmt, ## args); } while(0)
@@ -36,15 +33,15 @@
36 33
37static int i2c_debug; 34static int i2c_debug;
38 35
39#define pca_outw(adap, reg, val) adap->write_byte(adap, reg, val) 36#define pca_outw(adap, reg, val) adap->write_byte(adap->data, reg, val)
40#define pca_inw(adap, reg) adap->read_byte(adap, reg) 37#define pca_inw(adap, reg) adap->read_byte(adap->data, reg)
41 38
42#define pca_status(adap) pca_inw(adap, I2C_PCA_STA) 39#define pca_status(adap) pca_inw(adap, I2C_PCA_STA)
43#define pca_clock(adap) adap->get_clock(adap) 40#define pca_clock(adap) adap->i2c_clock
44#define pca_own(adap) adap->get_own(adap)
45#define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val) 41#define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val)
46#define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON) 42#define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON)
47#define pca_wait(adap) adap->wait_for_interrupt(adap) 43#define pca_wait(adap) adap->wait_for_completion(adap->data)
44#define pca_reset(adap) adap->reset_chip(adap->data)
48 45
49/* 46/*
50 * Generate a start condition on the i2c bus. 47 * Generate a start condition on the i2c bus.
@@ -99,7 +96,7 @@ static void pca_stop(struct i2c_algo_pca_data *adap)
99 * 96 *
100 * returns after the address has been sent 97 * returns after the address has been sent
101 */ 98 */
102static void pca_address(struct i2c_algo_pca_data *adap, 99static void pca_address(struct i2c_algo_pca_data *adap,
103 struct i2c_msg *msg) 100 struct i2c_msg *msg)
104{ 101{
105 int sta = pca_get_con(adap); 102 int sta = pca_get_con(adap);
@@ -108,9 +105,9 @@ static void pca_address(struct i2c_algo_pca_data *adap,
108 addr = ( (0x7f & msg->addr) << 1 ); 105 addr = ( (0x7f & msg->addr) << 1 );
109 if (msg->flags & I2C_M_RD ) 106 if (msg->flags & I2C_M_RD )
110 addr |= 1; 107 addr |= 1;
111 DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n", 108 DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n",
112 msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr); 109 msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr);
113 110
114 pca_outw(adap, I2C_PCA_DAT, addr); 111 pca_outw(adap, I2C_PCA_DAT, addr);
115 112
116 sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI); 113 sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI);
@@ -124,7 +121,7 @@ static void pca_address(struct i2c_algo_pca_data *adap,
124 * 121 *
125 * Returns after the byte has been transmitted 122 * Returns after the byte has been transmitted
126 */ 123 */
127static void pca_tx_byte(struct i2c_algo_pca_data *adap, 124static void pca_tx_byte(struct i2c_algo_pca_data *adap,
128 __u8 b) 125 __u8 b)
129{ 126{
130 int sta = pca_get_con(adap); 127 int sta = pca_get_con(adap);
@@ -142,19 +139,19 @@ static void pca_tx_byte(struct i2c_algo_pca_data *adap,
142 * 139 *
143 * returns immediately. 140 * returns immediately.
144 */ 141 */
145static void pca_rx_byte(struct i2c_algo_pca_data *adap, 142static void pca_rx_byte(struct i2c_algo_pca_data *adap,
146 __u8 *b, int ack) 143 __u8 *b, int ack)
147{ 144{
148 *b = pca_inw(adap, I2C_PCA_DAT); 145 *b = pca_inw(adap, I2C_PCA_DAT);
149 DEB2("=== READ %#04x %s\n", *b, ack ? "ACK" : "NACK"); 146 DEB2("=== READ %#04x %s\n", *b, ack ? "ACK" : "NACK");
150} 147}
151 148
152/* 149/*
153 * Setup ACK or NACK for next received byte and wait for it to arrive. 150 * Setup ACK or NACK for next received byte and wait for it to arrive.
154 * 151 *
155 * Returns after next byte has arrived. 152 * Returns after next byte has arrived.
156 */ 153 */
157static void pca_rx_ack(struct i2c_algo_pca_data *adap, 154static void pca_rx_ack(struct i2c_algo_pca_data *adap,
158 int ack) 155 int ack)
159{ 156{
160 int sta = pca_get_con(adap); 157 int sta = pca_get_con(adap);
@@ -168,15 +165,6 @@ static void pca_rx_ack(struct i2c_algo_pca_data *adap,
168 pca_wait(adap); 165 pca_wait(adap);
169} 166}
170 167
171/*
172 * Reset the i2c bus / SIO
173 */
174static void pca_reset(struct i2c_algo_pca_data *adap)
175{
176 /* apparently only an external reset will do it. not a lot can be done */
177 printk(KERN_ERR DRIVER ": Haven't figured out how to do a reset yet\n");
178}
179
180static int pca_xfer(struct i2c_adapter *i2c_adap, 168static int pca_xfer(struct i2c_adapter *i2c_adap,
181 struct i2c_msg *msgs, 169 struct i2c_msg *msgs,
182 int num) 170 int num)
@@ -187,7 +175,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
187 int numbytes = 0; 175 int numbytes = 0;
188 int state; 176 int state;
189 int ret; 177 int ret;
190 int timeout = 100; 178 int timeout = i2c_adap->timeout;
191 179
192 while ((state = pca_status(adap)) != 0xf8 && timeout--) { 180 while ((state = pca_status(adap)) != 0xf8 && timeout--) {
193 msleep(10); 181 msleep(10);
@@ -203,14 +191,14 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
203 for (curmsg = 0; curmsg < num; curmsg++) { 191 for (curmsg = 0; curmsg < num; curmsg++) {
204 int addr, i; 192 int addr, i;
205 msg = &msgs[curmsg]; 193 msg = &msgs[curmsg];
206 194
207 addr = (0x7f & msg->addr) ; 195 addr = (0x7f & msg->addr) ;
208 196
209 if (msg->flags & I2C_M_RD ) 197 if (msg->flags & I2C_M_RD )
210 printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n", 198 printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n",
211 curmsg, msg->len, addr, (addr<<1) | 1); 199 curmsg, msg->len, addr, (addr<<1) | 1);
212 else { 200 else {
213 printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s", 201 printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s",
214 curmsg, msg->len, addr, addr<<1, 202 curmsg, msg->len, addr, addr<<1,
215 msg->len == 0 ? "" : ", "); 203 msg->len == 0 ? "" : ", ");
216 for(i=0; i < msg->len; i++) 204 for(i=0; i < msg->len; i++)
@@ -237,7 +225,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
237 case 0x10: /* A repeated start condition has been transmitted */ 225 case 0x10: /* A repeated start condition has been transmitted */
238 pca_address(adap, msg); 226 pca_address(adap, msg);
239 break; 227 break;
240 228
241 case 0x18: /* SLA+W has been transmitted; ACK has been received */ 229 case 0x18: /* SLA+W has been transmitted; ACK has been received */
242 case 0x28: /* Data byte in I2CDAT has been transmitted; ACK has been received */ 230 case 0x28: /* Data byte in I2CDAT has been transmitted; ACK has been received */
243 if (numbytes < msg->len) { 231 if (numbytes < msg->len) {
@@ -287,7 +275,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
287 case 0x38: /* Arbitration lost during SLA+W, SLA+R or data bytes */ 275 case 0x38: /* Arbitration lost during SLA+W, SLA+R or data bytes */
288 DEB2("Arbitration lost\n"); 276 DEB2("Arbitration lost\n");
289 goto out; 277 goto out;
290 278
291 case 0x58: /* Data byte has been received; NOT ACK has been returned */ 279 case 0x58: /* Data byte has been received; NOT ACK has been returned */
292 if ( numbytes == msg->len - 1 ) { 280 if ( numbytes == msg->len - 1 ) {
293 pca_rx_byte(adap, &msg->buf[numbytes], 0); 281 pca_rx_byte(adap, &msg->buf[numbytes], 0);
@@ -317,16 +305,16 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
317 pca_reset(adap); 305 pca_reset(adap);
318 goto out; 306 goto out;
319 default: 307 default:
320 printk(KERN_ERR DRIVER ": unhandled SIO state 0x%02x\n", state); 308 dev_err(&i2c_adap->dev, "unhandled SIO state 0x%02x\n", state);
321 break; 309 break;
322 } 310 }
323 311
324 } 312 }
325 313
326 ret = curmsg; 314 ret = curmsg;
327 out: 315 out:
328 DEB1(KERN_CRIT "}}} transfered %d/%d messages. " 316 DEB1(KERN_CRIT "}}} transfered %d/%d messages. "
329 "status is %#04x. control is %#04x\n", 317 "status is %#04x. control is %#04x\n",
330 curmsg, num, pca_status(adap), 318 curmsg, num, pca_status(adap),
331 pca_get_con(adap)); 319 pca_get_con(adap));
332 return ret; 320 return ret;
@@ -337,53 +325,65 @@ static u32 pca_func(struct i2c_adapter *adap)
337 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 325 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
338} 326}
339 327
340static int pca_init(struct i2c_algo_pca_data *adap) 328static const struct i2c_algorithm pca_algo = {
329 .master_xfer = pca_xfer,
330 .functionality = pca_func,
331};
332
333static int pca_init(struct i2c_adapter *adap)
341{ 334{
342 static int freqs[] = {330,288,217,146,88,59,44,36}; 335 static int freqs[] = {330,288,217,146,88,59,44,36};
343 int own, clock; 336 int clock;
337 struct i2c_algo_pca_data *pca_data = adap->algo_data;
338
339 if (pca_data->i2c_clock > 7) {
340 printk(KERN_WARNING "%s: Invalid I2C clock speed selected. Trying default.\n",
341 adap->name);
342 pca_data->i2c_clock = I2C_PCA_CON_59kHz;
343 }
344
345 adap->algo = &pca_algo;
344 346
345 own = pca_own(adap); 347 pca_reset(pca_data);
346 clock = pca_clock(adap);
347 DEB1(KERN_INFO DRIVER ": own address is %#04x\n", own);
348 DEB1(KERN_INFO DRIVER ": clock freqeuncy is %dkHz\n", freqs[clock]);
349 348
350 pca_outw(adap, I2C_PCA_ADR, own << 1); 349 clock = pca_clock(pca_data);
350 DEB1(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]);
351 351
352 pca_set_con(adap, I2C_PCA_CON_ENSIO | clock); 352 pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock);
353 udelay(500); /* 500 µs for oscilator to stabilise */ 353 udelay(500); /* 500 us for oscilator to stabilise */
354 354
355 return 0; 355 return 0;
356} 356}
357 357
358static const struct i2c_algorithm pca_algo = { 358/*
359 .master_xfer = pca_xfer, 359 * registering functions to load algorithms at runtime
360 .functionality = pca_func,
361};
362
363/*
364 * registering functions to load algorithms at runtime
365 */ 360 */
366int i2c_pca_add_bus(struct i2c_adapter *adap) 361int i2c_pca_add_bus(struct i2c_adapter *adap)
367{ 362{
368 struct i2c_algo_pca_data *pca_adap = adap->algo_data;
369 int rval; 363 int rval;
370 364
371 /* register new adapter to i2c module... */ 365 rval = pca_init(adap);
372 adap->algo = &pca_algo; 366 if (rval)
367 return rval;
373 368
374 adap->timeout = 100; /* default values, should */ 369 return i2c_add_adapter(adap);
375 adap->retries = 3; /* be replaced by defines */ 370}
371EXPORT_SYMBOL(i2c_pca_add_bus);
376 372
377 if ((rval = pca_init(pca_adap))) 373int i2c_pca_add_numbered_bus(struct i2c_adapter *adap)
378 return rval; 374{
375 int rval;
379 376
380 rval = i2c_add_adapter(adap); 377 rval = pca_init(adap);
378 if (rval)
379 return rval;
381 380
382 return rval; 381 return i2c_add_numbered_adapter(adap);
383} 382}
384EXPORT_SYMBOL(i2c_pca_add_bus); 383EXPORT_SYMBOL(i2c_pca_add_numbered_bus);
385 384
386MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>"); 385MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, "
386 "Wolfram Sang <w.sang@pengutronix.de>");
387MODULE_DESCRIPTION("I2C-Bus PCA9564 algorithm"); 387MODULE_DESCRIPTION("I2C-Bus PCA9564 algorithm");
388MODULE_LICENSE("GPL"); 388MODULE_LICENSE("GPL");
389 389
diff --git a/drivers/i2c/algos/i2c-algo-pca.h b/drivers/i2c/algos/i2c-algo-pca.h
deleted file mode 100644
index 2fee07e05211..000000000000
--- a/drivers/i2c/algos/i2c-algo-pca.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef I2C_PCA9564_H
2#define I2C_PCA9564_H 1
3
4#define I2C_PCA_STA 0x00 /* STATUS Read Only */
5#define I2C_PCA_TO 0x00 /* TIMEOUT Write Only */
6#define I2C_PCA_DAT 0x01 /* DATA Read/Write */
7#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */
8#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */
9
10#define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */
11#define I2C_PCA_CON_ENSIO 0x40 /* Enable */
12#define I2C_PCA_CON_STA 0x20 /* Start */
13#define I2C_PCA_CON_STO 0x10 /* Stop */
14#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
15#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
16
17#define I2C_PCA_CON_330kHz 0x00
18#define I2C_PCA_CON_288kHz 0x01
19#define I2C_PCA_CON_217kHz 0x02
20#define I2C_PCA_CON_146kHz 0x03
21#define I2C_PCA_CON_88kHz 0x04
22#define I2C_PCA_CON_59kHz 0x05
23#define I2C_PCA_CON_44kHz 0x06
24#define I2C_PCA_CON_36kHz 0x07
25
26#endif /* I2C_PCA9564_H */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index b04c99580d0d..48438cc5d0ca 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -100,9 +100,12 @@ config I2C_AU1550
100 100
101config I2C_BLACKFIN_TWI 101config I2C_BLACKFIN_TWI
102 tristate "Blackfin TWI I2C support" 102 tristate "Blackfin TWI I2C support"
103 depends on BF534 || BF536 || BF537 103 depends on BLACKFIN
104 help 104 help
105 This is the TWI I2C device driver for Blackfin 534/536/537/54x. 105 This is the TWI I2C device driver for Blackfin BF522, BF525,
106 BF527, BF534, BF536, BF537 and BF54x. For other Blackfin processors,
107 please don't use this driver.
108
106 This driver can also be built as a module. If so, the module 109 This driver can also be built as a module. If so, the module
107 will be called i2c-bfin-twi. 110 will be called i2c-bfin-twi.
108 111
@@ -135,7 +138,7 @@ config I2C_ELEKTOR
135 This supports the PCF8584 ISA bus I2C adapter. Say Y if you own 138 This supports the PCF8584 ISA bus I2C adapter. Say Y if you own
136 such an adapter. 139 such an adapter.
137 140
138 This support is also available as a module. If so, the module 141 This support is also available as a module. If so, the module
139 will be called i2c-elektor. 142 will be called i2c-elektor.
140 143
141config I2C_GPIO 144config I2C_GPIO
@@ -190,7 +193,7 @@ config I2C_I810
190 select I2C_ALGOBIT 193 select I2C_ALGOBIT
191 help 194 help
192 If you say yes to this option, support will be included for the Intel 195 If you say yes to this option, support will be included for the Intel
193 810/815 family of mainboard I2C interfaces. Specifically, the 196 810/815 family of mainboard I2C interfaces. Specifically, the
194 following versions of the chipset are supported: 197 following versions of the chipset are supported:
195 i810AA 198 i810AA
196 i810AB 199 i810AB
@@ -246,10 +249,10 @@ config I2C_PIIX4
246 249
247config I2C_IBM_IIC 250config I2C_IBM_IIC
248 tristate "IBM PPC 4xx on-chip I2C interface" 251 tristate "IBM PPC 4xx on-chip I2C interface"
249 depends on IBM_OCP 252 depends on 4xx
250 help 253 help
251 Say Y here if you want to use IIC peripheral found on 254 Say Y here if you want to use IIC peripheral found on
252 embedded IBM PPC 4xx based systems. 255 embedded IBM PPC 4xx based systems.
253 256
254 This driver can also be built as a module. If so, the module 257 This driver can also be built as a module. If so, the module
255 will be called i2c-ibm_iic. 258 will be called i2c-ibm_iic.
@@ -269,7 +272,7 @@ config I2C_IXP2000
269 depends on ARCH_IXP2000 272 depends on ARCH_IXP2000
270 select I2C_ALGOBIT 273 select I2C_ALGOBIT
271 help 274 help
272 Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based 275 Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
273 system and are using GPIO lines for an I2C bus. 276 system and are using GPIO lines for an I2C bus.
274 277
275 This support is also available as a module. If so, the module 278 This support is also available as a module. If so, the module
@@ -354,7 +357,7 @@ config I2C_PARPORT
354 on the parport driver. This is meant for embedded systems. Don't say 357 on the parport driver. This is meant for embedded systems. Don't say
355 Y here if you intend to say Y or M there. 358 Y here if you intend to say Y or M there.
356 359
357 This support is also available as a module. If so, the module 360 This support is also available as a module. If so, the module
358 will be called i2c-parport. 361 will be called i2c-parport.
359 362
360config I2C_PARPORT_LIGHT 363config I2C_PARPORT_LIGHT
@@ -372,12 +375,12 @@ config I2C_PARPORT_LIGHT
372 the clean but heavy parport handling is not an option. The 375 the clean but heavy parport handling is not an option. The
373 drawback is a reduced portability and the impossibility to 376 drawback is a reduced portability and the impossibility to
374 daisy-chain other parallel port devices. 377 daisy-chain other parallel port devices.
375 378
376 Don't say Y here if you said Y or M to i2c-parport. Saying M to 379 Don't say Y here if you said Y or M to i2c-parport. Saying M to
377 both is possible but both modules should not be loaded at the same 380 both is possible but both modules should not be loaded at the same
378 time. 381 time.
379 382
380 This support is also available as a module. If so, the module 383 This support is also available as a module. If so, the module
381 will be called i2c-parport-light. 384 will be called i2c-parport-light.
382 385
383config I2C_PASEMI 386config I2C_PASEMI
@@ -401,7 +404,7 @@ config I2C_PROSAVAGE
401 404
402 This driver is deprecated in favor of the savagefb driver. 405 This driver is deprecated in favor of the savagefb driver.
403 406
404 This support is also available as a module. If so, the module 407 This support is also available as a module. If so, the module
405 will be called i2c-prosavage. 408 will be called i2c-prosavage.
406 409
407config I2C_S3C2410 410config I2C_S3C2410
@@ -417,7 +420,7 @@ config I2C_SAVAGE4
417 depends on PCI 420 depends on PCI
418 select I2C_ALGOBIT 421 select I2C_ALGOBIT
419 help 422 help
420 If you say yes to this option, support will be included for the 423 If you say yes to this option, support will be included for the
421 S3 Savage 4 I2C interface. 424 S3 Savage 4 I2C interface.
422 425
423 This driver is deprecated in favor of the savagefb driver. 426 This driver is deprecated in favor of the savagefb driver.
@@ -452,7 +455,7 @@ config SCx200_I2C
452 455
453 If you don't know what to do here, say N. 456 If you don't know what to do here, say N.
454 457
455 This support is also available as a module. If so, the module 458 This support is also available as a module. If so, the module
456 will be called scx200_i2c. 459 will be called scx200_i2c.
457 460
458 This driver is deprecated and will be dropped soon. Use i2c-gpio 461 This driver is deprecated and will be dropped soon. Use i2c-gpio
@@ -483,14 +486,14 @@ config SCx200_ACB
483 486
484 If you don't know what to do here, say N. 487 If you don't know what to do here, say N.
485 488
486 This support is also available as a module. If so, the module 489 This support is also available as a module. If so, the module
487 will be called scx200_acb. 490 will be called scx200_acb.
488 491
489config I2C_SIS5595 492config I2C_SIS5595
490 tristate "SiS 5595" 493 tristate "SiS 5595"
491 depends on PCI 494 depends on PCI
492 help 495 help
493 If you say yes to this option, support will be included for the 496 If you say yes to this option, support will be included for the
494 SiS5595 SMBus (a subset of I2C) interface. 497 SiS5595 SMBus (a subset of I2C) interface.
495 498
496 This driver can also be built as a module. If so, the module 499 This driver can also be built as a module. If so, the module
@@ -500,7 +503,7 @@ config I2C_SIS630
500 tristate "SiS 630/730" 503 tristate "SiS 630/730"
501 depends on PCI 504 depends on PCI
502 help 505 help
503 If you say yes to this option, support will be included for the 506 If you say yes to this option, support will be included for the
504 SiS630 and SiS730 SMBus (a subset of I2C) interface. 507 SiS630 and SiS730 SMBus (a subset of I2C) interface.
505 508
506 This driver can also be built as a module. If so, the module 509 This driver can also be built as a module. If so, the module
@@ -632,9 +635,9 @@ config I2C_PCA_ISA
632 select I2C_ALGOPCA 635 select I2C_ALGOPCA
633 default n 636 default n
634 help 637 help
635 This driver supports ISA boards using the Philips PCA 9564 638 This driver supports ISA boards using the Philips PCA9564
636 Parallel bus to I2C bus controller 639 parallel bus to I2C bus controller.
637 640
638 This driver can also be built as a module. If so, the module 641 This driver can also be built as a module. If so, the module
639 will be called i2c-pca-isa. 642 will be called i2c-pca-isa.
640 643
@@ -643,6 +646,17 @@ config I2C_PCA_ISA
643 delays when I2C/SMBus chip drivers are loaded (e.g. at boot 646 delays when I2C/SMBus chip drivers are loaded (e.g. at boot
644 time). If unsure, say N. 647 time). If unsure, say N.
645 648
649config I2C_PCA_PLATFORM
650 tristate "PCA9564 as platform device"
651 select I2C_ALGOPCA
652 default n
653 help
654 This driver supports a memory mapped Philips PCA9564
655 parallel bus to I2C bus controller.
656
657 This driver can also be built as a module. If so, the module
658 will be called i2c-pca-platform.
659
646config I2C_MV64XXX 660config I2C_MV64XXX
647 tristate "Marvell mv64xxx I2C Controller" 661 tristate "Marvell mv64xxx I2C Controller"
648 depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL 662 depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL
@@ -672,4 +686,23 @@ config I2C_PMCMSP
672 This driver can also be built as module. If so, the module 686 This driver can also be built as module. If so, the module
673 will be called i2c-pmcmsp. 687 will be called i2c-pmcmsp.
674 688
689config I2C_SH7760
690 tristate "Renesas SH7760 I2C Controller"
691 depends on CPU_SUBTYPE_SH7760
692 help
693 This driver supports the 2 I2C interfaces on the Renesas SH7760.
694
695 This driver can also be built as a module. If so, the module
696 will be called i2c-sh7760.
697
698config I2C_SH_MOBILE
699 tristate "SuperH Mobile I2C Controller"
700 depends on SUPERH
701 help
702 If you say yes to this option, support will be included for the
703 built-in I2C interface on the Renesas SH-Mobile processor.
704
705 This driver can also be built as a module. If so, the module
706 will be called i2c-sh_mobile.
707
675endmenu 708endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index ea7068f1eb6b..e8c882a5ea66 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
30obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o 30obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
31obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o 31obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
32obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o 32obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
33obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
33obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o 34obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
34obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o 35obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
35obj-$(CONFIG_I2C_PNX) += i2c-pnx.o 36obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
@@ -37,6 +38,8 @@ obj-$(CONFIG_I2C_PROSAVAGE) += i2c-prosavage.o
37obj-$(CONFIG_I2C_PXA) += i2c-pxa.o 38obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
38obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o 39obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
39obj-$(CONFIG_I2C_SAVAGE4) += i2c-savage4.o 40obj-$(CONFIG_I2C_SAVAGE4) += i2c-savage4.o
41obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
42obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
40obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o 43obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
41obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o 44obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
42obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o 45obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index c09b036913bd..73d61946a534 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -298,7 +298,7 @@ static int at91_i2c_resume(struct platform_device *pdev)
298#endif 298#endif
299 299
300/* work with "modprobe at91_i2c" from hotplugging or coldplugging */ 300/* work with "modprobe at91_i2c" from hotplugging or coldplugging */
301MODULE_ALIAS("at91_i2c"); 301MODULE_ALIAS("platform:at91_i2c");
302 302
303static struct platform_driver at91_i2c_driver = { 303static struct platform_driver at91_i2c_driver = {
304 .probe = at91_i2c_probe, 304 .probe = at91_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 1953b26da56a..491718fe46b7 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -472,6 +472,7 @@ i2c_au1550_exit(void)
472MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC."); 472MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC.");
473MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550"); 473MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550");
474MODULE_LICENSE("GPL"); 474MODULE_LICENSE("GPL");
475MODULE_ALIAS("platform:au1xpsc_smbus");
475 476
476module_init (i2c_au1550_init); 477module_init (i2c_au1550_init);
477module_exit (i2c_au1550_exit); 478module_exit (i2c_au1550_exit);
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 7dbdaeb707a9..48d084bdf7c8 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -1,25 +1,11 @@
1/* 1/*
2 * drivers/i2c/busses/i2c-bfin-twi.c 2 * Blackfin On-Chip Two Wire Interface Driver
3 * 3 *
4 * Description: Driver for Blackfin Two Wire Interface 4 * Copyright 2005-2007 Analog Devices Inc.
5 * 5 *
6 * Author: sonicz <sonic.zhang@analog.com> 6 * Enter bugs at http://blackfin.uclinux.org/
7 * 7 *
8 * Copyright (c) 2005-2007 Analog Devices, Inc. 8 * Licensed under the GPL-2 or later.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 9 */
24 10
25#include <linux/module.h> 11#include <linux/module.h>
@@ -34,14 +20,16 @@
34#include <linux/platform_device.h> 20#include <linux/platform_device.h>
35 21
36#include <asm/blackfin.h> 22#include <asm/blackfin.h>
23#include <asm/portmux.h>
37#include <asm/irq.h> 24#include <asm/irq.h>
38 25
39#define POLL_TIMEOUT (2 * HZ) 26#define POLL_TIMEOUT (2 * HZ)
40 27
41/* SMBus mode*/ 28/* SMBus mode*/
42#define TWI_I2C_MODE_STANDARD 0x01 29#define TWI_I2C_MODE_STANDARD 1
43#define TWI_I2C_MODE_STANDARDSUB 0x02 30#define TWI_I2C_MODE_STANDARDSUB 2
44#define TWI_I2C_MODE_COMBINED 0x04 31#define TWI_I2C_MODE_COMBINED 3
32#define TWI_I2C_MODE_REPEAT 4
45 33
46struct bfin_twi_iface { 34struct bfin_twi_iface {
47 int irq; 35 int irq;
@@ -58,39 +46,74 @@ struct bfin_twi_iface {
58 struct timer_list timeout_timer; 46 struct timer_list timeout_timer;
59 struct i2c_adapter adap; 47 struct i2c_adapter adap;
60 struct completion complete; 48 struct completion complete;
49 struct i2c_msg *pmsg;
50 int msg_num;
51 int cur_msg;
52 void __iomem *regs_base;
61}; 53};
62 54
63static struct bfin_twi_iface twi_iface; 55
56#define DEFINE_TWI_REG(reg, off) \
57static inline u16 read_##reg(struct bfin_twi_iface *iface) \
58 { return bfin_read16(iface->regs_base + (off)); } \
59static inline void write_##reg(struct bfin_twi_iface *iface, u16 v) \
60 { bfin_write16(iface->regs_base + (off), v); }
61
62DEFINE_TWI_REG(CLKDIV, 0x00)
63DEFINE_TWI_REG(CONTROL, 0x04)
64DEFINE_TWI_REG(SLAVE_CTL, 0x08)
65DEFINE_TWI_REG(SLAVE_STAT, 0x0C)
66DEFINE_TWI_REG(SLAVE_ADDR, 0x10)
67DEFINE_TWI_REG(MASTER_CTL, 0x14)
68DEFINE_TWI_REG(MASTER_STAT, 0x18)
69DEFINE_TWI_REG(MASTER_ADDR, 0x1C)
70DEFINE_TWI_REG(INT_STAT, 0x20)
71DEFINE_TWI_REG(INT_MASK, 0x24)
72DEFINE_TWI_REG(FIFO_CTL, 0x28)
73DEFINE_TWI_REG(FIFO_STAT, 0x2C)
74DEFINE_TWI_REG(XMT_DATA8, 0x80)
75DEFINE_TWI_REG(XMT_DATA16, 0x84)
76DEFINE_TWI_REG(RCV_DATA8, 0x88)
77DEFINE_TWI_REG(RCV_DATA16, 0x8C)
78
79static const u16 pin_req[2][3] = {
80 {P_TWI0_SCL, P_TWI0_SDA, 0},
81 {P_TWI1_SCL, P_TWI1_SDA, 0},
82};
64 83
65static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface) 84static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
66{ 85{
67 unsigned short twi_int_status = bfin_read_TWI_INT_STAT(); 86 unsigned short twi_int_status = read_INT_STAT(iface);
68 unsigned short mast_stat = bfin_read_TWI_MASTER_STAT(); 87 unsigned short mast_stat = read_MASTER_STAT(iface);
69 88
70 if (twi_int_status & XMTSERV) { 89 if (twi_int_status & XMTSERV) {
71 /* Transmit next data */ 90 /* Transmit next data */
72 if (iface->writeNum > 0) { 91 if (iface->writeNum > 0) {
73 bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); 92 write_XMT_DATA8(iface, *(iface->transPtr++));
74 iface->writeNum--; 93 iface->writeNum--;
75 } 94 }
76 /* start receive immediately after complete sending in 95 /* start receive immediately after complete sending in
77 * combine mode. 96 * combine mode.
78 */ 97 */
79 else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { 98 else if (iface->cur_mode == TWI_I2C_MODE_COMBINED)
80 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() 99 write_MASTER_CTL(iface,
81 | MDIR | RSTART); 100 read_MASTER_CTL(iface) | MDIR | RSTART);
82 } else if (iface->manual_stop) 101 else if (iface->manual_stop)
83 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() 102 write_MASTER_CTL(iface,
84 | STOP); 103 read_MASTER_CTL(iface) | STOP);
104 else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
105 iface->cur_msg+1 < iface->msg_num)
106 write_MASTER_CTL(iface,
107 read_MASTER_CTL(iface) | RSTART);
85 SSYNC(); 108 SSYNC();
86 /* Clear status */ 109 /* Clear status */
87 bfin_write_TWI_INT_STAT(XMTSERV); 110 write_INT_STAT(iface, XMTSERV);
88 SSYNC(); 111 SSYNC();
89 } 112 }
90 if (twi_int_status & RCVSERV) { 113 if (twi_int_status & RCVSERV) {
91 if (iface->readNum > 0) { 114 if (iface->readNum > 0) {
92 /* Receive next data */ 115 /* Receive next data */
93 *(iface->transPtr) = bfin_read_TWI_RCV_DATA8(); 116 *(iface->transPtr) = read_RCV_DATA8(iface);
94 if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { 117 if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
95 /* Change combine mode into sub mode after 118 /* Change combine mode into sub mode after
96 * read first data. 119 * read first data.
@@ -105,28 +128,33 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
105 iface->transPtr++; 128 iface->transPtr++;
106 iface->readNum--; 129 iface->readNum--;
107 } else if (iface->manual_stop) { 130 } else if (iface->manual_stop) {
108 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() 131 write_MASTER_CTL(iface,
109 | STOP); 132 read_MASTER_CTL(iface) | STOP);
133 SSYNC();
134 } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
135 iface->cur_msg+1 < iface->msg_num) {
136 write_MASTER_CTL(iface,
137 read_MASTER_CTL(iface) | RSTART);
110 SSYNC(); 138 SSYNC();
111 } 139 }
112 /* Clear interrupt source */ 140 /* Clear interrupt source */
113 bfin_write_TWI_INT_STAT(RCVSERV); 141 write_INT_STAT(iface, RCVSERV);
114 SSYNC(); 142 SSYNC();
115 } 143 }
116 if (twi_int_status & MERR) { 144 if (twi_int_status & MERR) {
117 bfin_write_TWI_INT_STAT(MERR); 145 write_INT_STAT(iface, MERR);
118 bfin_write_TWI_INT_MASK(0); 146 write_INT_MASK(iface, 0);
119 bfin_write_TWI_MASTER_STAT(0x3e); 147 write_MASTER_STAT(iface, 0x3e);
120 bfin_write_TWI_MASTER_CTL(0); 148 write_MASTER_CTL(iface, 0);
121 SSYNC(); 149 SSYNC();
122 iface->result = -1; 150 iface->result = -EIO;
123 /* if both err and complete int stats are set, return proper 151 /* if both err and complete int stats are set, return proper
124 * results. 152 * results.
125 */ 153 */
126 if (twi_int_status & MCOMP) { 154 if (twi_int_status & MCOMP) {
127 bfin_write_TWI_INT_STAT(MCOMP); 155 write_INT_STAT(iface, MCOMP);
128 bfin_write_TWI_INT_MASK(0); 156 write_INT_MASK(iface, 0);
129 bfin_write_TWI_MASTER_CTL(0); 157 write_MASTER_CTL(iface, 0);
130 SSYNC(); 158 SSYNC();
131 /* If it is a quick transfer, only address bug no data, 159 /* If it is a quick transfer, only address bug no data,
132 * not an err, return 1. 160 * not an err, return 1.
@@ -143,7 +171,7 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
143 return; 171 return;
144 } 172 }
145 if (twi_int_status & MCOMP) { 173 if (twi_int_status & MCOMP) {
146 bfin_write_TWI_INT_STAT(MCOMP); 174 write_INT_STAT(iface, MCOMP);
147 SSYNC(); 175 SSYNC();
148 if (iface->cur_mode == TWI_I2C_MODE_COMBINED) { 176 if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
149 if (iface->readNum == 0) { 177 if (iface->readNum == 0) {
@@ -152,28 +180,63 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
152 */ 180 */
153 iface->readNum = 1; 181 iface->readNum = 1;
154 iface->manual_stop = 1; 182 iface->manual_stop = 1;
155 bfin_write_TWI_MASTER_CTL( 183 write_MASTER_CTL(iface,
156 bfin_read_TWI_MASTER_CTL() 184 read_MASTER_CTL(iface) | (0xff << 6));
157 | (0xff << 6));
158 } else { 185 } else {
159 /* set the readd number in other 186 /* set the readd number in other
160 * combine mode. 187 * combine mode.
161 */ 188 */
162 bfin_write_TWI_MASTER_CTL( 189 write_MASTER_CTL(iface,
163 (bfin_read_TWI_MASTER_CTL() & 190 (read_MASTER_CTL(iface) &
164 (~(0xff << 6))) | 191 (~(0xff << 6))) |
165 ( iface->readNum << 6)); 192 (iface->readNum << 6));
193 }
194 /* remove restart bit and enable master receive */
195 write_MASTER_CTL(iface,
196 read_MASTER_CTL(iface) & ~RSTART);
197 write_MASTER_CTL(iface,
198 read_MASTER_CTL(iface) | MEN | MDIR);
199 SSYNC();
200 } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
201 iface->cur_msg+1 < iface->msg_num) {
202 iface->cur_msg++;
203 iface->transPtr = iface->pmsg[iface->cur_msg].buf;
204 iface->writeNum = iface->readNum =
205 iface->pmsg[iface->cur_msg].len;
206 /* Set Transmit device address */
207 write_MASTER_ADDR(iface,
208 iface->pmsg[iface->cur_msg].addr);
209 if (iface->pmsg[iface->cur_msg].flags & I2C_M_RD)
210 iface->read_write = I2C_SMBUS_READ;
211 else {
212 iface->read_write = I2C_SMBUS_WRITE;
213 /* Transmit first data */
214 if (iface->writeNum > 0) {
215 write_XMT_DATA8(iface,
216 *(iface->transPtr++));
217 iface->writeNum--;
218 SSYNC();
219 }
220 }
221
222 if (iface->pmsg[iface->cur_msg].len <= 255)
223 write_MASTER_CTL(iface,
224 iface->pmsg[iface->cur_msg].len << 6);
225 else {
226 write_MASTER_CTL(iface, 0xff << 6);
227 iface->manual_stop = 1;
166 } 228 }
167 /* remove restart bit and enable master receive */ 229 /* remove restart bit and enable master receive */
168 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() & 230 write_MASTER_CTL(iface,
169 ~RSTART); 231 read_MASTER_CTL(iface) & ~RSTART);
170 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | 232 write_MASTER_CTL(iface, read_MASTER_CTL(iface) |
171 MEN | MDIR); 233 MEN | ((iface->read_write == I2C_SMBUS_READ) ?
234 MDIR : 0));
172 SSYNC(); 235 SSYNC();
173 } else { 236 } else {
174 iface->result = 1; 237 iface->result = 1;
175 bfin_write_TWI_INT_MASK(0); 238 write_INT_MASK(iface, 0);
176 bfin_write_TWI_MASTER_CTL(0); 239 write_MASTER_CTL(iface, 0);
177 SSYNC(); 240 SSYNC();
178 complete(&iface->complete); 241 complete(&iface->complete);
179 } 242 }
@@ -221,91 +284,85 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
221{ 284{
222 struct bfin_twi_iface *iface = adap->algo_data; 285 struct bfin_twi_iface *iface = adap->algo_data;
223 struct i2c_msg *pmsg; 286 struct i2c_msg *pmsg;
224 int i, ret;
225 int rc = 0; 287 int rc = 0;
226 288
227 if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) 289 if (!(read_CONTROL(iface) & TWI_ENA))
228 return -ENXIO; 290 return -ENXIO;
229 291
230 while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { 292 while (read_MASTER_STAT(iface) & BUSBUSY)
231 yield(); 293 yield();
294
295 iface->pmsg = msgs;
296 iface->msg_num = num;
297 iface->cur_msg = 0;
298
299 pmsg = &msgs[0];
300 if (pmsg->flags & I2C_M_TEN) {
301 dev_err(&adap->dev, "10 bits addr not supported!\n");
302 return -EINVAL;
232 } 303 }
233 304
234 ret = 0; 305 iface->cur_mode = TWI_I2C_MODE_REPEAT;
235 for (i = 0; rc >= 0 && i < num; i++) { 306 iface->manual_stop = 0;
236 pmsg = &msgs[i]; 307 iface->transPtr = pmsg->buf;
237 if (pmsg->flags & I2C_M_TEN) { 308 iface->writeNum = iface->readNum = pmsg->len;
238 dev_err(&(adap->dev), "i2c-bfin-twi: 10 bits addr " 309 iface->result = 0;
239 "not supported !\n"); 310 iface->timeout_count = 10;
240 rc = -EINVAL; 311 init_completion(&(iface->complete));
241 break; 312 /* Set Transmit device address */
242 } 313 write_MASTER_ADDR(iface, pmsg->addr);
243 314
244 iface->cur_mode = TWI_I2C_MODE_STANDARD; 315 /* FIFO Initiation. Data in FIFO should be
245 iface->manual_stop = 0; 316 * discarded before start a new operation.
246 iface->transPtr = pmsg->buf; 317 */
247 iface->writeNum = iface->readNum = pmsg->len; 318 write_FIFO_CTL(iface, 0x3);
248 iface->result = 0; 319 SSYNC();
249 iface->timeout_count = 10; 320 write_FIFO_CTL(iface, 0);
250 /* Set Transmit device address */ 321 SSYNC();
251 bfin_write_TWI_MASTER_ADDR(pmsg->addr);
252
253 /* FIFO Initiation. Data in FIFO should be
254 * discarded before start a new operation.
255 */
256 bfin_write_TWI_FIFO_CTL(0x3);
257 SSYNC();
258 bfin_write_TWI_FIFO_CTL(0);
259 SSYNC();
260 322
261 if (pmsg->flags & I2C_M_RD) 323 if (pmsg->flags & I2C_M_RD)
262 iface->read_write = I2C_SMBUS_READ; 324 iface->read_write = I2C_SMBUS_READ;
263 else { 325 else {
264 iface->read_write = I2C_SMBUS_WRITE; 326 iface->read_write = I2C_SMBUS_WRITE;
265 /* Transmit first data */ 327 /* Transmit first data */
266 if (iface->writeNum > 0) { 328 if (iface->writeNum > 0) {
267 bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); 329 write_XMT_DATA8(iface, *(iface->transPtr++));
268 iface->writeNum--; 330 iface->writeNum--;
269 SSYNC(); 331 SSYNC();
270 }
271 } 332 }
333 }
272 334
273 /* clear int stat */ 335 /* clear int stat */
274 bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV); 336 write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV);
275 337
276 /* Interrupt mask . Enable XMT, RCV interrupt */ 338 /* Interrupt mask . Enable XMT, RCV interrupt */
277 bfin_write_TWI_INT_MASK(MCOMP | MERR | 339 write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV);
278 ((iface->read_write == I2C_SMBUS_READ)? 340 SSYNC();
279 RCVSERV : XMTSERV));
280 SSYNC();
281 341
282 if (pmsg->len > 0 && pmsg->len <= 255) 342 if (pmsg->len <= 255)
283 bfin_write_TWI_MASTER_CTL(pmsg->len << 6); 343 write_MASTER_CTL(iface, pmsg->len << 6);
284 else if (pmsg->len > 255) { 344 else {
285 bfin_write_TWI_MASTER_CTL(0xff << 6); 345 write_MASTER_CTL(iface, 0xff << 6);
286 iface->manual_stop = 1; 346 iface->manual_stop = 1;
287 } else 347 }
288 break;
289 348
290 iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; 349 iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
291 add_timer(&iface->timeout_timer); 350 add_timer(&iface->timeout_timer);
292 351
293 /* Master enable */ 352 /* Master enable */
294 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | 353 write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
295 ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | 354 ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
296 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); 355 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
297 SSYNC(); 356 SSYNC();
298 357
299 wait_for_completion(&iface->complete); 358 wait_for_completion(&iface->complete);
300 359
301 rc = iface->result; 360 rc = iface->result;
302 if (rc == 1)
303 ret++;
304 else if (rc == -1)
305 break;
306 }
307 361
308 return ret; 362 if (rc == 1)
363 return num;
364 else
365 return rc;
309} 366}
310 367
311/* 368/*
@@ -319,12 +376,11 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
319 struct bfin_twi_iface *iface = adap->algo_data; 376 struct bfin_twi_iface *iface = adap->algo_data;
320 int rc = 0; 377 int rc = 0;
321 378
322 if (!(bfin_read_TWI_CONTROL() & TWI_ENA)) 379 if (!(read_CONTROL(iface) & TWI_ENA))
323 return -ENXIO; 380 return -ENXIO;
324 381
325 while (bfin_read_TWI_MASTER_STAT() & BUSBUSY) { 382 while (read_MASTER_STAT(iface) & BUSBUSY)
326 yield(); 383 yield();
327 }
328 384
329 iface->writeNum = 0; 385 iface->writeNum = 0;
330 iface->readNum = 0; 386 iface->readNum = 0;
@@ -392,19 +448,20 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
392 iface->read_write = read_write; 448 iface->read_write = read_write;
393 iface->command = command; 449 iface->command = command;
394 iface->timeout_count = 10; 450 iface->timeout_count = 10;
451 init_completion(&(iface->complete));
395 452
396 /* FIFO Initiation. Data in FIFO should be discarded before 453 /* FIFO Initiation. Data in FIFO should be discarded before
397 * start a new operation. 454 * start a new operation.
398 */ 455 */
399 bfin_write_TWI_FIFO_CTL(0x3); 456 write_FIFO_CTL(iface, 0x3);
400 SSYNC(); 457 SSYNC();
401 bfin_write_TWI_FIFO_CTL(0); 458 write_FIFO_CTL(iface, 0);
402 459
403 /* clear int stat */ 460 /* clear int stat */
404 bfin_write_TWI_INT_STAT(MERR|MCOMP|XMTSERV|RCVSERV); 461 write_INT_STAT(iface, MERR | MCOMP | XMTSERV | RCVSERV);
405 462
406 /* Set Transmit device address */ 463 /* Set Transmit device address */
407 bfin_write_TWI_MASTER_ADDR(addr); 464 write_MASTER_ADDR(iface, addr);
408 SSYNC(); 465 SSYNC();
409 466
410 iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; 467 iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
@@ -412,60 +469,64 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
412 469
413 switch (iface->cur_mode) { 470 switch (iface->cur_mode) {
414 case TWI_I2C_MODE_STANDARDSUB: 471 case TWI_I2C_MODE_STANDARDSUB:
415 bfin_write_TWI_XMT_DATA8(iface->command); 472 write_XMT_DATA8(iface, iface->command);
416 bfin_write_TWI_INT_MASK(MCOMP | MERR | 473 write_INT_MASK(iface, MCOMP | MERR |
417 ((iface->read_write == I2C_SMBUS_READ) ? 474 ((iface->read_write == I2C_SMBUS_READ) ?
418 RCVSERV : XMTSERV)); 475 RCVSERV : XMTSERV));
419 SSYNC(); 476 SSYNC();
420 477
421 if (iface->writeNum + 1 <= 255) 478 if (iface->writeNum + 1 <= 255)
422 bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6); 479 write_MASTER_CTL(iface, (iface->writeNum + 1) << 6);
423 else { 480 else {
424 bfin_write_TWI_MASTER_CTL(0xff << 6); 481 write_MASTER_CTL(iface, 0xff << 6);
425 iface->manual_stop = 1; 482 iface->manual_stop = 1;
426 } 483 }
427 /* Master enable */ 484 /* Master enable */
428 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | 485 write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
429 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); 486 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0));
430 break; 487 break;
431 case TWI_I2C_MODE_COMBINED: 488 case TWI_I2C_MODE_COMBINED:
432 bfin_write_TWI_XMT_DATA8(iface->command); 489 write_XMT_DATA8(iface, iface->command);
433 bfin_write_TWI_INT_MASK(MCOMP | MERR | RCVSERV | XMTSERV); 490 write_INT_MASK(iface, MCOMP | MERR | RCVSERV | XMTSERV);
434 SSYNC(); 491 SSYNC();
435 492
436 if (iface->writeNum > 0) 493 if (iface->writeNum > 0)
437 bfin_write_TWI_MASTER_CTL((iface->writeNum + 1) << 6); 494 write_MASTER_CTL(iface, (iface->writeNum + 1) << 6);
438 else 495 else
439 bfin_write_TWI_MASTER_CTL(0x1 << 6); 496 write_MASTER_CTL(iface, 0x1 << 6);
440 /* Master enable */ 497 /* Master enable */
441 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | 498 write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
442 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); 499 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0));
443 break; 500 break;
444 default: 501 default:
445 bfin_write_TWI_MASTER_CTL(0); 502 write_MASTER_CTL(iface, 0);
446 if (size != I2C_SMBUS_QUICK) { 503 if (size != I2C_SMBUS_QUICK) {
447 /* Don't access xmit data register when this is a 504 /* Don't access xmit data register when this is a
448 * read operation. 505 * read operation.
449 */ 506 */
450 if (iface->read_write != I2C_SMBUS_READ) { 507 if (iface->read_write != I2C_SMBUS_READ) {
451 if (iface->writeNum > 0) { 508 if (iface->writeNum > 0) {
452 bfin_write_TWI_XMT_DATA8(*(iface->transPtr++)); 509 write_XMT_DATA8(iface,
510 *(iface->transPtr++));
453 if (iface->writeNum <= 255) 511 if (iface->writeNum <= 255)
454 bfin_write_TWI_MASTER_CTL(iface->writeNum << 6); 512 write_MASTER_CTL(iface,
513 iface->writeNum << 6);
455 else { 514 else {
456 bfin_write_TWI_MASTER_CTL(0xff << 6); 515 write_MASTER_CTL(iface,
516 0xff << 6);
457 iface->manual_stop = 1; 517 iface->manual_stop = 1;
458 } 518 }
459 iface->writeNum--; 519 iface->writeNum--;
460 } else { 520 } else {
461 bfin_write_TWI_XMT_DATA8(iface->command); 521 write_XMT_DATA8(iface, iface->command);
462 bfin_write_TWI_MASTER_CTL(1 << 6); 522 write_MASTER_CTL(iface, 1 << 6);
463 } 523 }
464 } else { 524 } else {
465 if (iface->readNum > 0 && iface->readNum <= 255) 525 if (iface->readNum > 0 && iface->readNum <= 255)
466 bfin_write_TWI_MASTER_CTL(iface->readNum << 6); 526 write_MASTER_CTL(iface,
527 iface->readNum << 6);
467 else if (iface->readNum > 255) { 528 else if (iface->readNum > 255) {
468 bfin_write_TWI_MASTER_CTL(0xff << 6); 529 write_MASTER_CTL(iface, 0xff << 6);
469 iface->manual_stop = 1; 530 iface->manual_stop = 1;
470 } else { 531 } else {
471 del_timer(&iface->timeout_timer); 532 del_timer(&iface->timeout_timer);
@@ -473,13 +534,13 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
473 } 534 }
474 } 535 }
475 } 536 }
476 bfin_write_TWI_INT_MASK(MCOMP | MERR | 537 write_INT_MASK(iface, MCOMP | MERR |
477 ((iface->read_write == I2C_SMBUS_READ) ? 538 ((iface->read_write == I2C_SMBUS_READ) ?
478 RCVSERV : XMTSERV)); 539 RCVSERV : XMTSERV));
479 SSYNC(); 540 SSYNC();
480 541
481 /* Master enable */ 542 /* Master enable */
482 bfin_write_TWI_MASTER_CTL(bfin_read_TWI_MASTER_CTL() | MEN | 543 write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
483 ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | 544 ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
484 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0)); 545 ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
485 break; 546 break;
@@ -514,10 +575,10 @@ static struct i2c_algorithm bfin_twi_algorithm = {
514 575
515static int i2c_bfin_twi_suspend(struct platform_device *dev, pm_message_t state) 576static int i2c_bfin_twi_suspend(struct platform_device *dev, pm_message_t state)
516{ 577{
517/* struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/ 578 struct bfin_twi_iface *iface = platform_get_drvdata(dev);
518 579
519 /* Disable TWI */ 580 /* Disable TWI */
520 bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() & ~TWI_ENA); 581 write_CONTROL(iface, read_CONTROL(iface) & ~TWI_ENA);
521 SSYNC(); 582 SSYNC();
522 583
523 return 0; 584 return 0;
@@ -525,24 +586,52 @@ static int i2c_bfin_twi_suspend(struct platform_device *dev, pm_message_t state)
525 586
526static int i2c_bfin_twi_resume(struct platform_device *dev) 587static int i2c_bfin_twi_resume(struct platform_device *dev)
527{ 588{
528/* struct bfin_twi_iface *iface = platform_get_drvdata(dev);*/ 589 struct bfin_twi_iface *iface = platform_get_drvdata(dev);
529 590
530 /* Enable TWI */ 591 /* Enable TWI */
531 bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); 592 write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
532 SSYNC(); 593 SSYNC();
533 594
534 return 0; 595 return 0;
535} 596}
536 597
537static int i2c_bfin_twi_probe(struct platform_device *dev) 598static int i2c_bfin_twi_probe(struct platform_device *pdev)
538{ 599{
539 struct bfin_twi_iface *iface = &twi_iface; 600 struct bfin_twi_iface *iface;
540 struct i2c_adapter *p_adap; 601 struct i2c_adapter *p_adap;
602 struct resource *res;
541 int rc; 603 int rc;
542 604
605 iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL);
606 if (!iface) {
607 dev_err(&pdev->dev, "Cannot allocate memory\n");
608 rc = -ENOMEM;
609 goto out_error_nomem;
610 }
611
543 spin_lock_init(&(iface->lock)); 612 spin_lock_init(&(iface->lock));
544 init_completion(&(iface->complete)); 613
545 iface->irq = IRQ_TWI; 614 /* Find and map our resources */
615 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
616 if (res == NULL) {
617 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
618 rc = -ENOENT;
619 goto out_error_get_res;
620 }
621
622 iface->regs_base = ioremap(res->start, res->end - res->start + 1);
623 if (iface->regs_base == NULL) {
624 dev_err(&pdev->dev, "Cannot map IO\n");
625 rc = -ENXIO;
626 goto out_error_ioremap;
627 }
628
629 iface->irq = platform_get_irq(pdev, 0);
630 if (iface->irq < 0) {
631 dev_err(&pdev->dev, "No IRQ specified\n");
632 rc = -ENOENT;
633 goto out_error_no_irq;
634 }
546 635
547 init_timer(&(iface->timeout_timer)); 636 init_timer(&(iface->timeout_timer));
548 iface->timeout_timer.function = bfin_twi_timeout; 637 iface->timeout_timer.function = bfin_twi_timeout;
@@ -550,39 +639,63 @@ static int i2c_bfin_twi_probe(struct platform_device *dev)
550 639
551 p_adap = &iface->adap; 640 p_adap = &iface->adap;
552 p_adap->id = I2C_HW_BLACKFIN; 641 p_adap->id = I2C_HW_BLACKFIN;
553 p_adap->nr = dev->id; 642 p_adap->nr = pdev->id;
554 strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); 643 strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
555 p_adap->algo = &bfin_twi_algorithm; 644 p_adap->algo = &bfin_twi_algorithm;
556 p_adap->algo_data = iface; 645 p_adap->algo_data = iface;
557 p_adap->class = I2C_CLASS_ALL; 646 p_adap->class = I2C_CLASS_ALL;
558 p_adap->dev.parent = &dev->dev; 647 p_adap->dev.parent = &pdev->dev;
648
649 rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi");
650 if (rc) {
651 dev_err(&pdev->dev, "Can't setup pin mux!\n");
652 goto out_error_pin_mux;
653 }
559 654
560 rc = request_irq(iface->irq, bfin_twi_interrupt_entry, 655 rc = request_irq(iface->irq, bfin_twi_interrupt_entry,
561 IRQF_DISABLED, dev->name, iface); 656 IRQF_DISABLED, pdev->name, iface);
562 if (rc) { 657 if (rc) {
563 dev_err(&(p_adap->dev), "i2c-bfin-twi: can't get IRQ %d !\n", 658 dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq);
564 iface->irq); 659 rc = -ENODEV;
565 return -ENODEV; 660 goto out_error_req_irq;
566 } 661 }
567 662
568 /* Set TWI internal clock as 10MHz */ 663 /* Set TWI internal clock as 10MHz */
569 bfin_write_TWI_CONTROL(((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F); 664 write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F);
570 665
571 /* Set Twi interface clock as specified */ 666 /* Set Twi interface clock as specified */
572 bfin_write_TWI_CLKDIV((( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ ) 667 write_CLKDIV(iface, ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
573 << 8) | (( 5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ ) 668 << 8) | ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
574 & 0xFF)); 669 & 0xFF));
575 670
576 /* Enable TWI */ 671 /* Enable TWI */
577 bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); 672 write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
578 SSYNC(); 673 SSYNC();
579 674
580 rc = i2c_add_numbered_adapter(p_adap); 675 rc = i2c_add_numbered_adapter(p_adap);
581 if (rc < 0) 676 if (rc < 0) {
582 free_irq(iface->irq, iface); 677 dev_err(&pdev->dev, "Can't add i2c adapter!\n");
583 else 678 goto out_error_add_adapter;
584 platform_set_drvdata(dev, iface); 679 }
680
681 platform_set_drvdata(pdev, iface);
585 682
683 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, "
684 "regs_base@%p\n", iface->regs_base);
685
686 return 0;
687
688out_error_add_adapter:
689 free_irq(iface->irq, iface);
690out_error_req_irq:
691out_error_no_irq:
692 peripheral_free_list(pin_req[pdev->id]);
693out_error_pin_mux:
694 iounmap(iface->regs_base);
695out_error_ioremap:
696out_error_get_res:
697 kfree(iface);
698out_error_nomem:
586 return rc; 699 return rc;
587} 700}
588 701
@@ -594,6 +707,9 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
594 707
595 i2c_del_adapter(&(iface->adap)); 708 i2c_del_adapter(&(iface->adap));
596 free_irq(iface->irq, iface); 709 free_irq(iface->irq, iface);
710 peripheral_free_list(pin_req[pdev->id]);
711 iounmap(iface->regs_base);
712 kfree(iface);
597 713
598 return 0; 714 return 0;
599} 715}
@@ -611,8 +727,6 @@ static struct platform_driver i2c_bfin_twi_driver = {
611 727
612static int __init i2c_bfin_twi_init(void) 728static int __init i2c_bfin_twi_init(void)
613{ 729{
614 pr_info("I2C: Blackfin I2C TWI driver\n");
615
616 return platform_driver_register(&i2c_bfin_twi_driver); 730 return platform_driver_register(&i2c_bfin_twi_driver);
617} 731}
618 732
@@ -621,9 +735,10 @@ static void __exit i2c_bfin_twi_exit(void)
621 platform_driver_unregister(&i2c_bfin_twi_driver); 735 platform_driver_unregister(&i2c_bfin_twi_driver);
622} 736}
623 737
624MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
625MODULE_DESCRIPTION("I2C-Bus adapter routines for Blackfin TWI");
626MODULE_LICENSE("GPL");
627
628module_init(i2c_bfin_twi_init); 738module_init(i2c_bfin_twi_init);
629module_exit(i2c_bfin_twi_exit); 739module_exit(i2c_bfin_twi_exit);
740
741MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
742MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver");
743MODULE_LICENSE("GPL");
744MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index fde26345a379..7ecbfc429b19 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -328,7 +328,7 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
328 int i; 328 int i;
329 int ret; 329 int ret;
330 330
331 dev_dbg(dev->dev, "%s: msgs: %d\n", __FUNCTION__, num); 331 dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
332 332
333 ret = i2c_davinci_wait_bus_not_busy(dev, 1); 333 ret = i2c_davinci_wait_bus_not_busy(dev, 1);
334 if (ret < 0) { 334 if (ret < 0) {
@@ -342,7 +342,7 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
342 return ret; 342 return ret;
343 } 343 }
344 344
345 dev_dbg(dev->dev, "%s:%d ret: %d\n", __FUNCTION__, __LINE__, ret); 345 dev_dbg(dev->dev, "%s:%d ret: %d\n", __func__, __LINE__, ret);
346 346
347 return num; 347 return num;
348} 348}
@@ -364,7 +364,7 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
364 u16 w; 364 u16 w;
365 365
366 while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) { 366 while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) {
367 dev_dbg(dev->dev, "%s: stat=0x%x\n", __FUNCTION__, stat); 367 dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
368 if (count++ == 100) { 368 if (count++ == 100) {
369 dev_warn(dev->dev, "Too much work in one IRQ\n"); 369 dev_warn(dev->dev, "Too much work in one IRQ\n");
370 break; 370 break;
@@ -553,6 +553,9 @@ static int davinci_i2c_remove(struct platform_device *pdev)
553 return 0; 553 return 0;
554} 554}
555 555
556/* work with hotplug and coldplug */
557MODULE_ALIAS("platform:i2c_davinci");
558
556static struct platform_driver davinci_i2c_driver = { 559static struct platform_driver davinci_i2c_driver = {
557 .probe = davinci_i2c_probe, 560 .probe = davinci_i2c_probe,
558 .remove = davinci_i2c_remove, 561 .remove = davinci_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 3ca19fc234fb..7c1b762aa681 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -220,3 +220,4 @@ module_exit(i2c_gpio_exit);
220MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); 220MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
221MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver"); 221MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver");
222MODULE_LICENSE("GPL"); 222MODULE_LICENSE("GPL");
223MODULE_ALIAS("platform:i2c-gpio");
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 22bb247d0e60..85dbf34382e1 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -6,6 +6,9 @@
6 * Copyright (c) 2003, 2004 Zultys Technologies. 6 * Copyright (c) 2003, 2004 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 * 8 *
9 * Copyright (c) 2008 PIKA Technologies
10 * Sean MacLennan <smaclennan@pikatech.com>
11 *
9 * Based on original work by 12 * Based on original work by
10 * Ian DaSilva <idasilva@mvista.com> 13 * Ian DaSilva <idasilva@mvista.com>
11 * Armin Kuster <akuster@mvista.com> 14 * Armin Kuster <akuster@mvista.com>
@@ -39,12 +42,17 @@
39#include <asm/io.h> 42#include <asm/io.h>
40#include <linux/i2c.h> 43#include <linux/i2c.h>
41#include <linux/i2c-id.h> 44#include <linux/i2c-id.h>
45
46#ifdef CONFIG_IBM_OCP
42#include <asm/ocp.h> 47#include <asm/ocp.h>
43#include <asm/ibm4xx.h> 48#include <asm/ibm4xx.h>
49#else
50#include <linux/of_platform.h>
51#endif
44 52
45#include "i2c-ibm_iic.h" 53#include "i2c-ibm_iic.h"
46 54
47#define DRIVER_VERSION "2.1" 55#define DRIVER_VERSION "2.2"
48 56
49MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION); 57MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION);
50MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
@@ -650,13 +658,14 @@ static inline u8 iic_clckdiv(unsigned int opb)
650 opb /= 1000000; 658 opb /= 1000000;
651 659
652 if (opb < 20 || opb > 150){ 660 if (opb < 20 || opb > 150){
653 printk(KERN_CRIT "ibm-iic: invalid OPB clock frequency %u MHz\n", 661 printk(KERN_WARNING "ibm-iic: invalid OPB clock frequency %u MHz\n",
654 opb); 662 opb);
655 opb = opb < 20 ? 20 : 150; 663 opb = opb < 20 ? 20 : 150;
656 } 664 }
657 return (u8)((opb + 9) / 10 - 1); 665 return (u8)((opb + 9) / 10 - 1);
658} 666}
659 667
668#ifdef CONFIG_IBM_OCP
660/* 669/*
661 * Register single IIC interface 670 * Register single IIC interface
662 */ 671 */
@@ -672,7 +681,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
672 ocp->def->index); 681 ocp->def->index);
673 682
674 if (!(dev = kzalloc(sizeof(*dev), GFP_KERNEL))) { 683 if (!(dev = kzalloc(sizeof(*dev), GFP_KERNEL))) {
675 printk(KERN_CRIT "ibm-iic%d: failed to allocate device data\n", 684 printk(KERN_ERR "ibm-iic%d: failed to allocate device data\n",
676 ocp->def->index); 685 ocp->def->index);
677 return -ENOMEM; 686 return -ENOMEM;
678 } 687 }
@@ -687,7 +696,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
687 } 696 }
688 697
689 if (!(dev->vaddr = ioremap(ocp->def->paddr, sizeof(struct iic_regs)))){ 698 if (!(dev->vaddr = ioremap(ocp->def->paddr, sizeof(struct iic_regs)))){
690 printk(KERN_CRIT "ibm-iic%d: failed to ioremap device registers\n", 699 printk(KERN_ERR "ibm-iic%d: failed to ioremap device registers\n",
691 dev->idx); 700 dev->idx);
692 ret = -ENXIO; 701 ret = -ENXIO;
693 goto fail2; 702 goto fail2;
@@ -745,7 +754,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
745 adap->nr = dev->idx >= 0 ? dev->idx : 0; 754 adap->nr = dev->idx >= 0 ? dev->idx : 0;
746 755
747 if ((ret = i2c_add_numbered_adapter(adap)) < 0) { 756 if ((ret = i2c_add_numbered_adapter(adap)) < 0) {
748 printk(KERN_CRIT "ibm-iic%d: failed to register i2c adapter\n", 757 printk(KERN_ERR "ibm-iic%d: failed to register i2c adapter\n",
749 dev->idx); 758 dev->idx);
750 goto fail; 759 goto fail;
751 } 760 }
@@ -778,7 +787,7 @@ static void __devexit iic_remove(struct ocp_device *ocp)
778 struct ibm_iic_private* dev = (struct ibm_iic_private*)ocp_get_drvdata(ocp); 787 struct ibm_iic_private* dev = (struct ibm_iic_private*)ocp_get_drvdata(ocp);
779 BUG_ON(dev == NULL); 788 BUG_ON(dev == NULL);
780 if (i2c_del_adapter(&dev->adap)){ 789 if (i2c_del_adapter(&dev->adap)){
781 printk(KERN_CRIT "ibm-iic%d: failed to delete i2c adapter :(\n", 790 printk(KERN_ERR "ibm-iic%d: failed to delete i2c adapter :(\n",
782 dev->idx); 791 dev->idx);
783 /* That's *very* bad, just shutdown IRQ ... */ 792 /* That's *very* bad, just shutdown IRQ ... */
784 if (dev->irq >= 0){ 793 if (dev->irq >= 0){
@@ -828,5 +837,181 @@ static void __exit iic_exit(void)
828 ocp_unregister_driver(&ibm_iic_driver); 837 ocp_unregister_driver(&ibm_iic_driver);
829} 838}
830 839
840#else /* !CONFIG_IBM_OCP */
841
842static int __devinit iic_request_irq(struct of_device *ofdev,
843 struct ibm_iic_private *dev)
844{
845 struct device_node *np = ofdev->node;
846 int irq;
847
848 if (iic_force_poll)
849 return NO_IRQ;
850
851 irq = irq_of_parse_and_map(np, 0);
852 if (irq == NO_IRQ) {
853 dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n");
854 return NO_IRQ;
855 }
856
857 /* Disable interrupts until we finish initialization, assumes
858 * level-sensitive IRQ setup...
859 */
860 iic_interrupt_mode(dev, 0);
861 if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) {
862 dev_err(&ofdev->dev, "request_irq %d failed\n", irq);
863 /* Fallback to the polling mode */
864 return NO_IRQ;
865 }
866
867 return irq;
868}
869
870/*
871 * Register single IIC interface
872 */
873static int __devinit iic_probe(struct of_device *ofdev,
874 const struct of_device_id *match)
875{
876 struct device_node *np = ofdev->node;
877 struct ibm_iic_private *dev;
878 struct i2c_adapter *adap;
879 const u32 *indexp, *freq;
880 int ret;
881
882 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
883 if (!dev) {
884 dev_err(&ofdev->dev, "failed to allocate device data\n");
885 return -ENOMEM;
886 }
887
888 dev_set_drvdata(&ofdev->dev, dev);
889
890 indexp = of_get_property(np, "index", NULL);
891 if (!indexp) {
892 dev_err(&ofdev->dev, "no index specified\n");
893 ret = -EINVAL;
894 goto error_cleanup;
895 }
896 dev->idx = *indexp;
897
898 dev->vaddr = of_iomap(np, 0);
899 if (dev->vaddr == NULL) {
900 dev_err(&ofdev->dev, "failed to iomap device\n");
901 ret = -ENXIO;
902 goto error_cleanup;
903 }
904
905 init_waitqueue_head(&dev->wq);
906
907 dev->irq = iic_request_irq(ofdev, dev);
908 if (dev->irq == NO_IRQ)
909 dev_warn(&ofdev->dev, "using polling mode\n");
910
911 /* Board specific settings */
912 if (iic_force_fast || of_get_property(np, "fast-mode", NULL))
913 dev->fast_mode = 1;
914
915 freq = of_get_property(np, "clock-frequency", NULL);
916 if (freq == NULL) {
917 freq = of_get_property(np->parent, "clock-frequency", NULL);
918 if (freq == NULL) {
919 dev_err(&ofdev->dev, "Unable to get bus frequency\n");
920 ret = -EINVAL;
921 goto error_cleanup;
922 }
923 }
924
925 dev->clckdiv = iic_clckdiv(*freq);
926 dev_dbg(&ofdev->dev, "clckdiv = %d\n", dev->clckdiv);
927
928 /* Initialize IIC interface */
929 iic_dev_init(dev);
930
931 /* Register it with i2c layer */
932 adap = &dev->adap;
933 adap->dev.parent = &ofdev->dev;
934 strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
935 i2c_set_adapdata(adap, dev);
936 adap->id = I2C_HW_OCP;
937 adap->class = I2C_CLASS_HWMON;
938 adap->algo = &iic_algo;
939 adap->timeout = 1;
940 adap->nr = dev->idx;
941
942 ret = i2c_add_numbered_adapter(adap);
943 if (ret < 0) {
944 dev_err(&ofdev->dev, "failed to register i2c adapter\n");
945 goto error_cleanup;
946 }
947
948 dev_info(&ofdev->dev, "using %s mode\n",
949 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
950
951 return 0;
952
953error_cleanup:
954 if (dev->irq != NO_IRQ) {
955 iic_interrupt_mode(dev, 0);
956 free_irq(dev->irq, dev);
957 }
958
959 if (dev->vaddr)
960 iounmap(dev->vaddr);
961
962 dev_set_drvdata(&ofdev->dev, NULL);
963 kfree(dev);
964 return ret;
965}
966
967/*
968 * Cleanup initialized IIC interface
969 */
970static int __devexit iic_remove(struct of_device *ofdev)
971{
972 struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
973
974 dev_set_drvdata(&ofdev->dev, NULL);
975
976 i2c_del_adapter(&dev->adap);
977
978 if (dev->irq != NO_IRQ) {
979 iic_interrupt_mode(dev, 0);
980 free_irq(dev->irq, dev);
981 }
982
983 iounmap(dev->vaddr);
984 kfree(dev);
985
986 return 0;
987}
988
989static const struct of_device_id ibm_iic_match[] = {
990 { .compatible = "ibm,iic-405ex", },
991 { .compatible = "ibm,iic-405gp", },
992 { .compatible = "ibm,iic-440gp", },
993 { .compatible = "ibm,iic-440gpx", },
994 { .compatible = "ibm,iic-440grx", },
995 {}
996};
997
998static struct of_platform_driver ibm_iic_driver = {
999 .name = "ibm-iic",
1000 .match_table = ibm_iic_match,
1001 .probe = iic_probe,
1002 .remove = __devexit_p(iic_remove),
1003};
1004
1005static int __init iic_init(void)
1006{
1007 return of_register_platform_driver(&ibm_iic_driver);
1008}
1009
1010static void __exit iic_exit(void)
1011{
1012 of_unregister_platform_driver(&ibm_iic_driver);
1013}
1014#endif /* CONFIG_IBM_OCP */
1015
831module_init(iic_init); 1016module_init(iic_init);
832module_exit(iic_exit); 1017module_exit(iic_exit);
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index ab41400c883e..39884e797594 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -550,3 +550,4 @@ module_exit (i2c_iop3xx_exit);
550MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>"); 550MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>");
551MODULE_DESCRIPTION("IOP3xx iic algorithm and driver"); 551MODULE_DESCRIPTION("IOP3xx iic algorithm and driver");
552MODULE_LICENSE("GPL"); 552MODULE_LICENSE("GPL");
553MODULE_ALIAS("platform:IOP3xx-I2C");
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 6352121a2827..5af9e6521e6c 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -164,4 +164,5 @@ module_exit(ixp2000_i2c_exit);
164MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>"); 164MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
165MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver"); 165MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
166MODULE_LICENSE("GPL"); 166MODULE_LICENSE("GPL");
167MODULE_ALIAS("platform:IXP2000-I2C");
167 168
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index bbe787b243b7..18beb0ad7bf3 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -392,6 +392,9 @@ static int fsl_i2c_remove(struct platform_device *pdev)
392 return 0; 392 return 0;
393}; 393};
394 394
395/* work with hotplug and coldplug */
396MODULE_ALIAS("platform:fsl-i2c");
397
395/* Structure for a device driver */ 398/* Structure for a device driver */
396static struct platform_driver fsl_i2c_driver = { 399static struct platform_driver fsl_i2c_driver = {
397 .probe = fsl_i2c_probe, 400 .probe = fsl_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index e417c2c3ca22..f145692cbb76 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -312,6 +312,9 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev)
312 return 0; 312 return 0;
313} 313}
314 314
315/* work with hotplug and coldplug */
316MODULE_ALIAS("platform:ocores-i2c");
317
315static struct platform_driver ocores_i2c_driver = { 318static struct platform_driver ocores_i2c_driver = {
316 .probe = ocores_i2c_probe, 319 .probe = ocores_i2c_probe,
317 .remove = __devexit_p(ocores_i2c_remove), 320 .remove = __devexit_p(ocores_i2c_remove),
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7ba31770d773..e7eb7bf9ddec 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -693,3 +693,4 @@ module_exit(omap_i2c_exit_driver);
693MODULE_AUTHOR("MontaVista Software, Inc. (and others)"); 693MODULE_AUTHOR("MontaVista Software, Inc. (and others)");
694MODULE_DESCRIPTION("TI OMAP I2C bus adapter"); 694MODULE_DESCRIPTION("TI OMAP I2C bus adapter");
695MODULE_LICENSE("GPL"); 695MODULE_LICENSE("GPL");
696MODULE_ALIAS("platform:i2c_omap");
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 496ee875eb4f..a119784bae10 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * i2c-pca-isa.c driver for PCA9564 on ISA boards 2 * i2c-pca-isa.c driver for PCA9564 on ISA boards
3 * Copyright (C) 2004 Arcom Control Systems 3 * Copyright (C) 2004 Arcom Control Systems
4 * Copyright (C) 2008 Pengutronix
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -22,11 +23,9 @@
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
25#include <linux/slab.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/wait.h> 28#include <linux/wait.h>
29
30#include <linux/isa.h> 29#include <linux/isa.h>
31#include <linux/i2c.h> 30#include <linux/i2c.h>
32#include <linux/i2c-algo-pca.h> 31#include <linux/i2c-algo-pca.h>
@@ -34,13 +33,9 @@
34#include <asm/io.h> 33#include <asm/io.h>
35#include <asm/irq.h> 34#include <asm/irq.h>
36 35
37#include "../algos/i2c-algo-pca.h" 36#define DRIVER "i2c-pca-isa"
38
39#define IO_SIZE 4 37#define IO_SIZE 4
40 38
41#undef DEBUG_IO
42//#define DEBUG_IO
43
44static unsigned long base = 0x330; 39static unsigned long base = 0x330;
45static int irq = 10; 40static int irq = 10;
46 41
@@ -48,22 +43,9 @@ static int irq = 10;
48 * in the actual clock rate */ 43 * in the actual clock rate */
49static int clock = I2C_PCA_CON_59kHz; 44static int clock = I2C_PCA_CON_59kHz;
50 45
51static int own = 0x55;
52
53static wait_queue_head_t pca_wait; 46static wait_queue_head_t pca_wait;
54 47
55static int pca_isa_getown(struct i2c_algo_pca_data *adap) 48static void pca_isa_writebyte(void *pd, int reg, int val)
56{
57 return (own);
58}
59
60static int pca_isa_getclock(struct i2c_algo_pca_data *adap)
61{
62 return (clock);
63}
64
65static void
66pca_isa_writebyte(struct i2c_algo_pca_data *adap, int reg, int val)
67{ 49{
68#ifdef DEBUG_IO 50#ifdef DEBUG_IO
69 static char *names[] = { "T/O", "DAT", "ADR", "CON" }; 51 static char *names[] = { "T/O", "DAT", "ADR", "CON" };
@@ -72,44 +54,49 @@ pca_isa_writebyte(struct i2c_algo_pca_data *adap, int reg, int val)
72 outb(val, base+reg); 54 outb(val, base+reg);
73} 55}
74 56
75static int 57static int pca_isa_readbyte(void *pd, int reg)
76pca_isa_readbyte(struct i2c_algo_pca_data *adap, int reg)
77{ 58{
78 int res = inb(base+reg); 59 int res = inb(base+reg);
79#ifdef DEBUG_IO 60#ifdef DEBUG_IO
80 { 61 {
81 static char *names[] = { "STA", "DAT", "ADR", "CON" }; 62 static char *names[] = { "STA", "DAT", "ADR", "CON" };
82 printk("*** read %s => %#04x\n", names[reg], res); 63 printk("*** read %s => %#04x\n", names[reg], res);
83 } 64 }
84#endif 65#endif
85 return res; 66 return res;
86} 67}
87 68
88static int pca_isa_waitforinterrupt(struct i2c_algo_pca_data *adap) 69static int pca_isa_waitforcompletion(void *pd)
89{ 70{
90 int ret = 0; 71 int ret = 0;
91 72
92 if (irq > -1) { 73 if (irq > -1) {
93 ret = wait_event_interruptible(pca_wait, 74 ret = wait_event_interruptible(pca_wait,
94 pca_isa_readbyte(adap, I2C_PCA_CON) & I2C_PCA_CON_SI); 75 pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI);
95 } else { 76 } else {
96 while ((pca_isa_readbyte(adap, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) 77 while ((pca_isa_readbyte(pd, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
97 udelay(100); 78 udelay(100);
98 } 79 }
99 return ret; 80 return ret;
100} 81}
101 82
83static void pca_isa_resetchip(void *pd)
84{
85 /* apparently only an external reset will do it. not a lot can be done */
86 printk(KERN_WARNING DRIVER ": Haven't figured out how to do a reset yet\n");
87}
88
102static irqreturn_t pca_handler(int this_irq, void *dev_id) { 89static irqreturn_t pca_handler(int this_irq, void *dev_id) {
103 wake_up_interruptible(&pca_wait); 90 wake_up_interruptible(&pca_wait);
104 return IRQ_HANDLED; 91 return IRQ_HANDLED;
105} 92}
106 93
107static struct i2c_algo_pca_data pca_isa_data = { 94static struct i2c_algo_pca_data pca_isa_data = {
108 .get_own = pca_isa_getown, 95 /* .data intentionally left NULL, not needed with ISA */
109 .get_clock = pca_isa_getclock,
110 .write_byte = pca_isa_writebyte, 96 .write_byte = pca_isa_writebyte,
111 .read_byte = pca_isa_readbyte, 97 .read_byte = pca_isa_readbyte,
112 .wait_for_interrupt = pca_isa_waitforinterrupt, 98 .wait_for_completion = pca_isa_waitforcompletion,
99 .reset_chip = pca_isa_resetchip,
113}; 100};
114 101
115static struct i2c_adapter pca_isa_ops = { 102static struct i2c_adapter pca_isa_ops = {
@@ -117,6 +104,7 @@ static struct i2c_adapter pca_isa_ops = {
117 .id = I2C_HW_A_ISA, 104 .id = I2C_HW_A_ISA,
118 .algo_data = &pca_isa_data, 105 .algo_data = &pca_isa_data,
119 .name = "PCA9564 ISA Adapter", 106 .name = "PCA9564 ISA Adapter",
107 .timeout = 100,
120}; 108};
121 109
122static int __devinit pca_isa_probe(struct device *dev, unsigned int id) 110static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
@@ -144,6 +132,7 @@ static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
144 } 132 }
145 } 133 }
146 134
135 pca_isa_data.i2c_clock = clock;
147 if (i2c_pca_add_bus(&pca_isa_ops) < 0) { 136 if (i2c_pca_add_bus(&pca_isa_ops) < 0) {
148 dev_err(dev, "Failed to add i2c bus\n"); 137 dev_err(dev, "Failed to add i2c bus\n");
149 goto out_irq; 138 goto out_irq;
@@ -178,7 +167,7 @@ static struct isa_driver pca_isa_driver = {
178 .remove = __devexit_p(pca_isa_remove), 167 .remove = __devexit_p(pca_isa_remove),
179 .driver = { 168 .driver = {
180 .owner = THIS_MODULE, 169 .owner = THIS_MODULE,
181 .name = "i2c-pca-isa", 170 .name = DRIVER,
182 } 171 }
183}; 172};
184 173
@@ -204,7 +193,5 @@ MODULE_PARM_DESC(irq, "IRQ");
204module_param(clock, int, 0); 193module_param(clock, int, 0);
205MODULE_PARM_DESC(clock, "Clock rate as described in table 1 of PCA9564 datasheet"); 194MODULE_PARM_DESC(clock, "Clock rate as described in table 1 of PCA9564 datasheet");
206 195
207module_param(own, int, 0); /* the driver can't do slave mode, so there's no real point in this */
208
209module_init(pca_isa_init); 196module_init(pca_isa_init);
210module_exit(pca_isa_exit); 197module_exit(pca_isa_exit);
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
new file mode 100644
index 000000000000..9d75f51e8f0e
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -0,0 +1,298 @@
1/*
2 * i2c_pca_platform.c
3 *
4 * Platform driver for the PCA9564 I2C controller.
5 *
6 * Copyright (C) 2008 Pengutronix
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11
12 */
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/delay.h>
18#include <linux/errno.h>
19#include <linux/i2c.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/i2c-algo-pca.h>
23#include <linux/i2c-pca-platform.h>
24#include <linux/gpio.h>
25
26#include <asm/irq.h>
27#include <asm/io.h>
28
29#define res_len(r) ((r)->end - (r)->start + 1)
30
31struct i2c_pca_pf_data {
32 void __iomem *reg_base;
33 int irq; /* if 0, use polling */
34 int gpio;
35 wait_queue_head_t wait;
36 struct i2c_adapter adap;
37 struct i2c_algo_pca_data algo_data;
38 unsigned long io_base;
39 unsigned long io_size;
40};
41
42/* Read/Write functions for different register alignments */
43
44static int i2c_pca_pf_readbyte8(void *pd, int reg)
45{
46 struct i2c_pca_pf_data *i2c = pd;
47 return ioread8(i2c->reg_base + reg);
48}
49
50static int i2c_pca_pf_readbyte16(void *pd, int reg)
51{
52 struct i2c_pca_pf_data *i2c = pd;
53 return ioread8(i2c->reg_base + reg * 2);
54}
55
56static int i2c_pca_pf_readbyte32(void *pd, int reg)
57{
58 struct i2c_pca_pf_data *i2c = pd;
59 return ioread8(i2c->reg_base + reg * 4);
60}
61
62static void i2c_pca_pf_writebyte8(void *pd, int reg, int val)
63{
64 struct i2c_pca_pf_data *i2c = pd;
65 iowrite8(val, i2c->reg_base + reg);
66}
67
68static void i2c_pca_pf_writebyte16(void *pd, int reg, int val)
69{
70 struct i2c_pca_pf_data *i2c = pd;
71 iowrite8(val, i2c->reg_base + reg * 2);
72}
73
74static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
75{
76 struct i2c_pca_pf_data *i2c = pd;
77 iowrite8(val, i2c->reg_base + reg * 4);
78}
79
80
81static int i2c_pca_pf_waitforcompletion(void *pd)
82{
83 struct i2c_pca_pf_data *i2c = pd;
84 int ret = 0;
85
86 if (i2c->irq) {
87 ret = wait_event_interruptible(i2c->wait,
88 i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
89 & I2C_PCA_CON_SI);
90 } else {
91 /*
92 * Do polling...
93 * XXX: Could get stuck in extreme cases!
94 * Maybe add timeout, but using irqs is preferred anyhow.
95 */
96 while ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
97 & I2C_PCA_CON_SI) == 0)
98 udelay(100);
99 }
100
101 return ret;
102}
103
104static void i2c_pca_pf_dummyreset(void *pd)
105{
106 struct i2c_pca_pf_data *i2c = pd;
107 printk(KERN_WARNING "%s: No reset-pin found. Chip may get stuck!\n",
108 i2c->adap.name);
109}
110
111static void i2c_pca_pf_resetchip(void *pd)
112{
113 struct i2c_pca_pf_data *i2c = pd;
114
115 gpio_set_value(i2c->gpio, 0);
116 ndelay(100);
117 gpio_set_value(i2c->gpio, 1);
118}
119
120static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
121{
122 struct i2c_pca_pf_data *i2c = dev_id;
123
124 if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
125 return IRQ_NONE;
126
127 wake_up_interruptible(&i2c->wait);
128
129 return IRQ_HANDLED;
130}
131
132
133static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
134{
135 struct i2c_pca_pf_data *i2c;
136 struct resource *res;
137 struct i2c_pca9564_pf_platform_data *platform_data =
138 pdev->dev.platform_data;
139 int ret = 0;
140 int irq;
141
142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
143 irq = platform_get_irq(pdev, 0);
144 /* If irq is 0, we do polling. */
145
146 if (res == NULL) {
147 ret = -ENODEV;
148 goto e_print;
149 }
150
151 if (!request_mem_region(res->start, res_len(res), res->name)) {
152 ret = -ENOMEM;
153 goto e_print;
154 }
155
156 i2c = kzalloc(sizeof(struct i2c_pca_pf_data), GFP_KERNEL);
157 if (!i2c) {
158 ret = -ENOMEM;
159 goto e_alloc;
160 }
161
162 init_waitqueue_head(&i2c->wait);
163
164 i2c->reg_base = ioremap(res->start, res_len(res));
165 if (!i2c->reg_base) {
166 ret = -EIO;
167 goto e_remap;
168 }
169 i2c->io_base = res->start;
170 i2c->io_size = res_len(res);
171 i2c->irq = irq;
172
173 i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
174 i2c->adap.owner = THIS_MODULE;
175 snprintf(i2c->adap.name, sizeof(i2c->adap.name), "PCA9564 at 0x%08lx",
176 (unsigned long) res->start);
177 i2c->adap.algo_data = &i2c->algo_data;
178 i2c->adap.dev.parent = &pdev->dev;
179 i2c->adap.timeout = platform_data->timeout;
180
181 i2c->algo_data.i2c_clock = platform_data->i2c_clock_speed;
182 i2c->algo_data.data = i2c;
183
184 switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
185 case IORESOURCE_MEM_32BIT:
186 i2c->algo_data.write_byte = i2c_pca_pf_writebyte32;
187 i2c->algo_data.read_byte = i2c_pca_pf_readbyte32;
188 break;
189 case IORESOURCE_MEM_16BIT:
190 i2c->algo_data.write_byte = i2c_pca_pf_writebyte16;
191 i2c->algo_data.read_byte = i2c_pca_pf_readbyte16;
192 break;
193 case IORESOURCE_MEM_8BIT:
194 default:
195 i2c->algo_data.write_byte = i2c_pca_pf_writebyte8;
196 i2c->algo_data.read_byte = i2c_pca_pf_readbyte8;
197 break;
198 }
199
200 i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion;
201
202 i2c->gpio = platform_data->gpio;
203 i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset;
204
205 /* Use gpio_is_valid() when in mainline */
206 if (i2c->gpio > -1) {
207 ret = gpio_request(i2c->gpio, i2c->adap.name);
208 if (ret == 0) {
209 gpio_direction_output(i2c->gpio, 1);
210 i2c->algo_data.reset_chip = i2c_pca_pf_resetchip;
211 } else {
212 printk(KERN_WARNING "%s: Registering gpio failed!\n",
213 i2c->adap.name);
214 i2c->gpio = ret;
215 }
216 }
217
218 if (irq) {
219 ret = request_irq(irq, i2c_pca_pf_handler,
220 IRQF_TRIGGER_FALLING, i2c->adap.name, i2c);
221 if (ret)
222 goto e_reqirq;
223 }
224
225 if (i2c_pca_add_numbered_bus(&i2c->adap) < 0) {
226 ret = -ENODEV;
227 goto e_adapt;
228 }
229
230 platform_set_drvdata(pdev, i2c);
231
232 printk(KERN_INFO "%s registered.\n", i2c->adap.name);
233
234 return 0;
235
236e_adapt:
237 if (irq)
238 free_irq(irq, i2c);
239e_reqirq:
240 if (i2c->gpio > -1)
241 gpio_free(i2c->gpio);
242
243 iounmap(i2c->reg_base);
244e_remap:
245 kfree(i2c);
246e_alloc:
247 release_mem_region(res->start, res_len(res));
248e_print:
249 printk(KERN_ERR "Registering PCA9564 FAILED! (%d)\n", ret);
250 return ret;
251}
252
253static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
254{
255 struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
256 platform_set_drvdata(pdev, NULL);
257
258 i2c_del_adapter(&i2c->adap);
259
260 if (i2c->irq)
261 free_irq(i2c->irq, i2c);
262
263 if (i2c->gpio > -1)
264 gpio_free(i2c->gpio);
265
266 iounmap(i2c->reg_base);
267 release_mem_region(i2c->io_base, i2c->io_size);
268 kfree(i2c);
269
270 return 0;
271}
272
273static struct platform_driver i2c_pca_pf_driver = {
274 .probe = i2c_pca_pf_probe,
275 .remove = __devexit_p(i2c_pca_pf_remove),
276 .driver = {
277 .name = "i2c-pca-platform",
278 .owner = THIS_MODULE,
279 },
280};
281
282static int __init i2c_pca_pf_init(void)
283{
284 return platform_driver_register(&i2c_pca_pf_driver);
285}
286
287static void __exit i2c_pca_pf_exit(void)
288{
289 platform_driver_unregister(&i2c_pca_pf_driver);
290}
291
292MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
293MODULE_DESCRIPTION("I2C-PCA9564 platform driver");
294MODULE_LICENSE("GPL");
295
296module_init(i2c_pca_pf_init);
297module_exit(i2c_pca_pf_exit);
298
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index b03af5653c65..63b3e2c11cff 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -467,7 +467,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
467 (cmd->read_len == 0 || cmd->write_len == 0))) { 467 (cmd->read_len == 0 || cmd->write_len == 0))) {
468 dev_err(&pmcmsptwi_adapter.dev, 468 dev_err(&pmcmsptwi_adapter.dev,
469 "%s: Cannot transfer less than 1 byte\n", 469 "%s: Cannot transfer less than 1 byte\n",
470 __FUNCTION__); 470 __func__);
471 return -EINVAL; 471 return -EINVAL;
472 } 472 }
473 473
@@ -475,7 +475,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
475 cmd->write_len > MSP_MAX_BYTES_PER_RW) { 475 cmd->write_len > MSP_MAX_BYTES_PER_RW) {
476 dev_err(&pmcmsptwi_adapter.dev, 476 dev_err(&pmcmsptwi_adapter.dev,
477 "%s: Cannot transfer more than %d bytes\n", 477 "%s: Cannot transfer more than %d bytes\n",
478 __FUNCTION__, MSP_MAX_BYTES_PER_RW); 478 __func__, MSP_MAX_BYTES_PER_RW);
479 return -EINVAL; 479 return -EINVAL;
480 } 480 }
481 481
@@ -627,6 +627,9 @@ static struct i2c_adapter pmcmsptwi_adapter = {
627 .name = DRV_NAME, 627 .name = DRV_NAME,
628}; 628};
629 629
630/* work with hotplug and coldplug */
631MODULE_ALIAS("platform:" DRV_NAME);
632
630static struct platform_driver pmcmsptwi_driver = { 633static struct platform_driver pmcmsptwi_driver = {
631 .probe = pmcmsptwi_probe, 634 .probe = pmcmsptwi_probe,
632 .remove = __devexit_p(pmcmsptwi_remove), 635 .remove = __devexit_p(pmcmsptwi_remove),
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index f8d0dff0de7e..1ca21084ffcf 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -76,7 +76,7 @@ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap)
76{ 76{
77 struct i2c_pnx_algo_data *alg_data = adap->algo_data; 77 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
78 78
79 dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __FUNCTION__, 79 dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __func__,
80 slave_addr, alg_data->mif.mode); 80 slave_addr, alg_data->mif.mode);
81 81
82 /* Check for 7 bit slave addresses only */ 82 /* Check for 7 bit slave addresses only */
@@ -110,14 +110,14 @@ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap)
110 iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi, 110 iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi,
111 I2C_REG_STS(alg_data)); 111 I2C_REG_STS(alg_data));
112 112
113 dev_dbg(&adap->dev, "%s(): sending %#x\n", __FUNCTION__, 113 dev_dbg(&adap->dev, "%s(): sending %#x\n", __func__,
114 (slave_addr << 1) | start_bit | alg_data->mif.mode); 114 (slave_addr << 1) | start_bit | alg_data->mif.mode);
115 115
116 /* Write the slave address, START bit and R/W bit */ 116 /* Write the slave address, START bit and R/W bit */
117 iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode, 117 iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode,
118 I2C_REG_TX(alg_data)); 118 I2C_REG_TX(alg_data));
119 119
120 dev_dbg(&adap->dev, "%s(): exit\n", __FUNCTION__); 120 dev_dbg(&adap->dev, "%s(): exit\n", __func__);
121 121
122 return 0; 122 return 0;
123} 123}
@@ -135,7 +135,7 @@ static void i2c_pnx_stop(struct i2c_adapter *adap)
135 long timeout = 1000; 135 long timeout = 1000;
136 136
137 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n", 137 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
138 __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); 138 __func__, ioread32(I2C_REG_STS(alg_data)));
139 139
140 /* Write a STOP bit to TX FIFO */ 140 /* Write a STOP bit to TX FIFO */
141 iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data)); 141 iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data));
@@ -149,7 +149,7 @@ static void i2c_pnx_stop(struct i2c_adapter *adap)
149 } 149 }
150 150
151 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n", 151 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
152 __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); 152 __func__, ioread32(I2C_REG_STS(alg_data)));
153} 153}
154 154
155/** 155/**
@@ -164,7 +164,7 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
164 u32 val; 164 u32 val;
165 165
166 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n", 166 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
167 __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); 167 __func__, ioread32(I2C_REG_STS(alg_data)));
168 168
169 if (alg_data->mif.len > 0) { 169 if (alg_data->mif.len > 0) {
170 /* We still have something to talk about... */ 170 /* We still have something to talk about... */
@@ -179,7 +179,7 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
179 alg_data->mif.len--; 179 alg_data->mif.len--;
180 iowrite32(val, I2C_REG_TX(alg_data)); 180 iowrite32(val, I2C_REG_TX(alg_data));
181 181
182 dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __FUNCTION__, 182 dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __func__,
183 val, alg_data->mif.len + 1); 183 val, alg_data->mif.len + 1);
184 184
185 if (alg_data->mif.len == 0) { 185 if (alg_data->mif.len == 0) {
@@ -197,7 +197,7 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
197 del_timer_sync(&alg_data->mif.timer); 197 del_timer_sync(&alg_data->mif.timer);
198 198
199 dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n", 199 dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n",
200 __FUNCTION__); 200 __func__);
201 201
202 complete(&alg_data->mif.complete); 202 complete(&alg_data->mif.complete);
203 } 203 }
@@ -213,13 +213,13 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
213 /* Stop timer. */ 213 /* Stop timer. */
214 del_timer_sync(&alg_data->mif.timer); 214 del_timer_sync(&alg_data->mif.timer);
215 dev_dbg(&adap->dev, "%s(): Waking up xfer routine after " 215 dev_dbg(&adap->dev, "%s(): Waking up xfer routine after "
216 "zero-xfer.\n", __FUNCTION__); 216 "zero-xfer.\n", __func__);
217 217
218 complete(&alg_data->mif.complete); 218 complete(&alg_data->mif.complete);
219 } 219 }
220 220
221 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n", 221 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
222 __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); 222 __func__, ioread32(I2C_REG_STS(alg_data)));
223 223
224 return 0; 224 return 0;
225} 225}
@@ -237,14 +237,14 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
237 u32 ctl = 0; 237 u32 ctl = 0;
238 238
239 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n", 239 dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
240 __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); 240 __func__, ioread32(I2C_REG_STS(alg_data)));
241 241
242 /* Check, whether there is already data, 242 /* Check, whether there is already data,
243 * or we didn't 'ask' for it yet. 243 * or we didn't 'ask' for it yet.
244 */ 244 */
245 if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) { 245 if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
246 dev_dbg(&adap->dev, "%s(): Write dummy data to fill " 246 dev_dbg(&adap->dev, "%s(): Write dummy data to fill "
247 "Rx-fifo...\n", __FUNCTION__); 247 "Rx-fifo...\n", __func__);
248 248
249 if (alg_data->mif.len == 1) { 249 if (alg_data->mif.len == 1) {
250 /* Last byte, do not acknowledge next rcv. */ 250 /* Last byte, do not acknowledge next rcv. */
@@ -276,7 +276,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
276 if (alg_data->mif.len > 0) { 276 if (alg_data->mif.len > 0) {
277 val = ioread32(I2C_REG_RX(alg_data)); 277 val = ioread32(I2C_REG_RX(alg_data));
278 *alg_data->mif.buf++ = (u8) (val & 0xff); 278 *alg_data->mif.buf++ = (u8) (val & 0xff);
279 dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __FUNCTION__, val, 279 dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __func__, val,
280 alg_data->mif.len); 280 alg_data->mif.len);
281 281
282 alg_data->mif.len--; 282 alg_data->mif.len--;
@@ -300,7 +300,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
300 } 300 }
301 301
302 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n", 302 dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
303 __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); 303 __func__, ioread32(I2C_REG_STS(alg_data)));
304 304
305 return 0; 305 return 0;
306} 306}
@@ -312,7 +312,7 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
312 struct i2c_pnx_algo_data *alg_data = adap->algo_data; 312 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
313 313
314 dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n", 314 dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n",
315 __FUNCTION__, 315 __func__,
316 ioread32(I2C_REG_STS(alg_data)), 316 ioread32(I2C_REG_STS(alg_data)),
317 ioread32(I2C_REG_CTL(alg_data)), 317 ioread32(I2C_REG_CTL(alg_data)),
318 alg_data->mif.mode); 318 alg_data->mif.mode);
@@ -336,7 +336,7 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
336 /* Slave did not acknowledge, generate a STOP */ 336 /* Slave did not acknowledge, generate a STOP */
337 dev_dbg(&adap->dev, "%s(): " 337 dev_dbg(&adap->dev, "%s(): "
338 "Slave did not acknowledge, generating a STOP.\n", 338 "Slave did not acknowledge, generating a STOP.\n",
339 __FUNCTION__); 339 __func__);
340 i2c_pnx_stop(adap); 340 i2c_pnx_stop(adap);
341 341
342 /* Disable master interrupts. */ 342 /* Disable master interrupts. */
@@ -375,7 +375,7 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
375 iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); 375 iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data));
376 376
377 dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n", 377 dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n",
378 __FUNCTION__, ioread32(I2C_REG_STS(alg_data)), 378 __func__, ioread32(I2C_REG_STS(alg_data)),
379 ioread32(I2C_REG_CTL(alg_data))); 379 ioread32(I2C_REG_CTL(alg_data)));
380 380
381 return IRQ_HANDLED; 381 return IRQ_HANDLED;
@@ -447,7 +447,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
447 u32 stat = ioread32(I2C_REG_STS(alg_data)); 447 u32 stat = ioread32(I2C_REG_STS(alg_data));
448 448
449 dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n", 449 dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n",
450 __FUNCTION__, num, ioread32(I2C_REG_STS(alg_data))); 450 __func__, num, ioread32(I2C_REG_STS(alg_data)));
451 451
452 bus_reset_if_active(adap); 452 bus_reset_if_active(adap);
453 453
@@ -473,7 +473,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
473 alg_data->mif.ret = 0; 473 alg_data->mif.ret = 0;
474 alg_data->last = (i == num - 1); 474 alg_data->last = (i == num - 1);
475 475
476 dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __FUNCTION__, 476 dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __func__,
477 alg_data->mif.mode, 477 alg_data->mif.mode,
478 alg_data->mif.len); 478 alg_data->mif.len);
479 479
@@ -498,7 +498,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
498 if (!(rc = alg_data->mif.ret)) 498 if (!(rc = alg_data->mif.ret))
499 completed++; 499 completed++;
500 dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n", 500 dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n",
501 __FUNCTION__, rc); 501 __func__, rc);
502 502
503 /* Clear TDI and AFI bits in case they are set. */ 503 /* Clear TDI and AFI bits in case they are set. */
504 if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) { 504 if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
@@ -522,7 +522,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
522 alg_data->mif.len = 0; 522 alg_data->mif.len = 0;
523 523
524 dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n", 524 dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n",
525 __FUNCTION__, ioread32(I2C_REG_STS(alg_data))); 525 __func__, ioread32(I2C_REG_STS(alg_data)));
526 526
527 if (completed != num) 527 if (completed != num)
528 return ((rc < 0) ? rc : -EREMOTEIO); 528 return ((rc < 0) ? rc : -EREMOTEIO);
@@ -563,7 +563,7 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
563 563
564 if (!i2c_pnx || !i2c_pnx->adapter) { 564 if (!i2c_pnx || !i2c_pnx->adapter) {
565 dev_err(&pdev->dev, "%s: no platform data supplied\n", 565 dev_err(&pdev->dev, "%s: no platform data supplied\n",
566 __FUNCTION__); 566 __func__);
567 ret = -EINVAL; 567 ret = -EINVAL;
568 goto out; 568 goto out;
569 } 569 }
@@ -697,6 +697,7 @@ static void __exit i2c_adap_pnx_exit(void)
697MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>"); 697MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>");
698MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); 698MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses");
699MODULE_LICENSE("GPL"); 699MODULE_LICENSE("GPL");
700MODULE_ALIAS("platform:pnx-i2c");
700 701
701/* We need to make sure I2C is initialized before USB */ 702/* We need to make sure I2C is initialized before USB */
702subsys_initcall(i2c_adap_pnx_init); 703subsys_initcall(i2c_adap_pnx_init);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 7813127649a1..22f6d5c00d80 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -263,6 +263,9 @@ static int __devexit i2c_powermac_probe(struct platform_device *dev)
263} 263}
264 264
265 265
266/* work with hotplug and coldplug */
267MODULE_ALIAS("platform:i2c-powermac");
268
266static struct platform_driver i2c_powermac_driver = { 269static struct platform_driver i2c_powermac_driver = {
267 .probe = i2c_powermac_probe, 270 .probe = i2c_powermac_probe,
268 .remove = __devexit_p(i2c_powermac_remove), 271 .remove = __devexit_p(i2c_powermac_remove),
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 6fd2d6a84eff..eb69fbadc9cb 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -155,7 +155,7 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname)
155 readl(_ISR(i2c)), readl(_ICR(i2c)), readl(_IBMR(i2c))); 155 readl(_ISR(i2c)), readl(_ICR(i2c)), readl(_IBMR(i2c)));
156} 156}
157 157
158#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __FUNCTION__) 158#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __func__)
159#else 159#else
160#define i2c_debug 0 160#define i2c_debug 0
161 161
@@ -1132,6 +1132,7 @@ static void __exit i2c_adap_pxa_exit(void)
1132} 1132}
1133 1133
1134MODULE_LICENSE("GPL"); 1134MODULE_LICENSE("GPL");
1135MODULE_ALIAS("platform:pxa2xx-i2c");
1135 1136
1136module_init(i2c_adap_pxa_init); 1137module_init(i2c_adap_pxa_init);
1137module_exit(i2c_adap_pxa_exit); 1138module_exit(i2c_adap_pxa_exit);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c44ada5f4292..1305ef190fc1 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -276,12 +276,12 @@ static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
276 switch (i2c->state) { 276 switch (i2c->state) {
277 277
278 case STATE_IDLE: 278 case STATE_IDLE:
279 dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __FUNCTION__); 279 dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
280 goto out; 280 goto out;
281 break; 281 break;
282 282
283 case STATE_STOP: 283 case STATE_STOP:
284 dev_err(i2c->dev, "%s: called in STATE_STOP\n", __FUNCTION__); 284 dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
285 s3c24xx_i2c_disable_irq(i2c); 285 s3c24xx_i2c_disable_irq(i2c);
286 goto out_ack; 286 goto out_ack;
287 287
@@ -948,3 +948,4 @@ module_exit(i2c_adap_s3c_exit);
948MODULE_DESCRIPTION("S3C24XX I2C Bus driver"); 948MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
949MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); 949MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
950MODULE_LICENSE("GPL"); 950MODULE_LICENSE("GPL");
951MODULE_ALIAS("platform:s3c2410-i2c");
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
new file mode 100644
index 000000000000..5e0e254976de
--- /dev/null
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -0,0 +1,577 @@
1/*
2 * I2C bus driver for the SH7760 I2C Interfaces.
3 *
4 * (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com>
5 *
6 * licensed under the terms outlined in the file COPYING.
7 *
8 */
9
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <asm/clock.h>
#include <asm/i2c-sh7760.h>
#include <asm/io.h>
23
24/* register offsets */
25#define I2CSCR 0x0 /* slave ctrl */
26#define I2CMCR 0x4 /* master ctrl */
27#define I2CSSR 0x8 /* slave status */
28#define I2CMSR 0xC /* master status */
29#define I2CSIER 0x10 /* slave irq enable */
30#define I2CMIER 0x14 /* master irq enable */
31#define I2CCCR 0x18 /* clock dividers */
32#define I2CSAR 0x1c /* slave address */
33#define I2CMAR 0x20 /* master address */
34#define I2CRXTX 0x24 /* data port */
35#define I2CFCR 0x28 /* fifo control */
36#define I2CFSR 0x2C /* fifo status */
37#define I2CFIER 0x30 /* fifo irq enable */
38#define I2CRFDR 0x34 /* rx fifo count */
39#define I2CTFDR 0x38 /* tx fifo count */
40
41#define REGSIZE 0x3C
42
43#define MCR_MDBS 0x80 /* non-fifo mode switch */
44#define MCR_FSCL 0x40 /* override SCL pin */
45#define MCR_FSDA 0x20 /* override SDA pin */
46#define MCR_OBPC 0x10 /* override pins */
47#define MCR_MIE 0x08 /* master if enable */
48#define MCR_TSBE 0x04
49#define MCR_FSB 0x02 /* force stop bit */
50#define MCR_ESG 0x01 /* en startbit gen. */
51
52#define MSR_MNR 0x40 /* nack received */
53#define MSR_MAL 0x20 /* arbitration lost */
54#define MSR_MST 0x10 /* sent a stop */
55#define MSR_MDE 0x08
56#define MSR_MDT 0x04
57#define MSR_MDR 0x02
58#define MSR_MAT 0x01 /* slave addr xfer done */
59
60#define MIE_MNRE 0x40 /* nack irq en */
61#define MIE_MALE 0x20 /* arblos irq en */
62#define MIE_MSTE 0x10 /* stop irq en */
63#define MIE_MDEE 0x08
64#define MIE_MDTE 0x04
65#define MIE_MDRE 0x02
66#define MIE_MATE 0x01 /* address sent irq en */
67
68#define FCR_RFRST 0x02 /* reset rx fifo */
69#define FCR_TFRST 0x01 /* reset tx fifo */
70
71#define FSR_TEND 0x04 /* last byte sent */
72#define FSR_RDF 0x02 /* rx fifo trigger */
73#define FSR_TDFE 0x01 /* tx fifo empty */
74
75#define FIER_TEIE 0x04 /* tx fifo empty irq en */
76#define FIER_RXIE 0x02 /* rx fifo trig irq en */
77#define FIER_TXIE 0x01 /* tx fifo trig irq en */
78
79#define FIFO_SIZE 16
80
/*
 * Per-adapter driver state for one SH7760 I2C interface.
 */
struct cami2c {
	void __iomem *iobase;		/* mapped register window (REGSIZE bytes) */
	struct i2c_adapter adap;

	/* message processing */
	struct i2c_msg *msg;		/* message currently being transferred */
#define IDF_SEND	1
#define IDF_RECV	2
#define IDF_STOP	4
	int flags;			/* IDF_* direction/stop flags for the ISR */

#define IDS_DONE	1
#define IDS_ARBLOST	2
#define IDS_NACK	4
	int status;			/* IDS_* result flags set by the ISR */
	struct completion xfer_done;	/* signalled by the ISR on IDS_DONE */

	int irq;			/* interrupt line from platform_get_irq() */
	struct resource *ioarea;	/* claimed MMIO region, freed in remove */
};
101
102static inline void OUT32(struct cami2c *cam, int reg, unsigned long val)
103{
104 ctrl_outl(val, (unsigned long)cam->iobase + reg);
105}
106
107static inline unsigned long IN32(struct cami2c *cam, int reg)
108{
109 return ctrl_inl((unsigned long)cam->iobase + reg);
110}
111
/*
 * Single interrupt handler for all master-mode events: arbitration
 * loss, NACK, STOP completion, address-sent, and RX/TX FIFO service.
 * Results are reported to the xfer routine through id->status and
 * id->xfer_done.
 */
static irqreturn_t sh7760_i2c_irq(int irq, void *ptr)
{
	struct cami2c *id = ptr;
	struct i2c_msg *msg = id->msg;
	char *data = msg->buf;
	unsigned long msr, fsr, fier, len;

	msr = IN32(id, I2CMSR);
	fsr = IN32(id, I2CFSR);

	/* arbitration lost: shut the master and slave modules down */
	if (msr & MSR_MAL) {
		OUT32(id, I2CMCR, 0);
		OUT32(id, I2CSCR, 0);
		OUT32(id, I2CSAR, 0);
		id->status |= IDS_DONE | IDS_ARBLOST;
		goto out;
	}

	if (msr & MSR_MNR) {
		/* NACK handling is very screwed up. After receiving a
		 * NAK IRQ one has to wait a bit before writing to any
		 * registers, or the ctl will lock up. After that delay
		 * do a normal i2c stop. Then wait at least 1 ms before
		 * attempting another transfer or ctl will stop working
		 */
		udelay(100);	/* wait or risk ctl hang */
		OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
		OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);	/* force a STOP */
		OUT32(id, I2CFIER, 0);
		OUT32(id, I2CMIER, MIE_MSTE);	/* only wait for the STOP irq */
		OUT32(id, I2CSCR, 0);
		OUT32(id, I2CSAR, 0);
		id->status |= IDS_NACK;
		msr &= ~MSR_MAT;
		fsr = 0;
		/* In some cases the MST bit is also set. */
	}

	/* i2c-stop was sent */
	if (msr & MSR_MST) {
		id->status |= IDS_DONE;
		goto out;
	}

	/* i2c slave addr was sent; set to "normal" operation */
	if (msr & MSR_MAT)
		OUT32(id, I2CMCR, MCR_MIE);

	fier = IN32(id, I2CFIER);

	if (fsr & FSR_RDF) {
		/* RX FIFO reached its trigger level: drain it */
		len = IN32(id, I2CRFDR);
		if (msg->len <= len) {
			/* all remaining bytes are (or will be) in the FIFO */
			if (id->flags & IDF_STOP) {
				OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
				OUT32(id, I2CFIER, 0);
				/* manual says: wait >= 0.5 SCL times */
				udelay(5);
				/* next int should be MST */
			} else {
				id->status |= IDS_DONE;
				/* keep the RDF bit: ctrl holds SCL low
				 * until the setup for the next i2c_msg
				 * clears this bit.
				 */
				fsr &= ~FSR_RDF;
			}
		}
		while (msg->len && len) {
			*data++ = IN32(id, I2CRXTX);
			msg->len--;
			len--;
		}

		if (msg->len) {
			/* reprogram the RX trigger level for the remainder */
			len = (msg->len >= FIFO_SIZE) ? FIFO_SIZE - 1
						      : msg->len - 1;

			OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xf) << 4));
		}

	} else if (id->flags & IDF_SEND) {
		if ((fsr & FSR_TEND) && (msg->len < 1)) {
			/* last byte left the shift register */
			if (id->flags & IDF_STOP) {
				OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
			} else {
				id->status |= IDS_DONE;
				/* keep the TEND bit: ctl holds SCL low
				 * until the setup for the next i2c_msg
				 * clears this bit.
				 */
				fsr &= ~FSR_TEND;
			}
		}
		if (fsr & FSR_TDFE) {
			/* refill the TX FIFO from the message buffer */
			while (msg->len && (IN32(id, I2CTFDR) < FIFO_SIZE)) {
				OUT32(id, I2CRXTX, *data++);
				msg->len--;
			}

			if (msg->len < 1) {
				/* nothing left: stop TX-trigger interrupts */
				fier &= ~FIER_TXIE;
				OUT32(id, I2CFIER, fier);
			} else {
				len = (msg->len >= FIFO_SIZE) ? 2 : 0;
				OUT32(id, I2CFCR,
				      FCR_RFRST | ((len & 3) << 2));
			}
		}
	}
out:
	if (id->status & IDS_DONE) {
		OUT32(id, I2CMIER, 0);
		OUT32(id, I2CFIER, 0);
		id->msg = NULL;
		complete(&id->xfer_done);
	}
	/* clear status flags and ctrl resumes work */
	OUT32(id, I2CMSR, ~msr);
	OUT32(id, I2CFSR, ~fsr);
	OUT32(id, I2CSSR, 0);

	return IRQ_HANDLED;
}
237
238
/* prepare and start a master receive operation */
static void sh7760_i2c_mrecv(struct cami2c *id)
{
	int len;

	id->flags |= IDF_RECV;

	/* set the slave addr reg; otherwise rcv wont work! */
	OUT32(id, I2CSAR, 0xfe);
	/* master address register: 7-bit address plus R/W=1 (read) */
	OUT32(id, I2CMAR, (id->msg->addr << 1) | 1);

	/* adjust rx fifo trigger */
	if (id->msg->len >= FIFO_SIZE)
		len = FIFO_SIZE - 1;	/* trigger at fifo full */
	else
		len = id->msg->len - 1;	/* trigger before all received */

	/* reset both FIFOs, then program the RX trigger in FCR bits 7:4 */
	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
	OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xF) << 4));

	/* clear status, enable master with start-bit generation, unmask irqs */
	OUT32(id, I2CMSR, 0);
	OUT32(id, I2CMCR, MCR_MIE | MCR_ESG);
	OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE);
	OUT32(id, I2CFIER, FIER_RXIE);
}
264
/* prepare and start a master send operation */
static void sh7760_i2c_msend(struct cami2c *id)
{
	int len;

	id->flags |= IDF_SEND;

	/* set the slave addr reg; otherwise xmit wont work! */
	OUT32(id, I2CSAR, 0xfe);
	/* master address register: 7-bit address plus R/W=0 (write) */
	OUT32(id, I2CMAR, (id->msg->addr << 1) | 0);

	/* adjust tx fifo trigger */
	if (id->msg->len >= FIFO_SIZE)
		len = 2;	/* trig: 2 bytes left in TX fifo */
	else
		len = 0;	/* trig: 8 bytes left in TX fifo */

	/* reset both FIFOs, then program the TX trigger in FCR bits 3:2 */
	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
	OUT32(id, I2CFCR, FCR_RFRST | ((len & 3) << 2));

	/* prefill the TX FIFO before starting the transfer */
	while (id->msg->len && IN32(id, I2CTFDR) < FIFO_SIZE) {
		OUT32(id, I2CRXTX, *(id->msg->buf));
		(id->msg->len)--;
		(id->msg->buf)++;
	}

	/* clear status, enable master with start-bit generation, unmask irqs;
	 * only enable the TX-trigger irq if bytes remain after prefill
	 */
	OUT32(id, I2CMSR, 0);
	OUT32(id, I2CMCR, MCR_MIE | MCR_ESG);
	OUT32(id, I2CFSR, 0);
	OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE);
	OUT32(id, I2CFIER, FIER_TEIE | (id->msg->len ? FIER_TXIE : 0));
}
297
298static inline int sh7760_i2c_busy_check(struct cami2c *id)
299{
300 return (IN32(id, I2CMCR) & MCR_FSDA);
301}
302
/*
 * i2c_algorithm .master_xfer hook: run @num messages, retrying a
 * message on arbitration loss up to adap->retries times.
 *
 * Returns the number of messages completed, or a negative errno
 * (-EBUSY bus busy, -EIO no status from ISR, -EREMOTEIO NACK or
 * retries exhausted). Always quiesces master and slave registers
 * before returning.
 */
static int sh7760_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs,
				  int num)
{
	struct cami2c *id = adap->algo_data;
	int i, retr;

	if (sh7760_i2c_busy_check(id)) {
		dev_err(&adap->dev, "sh7760-i2c%d: bus busy!\n", adap->nr);
		return -EBUSY;
	}

	i = 0;
	while (i < num) {
		retr = adap->retries;
retry:
		/* only the last message gets a STOP condition */
		id->flags = ((i == (num-1)) ? IDF_STOP : 0);
		id->status = 0;
		id->msg = msgs;
		init_completion(&id->xfer_done);

		if (msgs->flags & I2C_M_RD)
			sh7760_i2c_mrecv(id);
		else
			sh7760_i2c_msend(id);

		/* ISR sets id->status and completes xfer_done */
		wait_for_completion(&id->xfer_done);

		if (id->status == 0) {
			/* completion fired but ISR reported nothing */
			num = -EIO;
			break;
		}

		if (id->status & IDS_NACK) {
			/* wait a bit or i2c module stops working */
			mdelay(1);
			num = -EREMOTEIO;
			break;
		}

		if (id->status & IDS_ARBLOST) {
			if (retr--) {
				mdelay(2);
				goto retry;
			}
			num = -EREMOTEIO;
			break;
		}

		msgs++;
		i++;
	}

	id->msg = NULL;
	id->flags = 0;
	id->status = 0;

	OUT32(id, I2CMCR, 0);
	OUT32(id, I2CMSR, 0);
	OUT32(id, I2CMIER, 0);
	OUT32(id, I2CFIER, 0);

	/* reset slave module registers too: master mode enables slave
	 * module for receive ops (ack, data). Without this reset,
	 * eternal bus activity might be reported after NACK / ARBLOST.
	 */
	OUT32(id, I2CSCR, 0);
	OUT32(id, I2CSAR, 0);
	OUT32(id, I2CSSR, 0);

	return num;
}
375
376static u32 sh7760_i2c_func(struct i2c_adapter *adap)
377{
378 return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
379}
380
/* i2c algorithm hooks: master transfer plus capability reporting. */
static const struct i2c_algorithm sh7760_i2c_algo = {
	.master_xfer = sh7760_i2c_master_xfer,
	.functionality = sh7760_i2c_func,
};
385
386/* calculate CCR register setting for a desired scl clock. SCL clock is
387 * derived from I2C module clock (iclk) which in turn is derived from
388 * peripheral module clock (mclk, usually around 33MHz):
389 * iclk = mclk/(CDF + 1). iclk must be < 20MHz.
390 * scl = iclk/(SCGD*8 + 20).
391 */
392static int __devinit calc_CCR(unsigned long scl_hz)
393{
394 struct clk *mclk;
395 unsigned long mck, m1, dff, odff, iclk;
396 signed char cdf, cdfm;
397 int scgd, scgdm, scgds;
398
399 mclk = clk_get(NULL, "module_clk");
400 if (IS_ERR(mclk)) {
401 return PTR_ERR(mclk);
402 } else {
403 mck = mclk->rate;
404 clk_put(mclk);
405 }
406
407 odff = scl_hz;
408 scgdm = cdfm = m1 = 0;
409 for (cdf = 3; cdf >= 0; cdf--) {
410 iclk = mck / (1 + cdf);
411 if (iclk >= 20000000)
412 continue;
413 scgds = ((iclk / scl_hz) - 20) >> 3;
414 for (scgd = scgds; (scgd < 63) && scgd <= scgds + 1; scgd++) {
415 m1 = iclk / (20 + (scgd << 3));
416 dff = abs(scl_hz - m1);
417 if (dff < odff) {
418 odff = dff;
419 cdfm = cdf;
420 scgdm = scgd;
421 }
422 }
423 }
424 /* fail if more than 25% off of requested SCL */
425 if (odff > (scl_hz >> 2))
426 return -EINVAL;
427
428 /* create a CCR register value */
429 return ((scgdm << 2) | cdfm);
430}
431
432static int __devinit sh7760_i2c_probe(struct platform_device *pdev)
433{
434 struct sh7760_i2c_platdata *pd;
435 struct resource *res;
436 struct cami2c *id;
437 int ret;
438
439 pd = pdev->dev.platform_data;
440 if (!pd) {
441 dev_err(&pdev->dev, "no platform_data!\n");
442 ret = -ENODEV;
443 goto out0;
444 }
445
446 id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);
447 if (!id) {
448 dev_err(&pdev->dev, "no mem for private data\n");
449 ret = -ENOMEM;
450 goto out0;
451 }
452
453 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
454 if (!res) {
455 dev_err(&pdev->dev, "no mmio resources\n");
456 ret = -ENODEV;
457 goto out1;
458 }
459
460 id->ioarea = request_mem_region(res->start, REGSIZE, pdev->name);
461 if (!id->ioarea) {
462 dev_err(&pdev->dev, "mmio already reserved\n");
463 ret = -EBUSY;
464 goto out1;
465 }
466
467 id->iobase = ioremap(res->start, REGSIZE);
468 if (!id->iobase) {
469 dev_err(&pdev->dev, "cannot ioremap\n");
470 ret = -ENODEV;
471 goto out2;
472 }
473
474 id->irq = platform_get_irq(pdev, 0);
475
476 id->adap.nr = pdev->id;
477 id->adap.algo = &sh7760_i2c_algo;
478 id->adap.class = I2C_CLASS_ALL;
479 id->adap.retries = 3;
480 id->adap.algo_data = id;
481 id->adap.dev.parent = &pdev->dev;
482 snprintf(id->adap.name, sizeof(id->adap.name),
483 "SH7760 I2C at %08lx", (unsigned long)res->start);
484
485 OUT32(id, I2CMCR, 0);
486 OUT32(id, I2CMSR, 0);
487 OUT32(id, I2CMIER, 0);
488 OUT32(id, I2CMAR, 0);
489 OUT32(id, I2CSIER, 0);
490 OUT32(id, I2CSAR, 0);
491 OUT32(id, I2CSCR, 0);
492 OUT32(id, I2CSSR, 0);
493 OUT32(id, I2CFIER, 0);
494 OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
495 OUT32(id, I2CFSR, 0);
496
497 ret = calc_CCR(pd->speed_khz * 1000);
498 if (ret < 0) {
499 dev_err(&pdev->dev, "invalid SCL clock: %dkHz\n",
500 pd->speed_khz);
501 goto out3;
502 }
503 OUT32(id, I2CCCR, ret);
504
505 if (request_irq(id->irq, sh7760_i2c_irq, IRQF_DISABLED,
506 SH7760_I2C_DEVNAME, id)) {
507 dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
508 ret = -EBUSY;
509 goto out3;
510 }
511
512 ret = i2c_add_numbered_adapter(&id->adap);
513 if (ret < 0) {
514 dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
515 goto out4;
516 }
517
518 platform_set_drvdata(pdev, id);
519
520 dev_info(&pdev->dev, "%d kHz mmio %08x irq %d\n",
521 pd->speed_khz, res->start, id->irq);
522
523 return 0;
524
525out4:
526 free_irq(id->irq, id);
527out3:
528 iounmap(id->iobase);
529out2:
530 release_resource(id->ioarea);
531 kfree(id->ioarea);
532out1:
533 kfree(id);
534out0:
535 return ret;
536}
537
/*
 * Unbind: delete the adapter, then release the IRQ, register mapping
 * and MMIO region in reverse order of acquisition in probe.
 */
static int __devexit sh7760_i2c_remove(struct platform_device *pdev)
{
	struct cami2c *id = platform_get_drvdata(pdev);

	i2c_del_adapter(&id->adap);
	free_irq(id->irq, id);
	iounmap(id->iobase);
	release_resource(id->ioarea);
	kfree(id->ioarea);
	kfree(id);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
552
/* Platform driver glue; binds against devices named SH7760_I2C_DEVNAME. */
static struct platform_driver sh7760_i2c_drv = {
	.driver	= {
		.name	= SH7760_I2C_DEVNAME,
		.owner	= THIS_MODULE,
	},
	.probe		= sh7760_i2c_probe,
	.remove		= __devexit_p(sh7760_i2c_remove),
};
561
/* Module entry point: register the platform driver. */
static int __init sh7760_i2c_init(void)
{
	return platform_driver_register(&sh7760_i2c_drv);
}
566
/* Module exit point: unregister the platform driver. */
static void __exit sh7760_i2c_exit(void)
{
	platform_driver_unregister(&sh7760_i2c_drv);
}
571
module_init(sh7760_i2c_init);
module_exit(sh7760_i2c_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SH7760 I2C bus driver");
MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
/* NOTE(review): the other bus drivers in this series add
 * MODULE_ALIAS("platform:<name>") for hotplug/coldplug; consider doing
 * the same here — confirm SH7760_I2C_DEVNAME expands to a string
 * literal first.
 */
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
new file mode 100644
index 000000000000..840e634fa31f
--- /dev/null
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -0,0 +1,500 @@
1/*
2 * SuperH Mobile I2C Controller
3 *
4 * Copyright (C) 2008 Magnus Damm
5 *
6 * Portions of the code based on out-of-tree driver i2c-sh7343.c
7 * Copyright (c) 2006 Carlos Munoz <carlos@kenati.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/platform_device.h>
28#include <linux/interrupt.h>
29#include <linux/i2c.h>
30#include <linux/err.h>
31#include <linux/clk.h>
32#include <linux/io.h>
33
/* Low-level bus operations dispatched by i2c_op() from the ISR. */
enum sh_mobile_i2c_op {
	OP_START = 0,	/* generate start condition */
	OP_TX_ONLY,	/* transmit one byte */
	OP_TX_STOP,	/* transmit last byte, schedule stop condition */
	OP_TX_TO_RX,	/* transmit byte, then switch to receive mode */
	OP_RX_ONLY,	/* read one byte */
	OP_RX_STOP,	/* read last byte, schedule stop condition */
};

/* Per-adapter driver state. */
struct sh_mobile_i2c_data {
	struct device *dev;		/* device pointer used for diagnostics */
	void __iomem *reg;		/* mapped register base (ICDR..ICCH) */
	struct i2c_adapter adap;	/* the registered I2C adapter */

	struct clk *clk;		/* peripheral clock feeding the block */
	u_int8_t iccl;			/* computed SCL low-period register value */
	u_int8_t icch;			/* computed SCL high-period register value */

	spinlock_t lock;		/* serializes register access in i2c_op() */
	wait_queue_head_t wait;		/* xfer() sleeps here; ISR wakes it */
	struct i2c_msg *msg;		/* message currently being transferred */
	int pos;			/* byte index into msg; -1 = address phase */
	int sr;				/* accumulated ICSR bits, plus SW_DONE */
};
58
59#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */
60
61/* Register offsets */
62#define ICDR(pd) (pd->reg + 0x00)
63#define ICCR(pd) (pd->reg + 0x04)
64#define ICSR(pd) (pd->reg + 0x08)
65#define ICIC(pd) (pd->reg + 0x0c)
66#define ICCL(pd) (pd->reg + 0x10)
67#define ICCH(pd) (pd->reg + 0x14)
68
69/* Register bits */
70#define ICCR_ICE 0x80
71#define ICCR_RACK 0x40
72#define ICCR_TRS 0x10
73#define ICCR_BBSY 0x04
74#define ICCR_SCP 0x01
75
76#define ICSR_SCLM 0x80
77#define ICSR_SDAM 0x40
78#define SW_DONE 0x20
79#define ICSR_BUSY 0x10
80#define ICSR_AL 0x08
81#define ICSR_TACK 0x04
82#define ICSR_WAIT 0x02
83#define ICSR_DTE 0x01
84
85#define ICIC_ALE 0x08
86#define ICIC_TACKE 0x04
87#define ICIC_WAITE 0x02
88#define ICIC_DTEE 0x01
89
/*
 * Power up and initialize the channel before a transfer: enable the
 * peripheral clock, set the ICE bit, mask interrupts and program the
 * precomputed SCL low/high timing values.
 */
static void activate_ch(struct sh_mobile_i2c_data *pd)
{
	/* Make sure the clock is enabled */
	clk_enable(pd->clk);

	/* Enable channel and configure rx ack */
	iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));

	/* Mask all interrupts */
	iowrite8(0, ICIC(pd));

	/* Set the clock */
	iowrite8(pd->iccl, ICCL(pd));
	iowrite8(pd->icch, ICCH(pd));
}
105
/*
 * Counterpart of activate_ch(): quiesce the channel after a transfer
 * and release the peripheral clock so the block can be powered down.
 */
static void deactivate_ch(struct sh_mobile_i2c_data *pd)
{
	/* Clear/disable interrupts */
	iowrite8(0, ICSR(pd));
	iowrite8(0, ICIC(pd));

	/* Disable channel */
	iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));

	/* Disable clock */
	clk_disable(pd->clk);
}
118
/*
 * Perform one low-level bus operation under pd->lock.  The magic ICCR
 * values decompose into the ICCR_* bits defined above:
 *   0x94 = ICE | TRS | BBSY   (transmit + claim bus -> start condition)
 *   0x90 = ICE | TRS          (transmit, bus released -> stop condition)
 *   0x81 = ICE | SCP          (switch from transmit to receive)
 *   0xc0 = ICE | RACK         (receive with negative ack -> stop)
 * Returns the byte read from ICDR for the RX operations, 0 otherwise.
 */
static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
			    enum sh_mobile_i2c_op op, unsigned char data)
{
	unsigned char ret = 0;
	unsigned long flags;

	dev_dbg(pd->dev, "op %d, data in 0x%02x\n", op, data);

	spin_lock_irqsave(&pd->lock, flags);

	switch (op) {
	case OP_START:
		iowrite8(0x94, ICCR(pd));
		break;
	case OP_TX_ONLY:
		iowrite8(data, ICDR(pd));
		break;
	case OP_TX_STOP:
		iowrite8(data, ICDR(pd));
		iowrite8(0x90, ICCR(pd));
		/* from now on only care about arbitration-lost/no-ack */
		iowrite8(ICIC_ALE | ICIC_TACKE, ICIC(pd));
		break;
	case OP_TX_TO_RX:
		iowrite8(data, ICDR(pd));
		iowrite8(0x81, ICCR(pd));
		break;
	case OP_RX_ONLY:
		ret = ioread8(ICDR(pd));
		break;
	case OP_RX_STOP:
		ret = ioread8(ICDR(pd));
		iowrite8(0xc0, ICCR(pd));
		break;
	}

	spin_unlock_irqrestore(&pd->lock, flags);

	dev_dbg(pd->dev, "op %d, data out 0x%02x\n", op, ret);
	return ret;
}
159
/*
 * Interrupt handler driving the transfer state machine.  pd->pos
 * tracks progress through pd->msg: -1 means the slave address byte
 * still has to be sent, msg->len means all payload bytes are done.
 * On error (arbitration lost / no ack) or completion, SW_DONE is set
 * in pd->sr and the task sleeping in sh_mobile_i2c_xfer() is woken.
 */
static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
{
	struct platform_device *dev = dev_id;
	struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
	struct i2c_msg *msg = pd->msg;
	unsigned char data, sr;
	int wakeup = 0;

	/* latch status bits; xfer() inspects the accumulated pd->sr */
	sr = ioread8(ICSR(pd));
	pd->sr |= sr;

	dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
	       (msg->flags & I2C_M_RD) ? "read" : "write",
	       pd->pos, msg->len);

	/* arbitration lost or no ack from slave: abort the transfer */
	if (sr & (ICSR_AL | ICSR_TACK)) {
		iowrite8(0, ICIC(pd)); /* disable interrupts */
		wakeup = 1;
		goto do_wakeup;
	}

	/* all bytes handled; drain ICDR and report completion */
	if (pd->pos == msg->len) {
		i2c_op(pd, OP_RX_ONLY, 0);
		wakeup = 1;
		goto do_wakeup;
	}

	/* pick the next byte: slave address (pos == -1) or payload */
	if (pd->pos == -1) {
		data = (msg->addr & 0x7f) << 1;
		data |= (msg->flags & I2C_M_RD) ? 1 : 0;
	} else
		data = msg->buf[pd->pos];

	if ((pd->pos == -1) || !(msg->flags & I2C_M_RD)) {
		/* transmit path: address byte or write-message payload */
		if (msg->flags & I2C_M_RD)
			i2c_op(pd, OP_TX_TO_RX, data);
		else if (pd->pos == (msg->len - 1)) {
			i2c_op(pd, OP_TX_STOP, data);
			wakeup = 1;
		} else
			i2c_op(pd, OP_TX_ONLY, data);
	} else {
		/* receive path: last byte schedules the stop condition */
		if (pd->pos == (msg->len - 1))
			data = i2c_op(pd, OP_RX_STOP, 0);
		else
			data = i2c_op(pd, OP_RX_ONLY, 0);

		msg->buf[pd->pos] = data;
	}
	pd->pos++;

 do_wakeup:
	if (wakeup) {
		pd->sr |= SW_DONE;
		wake_up(&pd->wait);
	}

	return IRQ_HANDLED;
}
219
/*
 * Prepare the channel and the driver state for one i2c_msg: reset the
 * controller (ICE off/on), reprogram the SCL timing, arm the state
 * machine at the address phase (pos == -1) and unmask interrupts.
 * Always returns 0; the int return keeps room for future error paths.
 */
static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
{
	/* Initialize channel registers */
	iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));

	/* Enable channel and configure rx ack */
	iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));

	/* Set the clock */
	iowrite8(pd->iccl, ICCL(pd));
	iowrite8(pd->icch, ICCH(pd));

	pd->msg = usr_msg;
	pd->pos = -1;
	pd->sr = 0;

	/* Enable all interrupts except wait */
	iowrite8(ioread8(ICIC(pd)) | ICIC_ALE | ICIC_TACKE | ICIC_DTEE,
		 ICIC(pd));
	return 0;
}
241
/*
 * master_xfer callback: process the messages one by one.  Each message
 * is kicked off with a start condition and then completed entirely by
 * the interrupt handler; this function sleeps until the ISR signals
 * completion or an error, then polls until the bus is really idle.
 * Returns the number of messages transferred, or a negative errno.
 */
static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
			      struct i2c_msg *msgs,
			      int num)
{
	struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
	struct i2c_msg *msg;
	int err = 0;
	u_int8_t val;
	int i, k, retry_count;

	activate_ch(pd);

	/* Process all messages */
	for (i = 0; i < num; i++) {
		msg = &msgs[i];

		err = start_ch(pd, msg);
		if (err)
			break;

		i2c_op(pd, OP_START, 0);

		/* The interrupt handler takes care of the rest... */
		k = wait_event_timeout(pd->wait,
				       pd->sr & (ICSR_TACK | SW_DONE),
				       5 * HZ);
		if (!k)
			dev_err(pd->dev, "Transfer request timed out\n");

		/* even after a timeout, fall through and inspect the
		 * hardware status below before deciding on an error */
		retry_count = 10;
again:
		val = ioread8(ICSR(pd));

		dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);

		/* arbitration lost or no ack, in hardware or as latched
		 * by the ISR: give up on the remaining messages */
		if ((val | pd->sr) & (ICSR_TACK | ICSR_AL)) {
			err = -EIO;
			break;
		}

		/* the interrupt handler may wake us up before the
		 * transfer is finished, so poll the hardware
		 * until we're done.
		 */

		/* done means: not busy, and both SCL and SDA released */
		if (!(!(val & ICSR_BUSY) && (val & ICSR_SCLM) &&
		      (val & ICSR_SDAM))) {
			msleep(1);
			if (retry_count--)
				goto again;

			err = -EIO;
			dev_err(pd->dev, "Polling timed out\n");
			break;
		}
	}

	deactivate_ch(pd);

	/* i2c convention: success == number of messages processed */
	if (!err)
		err = num;
	return err;
}
305
306static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
307{
308 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
309}
310
/* Transfer callbacks handed to the i2c core via adap->algo. */
static struct i2c_algorithm sh_mobile_i2c_algorithm = {
	.functionality	= sh_mobile_i2c_func,
	.master_xfer	= sh_mobile_i2c_xfer,
};
315
316static void sh_mobile_i2c_setup_channel(struct platform_device *dev)
317{
318 struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
319 unsigned long peripheral_clk = clk_get_rate(pd->clk);
320 u_int32_t num;
321 u_int32_t denom;
322 u_int32_t tmp;
323
324 spin_lock_init(&pd->lock);
325 init_waitqueue_head(&pd->wait);
326
327 /* Calculate the value for iccl. From the data sheet:
328 * iccl = (p clock / transfer rate) * (L / (L + H))
329 * where L and H are the SCL low/high ratio (5/4 in this case).
330 * We also round off the result.
331 */
332 num = peripheral_clk * 5;
333 denom = NORMAL_SPEED * 9;
334 tmp = num * 10 / denom;
335 if (tmp % 10 >= 5)
336 pd->iccl = (u_int8_t)((num/denom) + 1);
337 else
338 pd->iccl = (u_int8_t)(num/denom);
339
340 /* Calculate the value for icch. From the data sheet:
341 icch = (p clock / transfer rate) * (H / (L + H)) */
342 num = peripheral_clk * 4;
343 tmp = num * 10 / denom;
344 if (tmp % 10 >= 5)
345 pd->icch = (u_int8_t)((num/denom) + 1);
346 else
347 pd->icch = (u_int8_t)(num/denom);
348}
349
350static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
351{
352 struct resource *res;
353 int ret = -ENXIO;
354 int q, m;
355 int k = 0;
356 int n = 0;
357
358 while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
359 for (n = res->start; hook && n <= res->end; n++) {
360 if (request_irq(n, sh_mobile_i2c_isr, IRQF_DISABLED,
361 dev->dev.bus_id, dev))
362 goto rollback;
363 }
364 k++;
365 }
366
367 if (hook)
368 return k > 0 ? 0 : -ENOENT;
369
370 k--;
371 ret = 0;
372
373 rollback:
374 for (q = k; k >= 0; k--) {
375 for (m = n; m >= res->start; m--)
376 free_irq(m, dev);
377
378 res = platform_get_resource(dev, IORESOURCE_IRQ, k - 1);
379 m = res->end;
380 }
381
382 return ret;
383}
384
/*
 * Probe: allocate driver state, look up the peripheral clock, hook the
 * IRQ(s), map the register window, compute the SCL timing and register
 * a numbered i2c adapter (bus number == platform device id).  Each
 * error path unwinds exactly what was acquired before it.
 */
static int sh_mobile_i2c_probe(struct platform_device *dev)
{
	struct sh_mobile_i2c_data *pd;
	struct i2c_adapter *adap;
	struct resource *res;
	int size;
	int ret;

	pd = kzalloc(sizeof(struct sh_mobile_i2c_data), GFP_KERNEL);
	if (pd == NULL) {
		dev_err(&dev->dev, "cannot allocate private data\n");
		return -ENOMEM;
	}

	pd->clk = clk_get(&dev->dev, "peripheral_clk");
	if (IS_ERR(pd->clk)) {
		dev_err(&dev->dev, "cannot get peripheral clock\n");
		ret = PTR_ERR(pd->clk);
		goto err;
	}

	ret = sh_mobile_i2c_hook_irqs(dev, 1);
	if (ret) {
		dev_err(&dev->dev, "cannot request IRQ\n");
		goto err_clk;
	}

	/* drvdata must be set before the ISR can fire */
	pd->dev = &dev->dev;
	platform_set_drvdata(dev, pd);

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&dev->dev, "cannot find IO resource\n");
		ret = -ENOENT;
		goto err_irq;
	}

	size = (res->end - res->start) + 1;

	pd->reg = ioremap(res->start, size);
	if (pd->reg == NULL) {
		dev_err(&dev->dev, "cannot map IO\n");
		ret = -ENXIO;
		goto err_irq;
	}

	/* setup the private data */
	adap = &pd->adap;
	i2c_set_adapdata(adap, pd);

	adap->owner = THIS_MODULE;
	adap->algo = &sh_mobile_i2c_algorithm;
	adap->dev.parent = &dev->dev;
	adap->retries = 5;
	adap->nr = dev->id;	/* fixed bus number from platform id */

	strlcpy(adap->name, dev->name, sizeof(adap->name));

	sh_mobile_i2c_setup_channel(dev);

	ret = i2c_add_numbered_adapter(adap);
	if (ret < 0) {
		dev_err(&dev->dev, "cannot add numbered adapter\n");
		goto err_all;
	}

	return 0;

 err_all:
	iounmap(pd->reg);
 err_irq:
	sh_mobile_i2c_hook_irqs(dev, 0);
 err_clk:
	clk_put(pd->clk);
 err:
	kfree(pd);
	return ret;
}
463
/*
 * Undo sh_mobile_i2c_probe(): unregister the adapter first so no new
 * transfers can start, then release mapping, IRQs, clock and memory
 * in reverse order of acquisition.
 */
static int sh_mobile_i2c_remove(struct platform_device *dev)
{
	struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);

	i2c_del_adapter(&pd->adap);
	iounmap(pd->reg);
	sh_mobile_i2c_hook_irqs(dev, 0);
	clk_put(pd->clk);
	kfree(pd);
	return 0;
}
475
/* Platform driver glue; bound by name to "i2c-sh_mobile" devices. */
static struct platform_driver sh_mobile_i2c_driver = {
	.driver		= {
		.name		= "i2c-sh_mobile",
		.owner		= THIS_MODULE,
	},
	.probe		= sh_mobile_i2c_probe,
	.remove		= sh_mobile_i2c_remove,
};
484
/* Module entry point: register the platform driver. */
static int __init sh_mobile_i2c_adap_init(void)
{
	return platform_driver_register(&sh_mobile_i2c_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit sh_mobile_i2c_adap_exit(void)
{
	platform_driver_unregister(&sh_mobile_i2c_driver);
}
494
495module_init(sh_mobile_i2c_adap_init);
496module_exit(sh_mobile_i2c_adap_exit);
497
498MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
499MODULE_AUTHOR("Magnus Damm");
500MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 10af8d31e12a..042fda295f3a 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -159,6 +159,9 @@ static int simtec_i2c_remove(struct platform_device *dev)
159 159
160/* device driver */ 160/* device driver */
161 161
162/* work with hotplug and coldplug */
163MODULE_ALIAS("platform:simtec-i2c");
164
162static struct platform_driver simtec_i2c_driver = { 165static struct platform_driver simtec_i2c_driver = {
163 .driver = { 166 .driver = {
164 .name = "simtec-i2c", 167 .name = "simtec-i2c",
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 081d9578ce10..4678babd3ce6 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -151,3 +151,4 @@ module_exit(i2c_versatile_exit);
151 151
152MODULE_DESCRIPTION("ARM Versatile I2C bus driver"); 152MODULE_DESCRIPTION("ARM Versatile I2C bus driver");
153MODULE_LICENSE("GPL"); 153MODULE_LICENSE("GPL");
154MODULE_ALIAS("platform:versatile-i2c");
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index f5e7a70da831..61abe0f33255 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -527,7 +527,7 @@ static int __init scx200_create_isa(const char *text, unsigned long base,
527 if (iface == NULL) 527 if (iface == NULL)
528 return -ENOMEM; 528 return -ENOMEM;
529 529
530 if (request_region(base, 8, iface->adapter.name) == 0) { 530 if (!request_region(base, 8, iface->adapter.name)) {
531 printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n", 531 printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
532 base, base + 8 - 1); 532 base, base + 8 - 1);
533 rc = -EBUSY; 533 rc = -EBUSY;
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 2a3160153f54..b1b45dddb17e 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -658,7 +658,7 @@ pulldown:
658 OTG_CTRL_REG |= OTG_PULLUP; 658 OTG_CTRL_REG |= OTG_PULLUP;
659 } 659 }
660 660
661 check_state(isp, __FUNCTION__); 661 check_state(isp, __func__);
662 dump_regs(isp, "otg->isp1301"); 662 dump_regs(isp, "otg->isp1301");
663} 663}
664 664
@@ -782,7 +782,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
782 if (otg_ctrl & OTG_DRIVER_SEL) { 782 if (otg_ctrl & OTG_DRIVER_SEL) {
783 switch (isp->otg.state) { 783 switch (isp->otg.state) {
784 case OTG_STATE_A_IDLE: 784 case OTG_STATE_A_IDLE:
785 b_idle(isp, __FUNCTION__); 785 b_idle(isp, __func__);
786 break; 786 break;
787 default: 787 default:
788 break; 788 break;
@@ -826,7 +826,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
826 isp->otg.host->otg_port); 826 isp->otg.host->otg_port);
827 } 827 }
828 828
829 check_state(isp, __FUNCTION__); 829 check_state(isp, __func__);
830 return ret; 830 return ret;
831} 831}
832 832
@@ -837,7 +837,7 @@ static int otg_init(struct isp1301 *isp)
837 if (!otg_dev) 837 if (!otg_dev)
838 return -ENODEV; 838 return -ENODEV;
839 839
840 dump_regs(isp, __FUNCTION__); 840 dump_regs(isp, __func__);
841 /* some of these values are board-specific... */ 841 /* some of these values are board-specific... */
842 OTG_SYSCON_2_REG |= OTG_EN 842 OTG_SYSCON_2_REG |= OTG_EN
843 /* for B-device: */ 843 /* for B-device: */
@@ -853,9 +853,9 @@ static int otg_init(struct isp1301 *isp)
853 update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE)); 853 update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE));
854 update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS)); 854 update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS));
855 855
856 check_state(isp, __FUNCTION__); 856 check_state(isp, __func__);
857 pr_debug("otg: %s, %s %06x\n", 857 pr_debug("otg: %s, %s %06x\n",
858 state_name(isp), __FUNCTION__, OTG_CTRL_REG); 858 state_name(isp), __func__, OTG_CTRL_REG);
859 859
860 OTG_IRQ_EN_REG = DRIVER_SWITCH | OPRT_CHG 860 OTG_IRQ_EN_REG = DRIVER_SWITCH | OPRT_CHG
861 | B_SRP_TMROUT | B_HNP_FAIL 861 | B_SRP_TMROUT | B_HNP_FAIL
@@ -1041,11 +1041,11 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
1041 OTG1_DP_PULLDOWN); 1041 OTG1_DP_PULLDOWN);
1042 isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, 1042 isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1,
1043 OTG1_DP_PULLUP); 1043 OTG1_DP_PULLUP);
1044 dump_regs(isp, __FUNCTION__); 1044 dump_regs(isp, __func__);
1045#endif 1045#endif
1046 /* FALLTHROUGH */ 1046 /* FALLTHROUGH */
1047 case OTG_STATE_B_SRP_INIT: 1047 case OTG_STATE_B_SRP_INIT:
1048 b_idle(isp, __FUNCTION__); 1048 b_idle(isp, __func__);
1049 OTG_CTRL_REG &= OTG_CTRL_REG & OTG_XCEIV_OUTPUTS; 1049 OTG_CTRL_REG &= OTG_CTRL_REG & OTG_XCEIV_OUTPUTS;
1050 /* FALLTHROUGH */ 1050 /* FALLTHROUGH */
1051 case OTG_STATE_B_IDLE: 1051 case OTG_STATE_B_IDLE:
@@ -1077,7 +1077,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
1077 */ 1077 */
1078 update_otg1(isp, isp_stat); 1078 update_otg1(isp, isp_stat);
1079 update_otg2(isp, isp_bstat); 1079 update_otg2(isp, isp_bstat);
1080 check_state(isp, __FUNCTION__); 1080 check_state(isp, __func__);
1081#endif 1081#endif
1082 1082
1083 dump_regs(isp, "isp1301->otg"); 1083 dump_regs(isp, "isp1301->otg");
@@ -1310,7 +1310,7 @@ isp1301_set_host(struct otg_transceiver *otg, struct usb_bus *host)
1310 */ 1310 */
1311 isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_VBUS_DRV); 1311 isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_VBUS_DRV);
1312 1312
1313 dump_regs(isp, __FUNCTION__); 1313 dump_regs(isp, __func__);
1314 1314
1315 return 0; 1315 return 0;
1316 1316
@@ -1365,7 +1365,7 @@ isp1301_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget)
1365 isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING, 1365 isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING,
1366 INTR_VBUS_VLD); 1366 INTR_VBUS_VLD);
1367 dev_info(&isp->client.dev, "B-Peripheral sessions ok\n"); 1367 dev_info(&isp->client.dev, "B-Peripheral sessions ok\n");
1368 dump_regs(isp, __FUNCTION__); 1368 dump_regs(isp, __func__);
1369 1369
1370 /* If this has a Mini-AB connector, this mode is highly 1370 /* If this has a Mini-AB connector, this mode is highly
1371 * nonstandard ... but can be handy for testing, so long 1371 * nonstandard ... but can be handy for testing, so long
@@ -1416,7 +1416,7 @@ isp1301_start_srp(struct otg_transceiver *dev)
1416 1416
1417 pr_debug("otg: SRP, %s ... %06x\n", state_name(isp), OTG_CTRL_REG); 1417 pr_debug("otg: SRP, %s ... %06x\n", state_name(isp), OTG_CTRL_REG);
1418#ifdef CONFIG_USB_OTG 1418#ifdef CONFIG_USB_OTG
1419 check_state(isp, __FUNCTION__); 1419 check_state(isp, __func__);
1420#endif 1420#endif
1421 return 0; 1421 return 0;
1422} 1422}
@@ -1463,7 +1463,7 @@ isp1301_start_hnp(struct otg_transceiver *dev)
1463 } 1463 }
1464 pr_debug("otg: HNP %s, %06x ...\n", 1464 pr_debug("otg: HNP %s, %06x ...\n",
1465 state_name(isp), OTG_CTRL_REG); 1465 state_name(isp), OTG_CTRL_REG);
1466 check_state(isp, __FUNCTION__); 1466 check_state(isp, __func__);
1467 return 0; 1467 return 0;
1468#else 1468#else
1469 /* srp-only */ 1469 /* srp-only */
@@ -1601,7 +1601,7 @@ fail2:
1601 update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS)); 1601 update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS));
1602#endif 1602#endif
1603 1603
1604 dump_regs(isp, __FUNCTION__); 1604 dump_regs(isp, __func__);
1605 1605
1606#ifdef VERBOSE 1606#ifdef VERBOSE
1607 mod_timer(&isp->timer, jiffies + TIMER_JIFFIES); 1607 mod_timer(&isp->timer, jiffies + TIMER_JIFFIES);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e186df657119..6c7fa8d53c0e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1506,7 +1506,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
1506 read_write = I2C_SMBUS_READ; 1506 read_write = I2C_SMBUS_READ;
1507 if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { 1507 if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
1508 dev_err(&adapter->dev, "%s called with invalid " 1508 dev_err(&adapter->dev, "%s called with invalid "
1509 "block proc call size (%d)\n", __FUNCTION__, 1509 "block proc call size (%d)\n", __func__,
1510 data->block[0]); 1510 data->block[0]);
1511 return -1; 1511 return -1;
1512 } 1512 }
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 393e679d9faa..d34c14c81c29 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -200,16 +200,176 @@ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
200 return device_for_each_child(&adapter->dev, &addr, i2cdev_check); 200 return device_for_each_child(&adapter->dev, &addr, i2cdev_check);
201} 201}
202 202
203static int i2cdev_ioctl(struct inode *inode, struct file *file, 203static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
204 unsigned int cmd, unsigned long arg) 204 unsigned long arg)
205{ 205{
206 struct i2c_client *client = (struct i2c_client *)file->private_data;
207 struct i2c_rdwr_ioctl_data rdwr_arg; 206 struct i2c_rdwr_ioctl_data rdwr_arg;
208 struct i2c_smbus_ioctl_data data_arg;
209 union i2c_smbus_data temp;
210 struct i2c_msg *rdwr_pa; 207 struct i2c_msg *rdwr_pa;
211 u8 __user **data_ptrs; 208 u8 __user **data_ptrs;
212 int i,datasize,res; 209 int i, res;
210
211 if (copy_from_user(&rdwr_arg,
212 (struct i2c_rdwr_ioctl_data __user *)arg,
213 sizeof(rdwr_arg)))
214 return -EFAULT;
215
216 /* Put an arbitrary limit on the number of messages that can
217 * be sent at once */
218 if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
219 return -EINVAL;
220
221 rdwr_pa = (struct i2c_msg *)
222 kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg),
223 GFP_KERNEL);
224 if (!rdwr_pa)
225 return -ENOMEM;
226
227 if (copy_from_user(rdwr_pa, rdwr_arg.msgs,
228 rdwr_arg.nmsgs * sizeof(struct i2c_msg))) {
229 kfree(rdwr_pa);
230 return -EFAULT;
231 }
232
233 data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
234 if (data_ptrs == NULL) {
235 kfree(rdwr_pa);
236 return -ENOMEM;
237 }
238
239 res = 0;
240 for (i = 0; i < rdwr_arg.nmsgs; i++) {
241 /* Limit the size of the message to a sane amount;
242 * and don't let length change either. */
243 if ((rdwr_pa[i].len > 8192) ||
244 (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
245 res = -EINVAL;
246 break;
247 }
248 data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
249 rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL);
250 if (rdwr_pa[i].buf == NULL) {
251 res = -ENOMEM;
252 break;
253 }
254 if (copy_from_user(rdwr_pa[i].buf, data_ptrs[i],
255 rdwr_pa[i].len)) {
256 ++i; /* Needs to be kfreed too */
257 res = -EFAULT;
258 break;
259 }
260 }
261 if (res < 0) {
262 int j;
263 for (j = 0; j < i; ++j)
264 kfree(rdwr_pa[j].buf);
265 kfree(data_ptrs);
266 kfree(rdwr_pa);
267 return res;
268 }
269
270 res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs);
271 while (i-- > 0) {
272 if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) {
273 if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf,
274 rdwr_pa[i].len))
275 res = -EFAULT;
276 }
277 kfree(rdwr_pa[i].buf);
278 }
279 kfree(data_ptrs);
280 kfree(rdwr_pa);
281 return res;
282}
283
284static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
285 unsigned long arg)
286{
287 struct i2c_smbus_ioctl_data data_arg;
288 union i2c_smbus_data temp;
289 int datasize, res;
290
291 if (copy_from_user(&data_arg,
292 (struct i2c_smbus_ioctl_data __user *) arg,
293 sizeof(struct i2c_smbus_ioctl_data)))
294 return -EFAULT;
295 if ((data_arg.size != I2C_SMBUS_BYTE) &&
296 (data_arg.size != I2C_SMBUS_QUICK) &&
297 (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
298 (data_arg.size != I2C_SMBUS_WORD_DATA) &&
299 (data_arg.size != I2C_SMBUS_PROC_CALL) &&
300 (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
301 (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
302 (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
303 (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
304 dev_dbg(&client->adapter->dev,
305 "size out of range (%x) in ioctl I2C_SMBUS.\n",
306 data_arg.size);
307 return -EINVAL;
308 }
309 /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
310 so the check is valid if size==I2C_SMBUS_QUICK too. */
311 if ((data_arg.read_write != I2C_SMBUS_READ) &&
312 (data_arg.read_write != I2C_SMBUS_WRITE)) {
313 dev_dbg(&client->adapter->dev,
314 "read_write out of range (%x) in ioctl I2C_SMBUS.\n",
315 data_arg.read_write);
316 return -EINVAL;
317 }
318
319 /* Note that command values are always valid! */
320
321 if ((data_arg.size == I2C_SMBUS_QUICK) ||
322 ((data_arg.size == I2C_SMBUS_BYTE) &&
323 (data_arg.read_write == I2C_SMBUS_WRITE)))
324 /* These are special: we do not use data */
325 return i2c_smbus_xfer(client->adapter, client->addr,
326 client->flags, data_arg.read_write,
327 data_arg.command, data_arg.size, NULL);
328
329 if (data_arg.data == NULL) {
330 dev_dbg(&client->adapter->dev,
331 "data is NULL pointer in ioctl I2C_SMBUS.\n");
332 return -EINVAL;
333 }
334
335 if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
336 (data_arg.size == I2C_SMBUS_BYTE))
337 datasize = sizeof(data_arg.data->byte);
338 else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
339 (data_arg.size == I2C_SMBUS_PROC_CALL))
340 datasize = sizeof(data_arg.data->word);
341 else /* size == smbus block, i2c block, or block proc. call */
342 datasize = sizeof(data_arg.data->block);
343
344 if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
345 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
346 (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
347 (data_arg.read_write == I2C_SMBUS_WRITE)) {
348 if (copy_from_user(&temp, data_arg.data, datasize))
349 return -EFAULT;
350 }
351 if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
352 /* Convert old I2C block commands to the new
353 convention. This preserves binary compatibility. */
354 data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
355 if (data_arg.read_write == I2C_SMBUS_READ)
356 temp.block[0] = I2C_SMBUS_BLOCK_MAX;
357 }
358 res = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
359 data_arg.read_write, data_arg.command, data_arg.size, &temp);
360 if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
361 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
362 (data_arg.read_write == I2C_SMBUS_READ))) {
363 if (copy_to_user(data_arg.data, &temp, datasize))
364 return -EFAULT;
365 }
366 return res;
367}
368
369static int i2cdev_ioctl(struct inode *inode, struct file *file,
370 unsigned int cmd, unsigned long arg)
371{
372 struct i2c_client *client = (struct i2c_client *)file->private_data;
213 unsigned long funcs; 373 unsigned long funcs;
214 374
215 dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", 375 dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
@@ -253,164 +413,11 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
253 return put_user(funcs, (unsigned long __user *)arg); 413 return put_user(funcs, (unsigned long __user *)arg);
254 414
255 case I2C_RDWR: 415 case I2C_RDWR:
256 if (copy_from_user(&rdwr_arg, 416 return i2cdev_ioctl_rdrw(client, arg);
257 (struct i2c_rdwr_ioctl_data __user *)arg,
258 sizeof(rdwr_arg)))
259 return -EFAULT;
260
261 /* Put an arbitrary limit on the number of messages that can
262 * be sent at once */
263 if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
264 return -EINVAL;
265
266 rdwr_pa = (struct i2c_msg *)
267 kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg),
268 GFP_KERNEL);
269
270 if (rdwr_pa == NULL) return -ENOMEM;
271
272 if (copy_from_user(rdwr_pa, rdwr_arg.msgs,
273 rdwr_arg.nmsgs * sizeof(struct i2c_msg))) {
274 kfree(rdwr_pa);
275 return -EFAULT;
276 }
277
278 data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
279 if (data_ptrs == NULL) {
280 kfree(rdwr_pa);
281 return -ENOMEM;
282 }
283
284 res = 0;
285 for( i=0; i<rdwr_arg.nmsgs; i++ ) {
286 /* Limit the size of the message to a sane amount;
287 * and don't let length change either. */
288 if ((rdwr_pa[i].len > 8192) ||
289 (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
290 res = -EINVAL;
291 break;
292 }
293 data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
294 rdwr_pa[i].buf = kmalloc(rdwr_pa[i].len, GFP_KERNEL);
295 if(rdwr_pa[i].buf == NULL) {
296 res = -ENOMEM;
297 break;
298 }
299 if(copy_from_user(rdwr_pa[i].buf,
300 data_ptrs[i],
301 rdwr_pa[i].len)) {
302 ++i; /* Needs to be kfreed too */
303 res = -EFAULT;
304 break;
305 }
306 }
307 if (res < 0) {
308 int j;
309 for (j = 0; j < i; ++j)
310 kfree(rdwr_pa[j].buf);
311 kfree(data_ptrs);
312 kfree(rdwr_pa);
313 return res;
314 }
315
316 res = i2c_transfer(client->adapter,
317 rdwr_pa,
318 rdwr_arg.nmsgs);
319 while(i-- > 0) {
320 if( res>=0 && (rdwr_pa[i].flags & I2C_M_RD)) {
321 if(copy_to_user(
322 data_ptrs[i],
323 rdwr_pa[i].buf,
324 rdwr_pa[i].len)) {
325 res = -EFAULT;
326 }
327 }
328 kfree(rdwr_pa[i].buf);
329 }
330 kfree(data_ptrs);
331 kfree(rdwr_pa);
332 return res;
333 417
334 case I2C_SMBUS: 418 case I2C_SMBUS:
335 if (copy_from_user(&data_arg, 419 return i2cdev_ioctl_smbus(client, arg);
336 (struct i2c_smbus_ioctl_data __user *) arg,
337 sizeof(struct i2c_smbus_ioctl_data)))
338 return -EFAULT;
339 if ((data_arg.size != I2C_SMBUS_BYTE) &&
340 (data_arg.size != I2C_SMBUS_QUICK) &&
341 (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
342 (data_arg.size != I2C_SMBUS_WORD_DATA) &&
343 (data_arg.size != I2C_SMBUS_PROC_CALL) &&
344 (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
345 (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
346 (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
347 (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
348 dev_dbg(&client->adapter->dev,
349 "size out of range (%x) in ioctl I2C_SMBUS.\n",
350 data_arg.size);
351 return -EINVAL;
352 }
353 /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
354 so the check is valid if size==I2C_SMBUS_QUICK too. */
355 if ((data_arg.read_write != I2C_SMBUS_READ) &&
356 (data_arg.read_write != I2C_SMBUS_WRITE)) {
357 dev_dbg(&client->adapter->dev,
358 "read_write out of range (%x) in ioctl I2C_SMBUS.\n",
359 data_arg.read_write);
360 return -EINVAL;
361 }
362
363 /* Note that command values are always valid! */
364
365 if ((data_arg.size == I2C_SMBUS_QUICK) ||
366 ((data_arg.size == I2C_SMBUS_BYTE) &&
367 (data_arg.read_write == I2C_SMBUS_WRITE)))
368 /* These are special: we do not use data */
369 return i2c_smbus_xfer(client->adapter, client->addr,
370 client->flags,
371 data_arg.read_write,
372 data_arg.command,
373 data_arg.size, NULL);
374
375 if (data_arg.data == NULL) {
376 dev_dbg(&client->adapter->dev,
377 "data is NULL pointer in ioctl I2C_SMBUS.\n");
378 return -EINVAL;
379 }
380 420
381 if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
382 (data_arg.size == I2C_SMBUS_BYTE))
383 datasize = sizeof(data_arg.data->byte);
384 else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
385 (data_arg.size == I2C_SMBUS_PROC_CALL))
386 datasize = sizeof(data_arg.data->word);
387 else /* size == smbus block, i2c block, or block proc. call */
388 datasize = sizeof(data_arg.data->block);
389
390 if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
391 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
392 (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
393 (data_arg.read_write == I2C_SMBUS_WRITE)) {
394 if (copy_from_user(&temp, data_arg.data, datasize))
395 return -EFAULT;
396 }
397 if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
398 /* Convert old I2C block commands to the new
399 convention. This preserves binary compatibility. */
400 data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
401 if (data_arg.read_write == I2C_SMBUS_READ)
402 temp.block[0] = I2C_SMBUS_BLOCK_MAX;
403 }
404 res = i2c_smbus_xfer(client->adapter,client->addr,client->flags,
405 data_arg.read_write,
406 data_arg.command,data_arg.size,&temp);
407 if (! res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
408 (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
409 (data_arg.read_write == I2C_SMBUS_READ))) {
410 if (copy_to_user(data_arg.data, &temp, datasize))
411 return -EFAULT;
412 }
413 return res;
414 case I2C_RETRIES: 421 case I2C_RETRIES:
415 client->adapter->retries = arg; 422 client->adapter->retries = arg;
416 break; 423 break;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a3a6199639f9..eb97c4113d78 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,6 +1,5 @@
1menuconfig NEW_LEDS 1menuconfig NEW_LEDS
2 bool "LED Support" 2 bool "LED Support"
3 depends on HAS_IOMEM
4 help 3 help
5 Say Y to enable Linux LED support. This allows control of supported 4 Say Y to enable Linux LED support. This allows control of supported
6 LEDs from both userspace and optionally, by kernel events (triggers). 5 LEDs from both userspace and optionally, by kernel events (triggers).
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index af66f4f28300..4edc120a6359 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -19,8 +19,6 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/mfd/htc-pasic3.h> 20#include <linux/mfd/htc-pasic3.h>
21 21
22#include <asm/arch/pxa-regs.h>
23
24struct pasic3_data { 22struct pasic3_data {
25 void __iomem *mapping; 23 void __iomem *mapping;
26 unsigned int bus_shift; 24 unsigned int bus_shift;
@@ -30,7 +28,6 @@ struct pasic3_data {
30 28
31#define REG_ADDR 5 29#define REG_ADDR 5
32#define REG_DATA 6 30#define REG_DATA 6
33#define NUM_REGS 7
34 31
35#define READ_MODE 0x80 32#define READ_MODE 0x80
36 33
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bb94ce78a6d0..297a48f85446 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -360,4 +360,16 @@ config ENCLOSURE_SERVICES
360 driver (SCSI/ATA) which supports enclosures 360 driver (SCSI/ATA) which supports enclosures
361 or a SCSI enclosure device (SES) to use these services. 361 or a SCSI enclosure device (SES) to use these services.
362 362
363config SGI_XP
364 tristate "Support communication between SGI SSIs"
365 depends on IA64_GENERIC || IA64_SGI_SN2
366 select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
367 select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
368 ---help---
369 An SGI machine can be divided into multiple Single System
370 Images which act independently of each other and have
371 hardware based memory protection from the others. Enabling
372 this feature will allow for direct communication between SSIs
373 based on a network adapter and DMA messaging.
374
363endif # MISC_DEVICES 375endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4581b2533111..5914da434854 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
24obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o 24obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
25obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o 25obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
26obj-$(CONFIG_KGDB_TESTS) += kgdbts.o 26obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
27obj-$(CONFIG_SGI_XP) += sgi-xp/
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
new file mode 100644
index 000000000000..b6e40a7958ce
--- /dev/null
+++ b/drivers/misc/sgi-xp/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for SGI's XP devices.
3#
4
5obj-$(CONFIG_SGI_XP) += xp.o
6xp-y := xp_main.o xp_nofault.o
7
8obj-$(CONFIG_SGI_XP) += xpc.o
9xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
10
11obj-$(CONFIG_SGI_XP) += xpnet.o
diff --git a/include/asm-ia64/sn/xp.h b/drivers/misc/sgi-xp/xp.h
index f7711b308e48..5515234be86a 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -3,18 +3,15 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * External Cross Partition (XP) structures and defines. 10 * External Cross Partition (XP) structures and defines.
12 */ 11 */
13 12
14 13#ifndef _DRIVERS_MISC_SGIXP_XP_H
15#ifndef _ASM_IA64_SN_XP_H 14#define _DRIVERS_MISC_SGIXP_XP_H
16#define _ASM_IA64_SN_XP_H
17
18 15
19#include <linux/cache.h> 16#include <linux/cache.h>
20#include <linux/hardirq.h> 17#include <linux/hardirq.h>
@@ -22,14 +19,12 @@
22#include <asm/sn/types.h> 19#include <asm/sn/types.h>
23#include <asm/sn/bte.h> 20#include <asm/sn/bte.h>
24 21
25
26#ifdef USE_DBUG_ON 22#ifdef USE_DBUG_ON
27#define DBUG_ON(condition) BUG_ON(condition) 23#define DBUG_ON(condition) BUG_ON(condition)
28#else 24#else
29#define DBUG_ON(condition) 25#define DBUG_ON(condition)
30#endif 26#endif
31 27
32
33/* 28/*
34 * Define the maximum number of logically defined partitions the system 29 * Define the maximum number of logically defined partitions the system
35 * can support. It is constrained by the maximum number of hardware 30 * can support. It is constrained by the maximum number of hardware
@@ -43,7 +38,6 @@
43 */ 38 */
44#define XP_MAX_PARTITIONS 64 39#define XP_MAX_PARTITIONS 64
45 40
46
47/* 41/*
48 * Define the number of u64s required to represent all the C-brick nasids 42 * Define the number of u64s required to represent all the C-brick nasids
49 * as a bitmap. The cross-partition kernel modules deal only with 43 * as a bitmap. The cross-partition kernel modules deal only with
@@ -54,7 +48,6 @@
54#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) 48#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8)
55#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) 49#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64)
56 50
57
58/* 51/*
59 * Wrapper for bte_copy() that should it return a failure status will retry 52 * Wrapper for bte_copy() that should it return a failure status will retry
60 * the bte_copy() once in the hope that the failure was due to a temporary 53 * the bte_copy() once in the hope that the failure was due to a temporary
@@ -74,7 +67,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
74 bte_result_t ret; 67 bte_result_t ret;
75 u64 pdst = ia64_tpa(vdst); 68 u64 pdst = ia64_tpa(vdst);
76 69
77
78 /* 70 /*
79 * Ensure that the physically mapped memory is contiguous. 71 * Ensure that the physically mapped memory is contiguous.
80 * 72 *
@@ -87,16 +79,15 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
87 79
88 ret = bte_copy(src, pdst, len, mode, notification); 80 ret = bte_copy(src, pdst, len, mode, notification);
89 if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) { 81 if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
90 if (!in_interrupt()) { 82 if (!in_interrupt())
91 cond_resched(); 83 cond_resched();
92 } 84
93 ret = bte_copy(src, pdst, len, mode, notification); 85 ret = bte_copy(src, pdst, len, mode, notification);
94 } 86 }
95 87
96 return ret; 88 return ret;
97} 89}
98 90
99
100/* 91/*
101 * XPC establishes channel connections between the local partition and any 92 * XPC establishes channel connections between the local partition and any
102 * other partition that is currently up. Over these channels, kernel-level 93 * other partition that is currently up. Over these channels, kernel-level
@@ -122,7 +113,6 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
122#error XPC_NCHANNELS exceeds MAXIMUM allowed. 113#error XPC_NCHANNELS exceeds MAXIMUM allowed.
123#endif 114#endif
124 115
125
126/* 116/*
127 * The format of an XPC message is as follows: 117 * The format of an XPC message is as follows:
128 * 118 *
@@ -160,12 +150,10 @@ struct xpc_msg {
160 u64 payload; /* user defined portion of message */ 150 u64 payload; /* user defined portion of message */
161}; 151};
162 152
163
164#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload) 153#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
165#define XPC_MSG_SIZE(_payload_size) \ 154#define XPC_MSG_SIZE(_payload_size) \
166 L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size)) 155 L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
167 156
168
169/* 157/*
170 * Define the return values and values passed to user's callout functions. 158 * Define the return values and values passed to user's callout functions.
171 * (It is important to add new value codes at the end just preceding 159 * (It is important to add new value codes at the end just preceding
@@ -267,10 +255,9 @@ enum xpc_retval {
267 /* 115: BTE end */ 255 /* 115: BTE end */
268 xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL, 256 xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
269 257
270 xpcUnknownReason /* 116: unknown reason -- must be last in list */ 258 xpcUnknownReason /* 116: unknown reason - must be last in enum */
271}; 259};
272 260
273
274/* 261/*
275 * Define the callout function types used by XPC to update the user on 262 * Define the callout function types used by XPC to update the user on
276 * connection activity and state changes (via the user function registered by 263 * connection activity and state changes (via the user function registered by
@@ -375,12 +362,11 @@ enum xpc_retval {
375 * =====================+================================+===================== 362 * =====================+================================+=====================
376 */ 363 */
377 364
378typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid, 365typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
379 int ch_number, void *data, void *key); 366 int ch_number, void *data, void *key);
380
381typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
382 int ch_number, void *key);
383 367
368typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
369 int ch_number, void *key);
384 370
385/* 371/*
386 * The following is a registration entry. There is a global array of these, 372 * The following is a registration entry. There is a global array of these,
@@ -398,50 +384,45 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
398 */ 384 */
399struct xpc_registration { 385struct xpc_registration {
400 struct mutex mutex; 386 struct mutex mutex;
401 xpc_channel_func func; /* function to call */ 387 xpc_channel_func func; /* function to call */
402 void *key; /* pointer to user's key */ 388 void *key; /* pointer to user's key */
403 u16 nentries; /* #of msg entries in local msg queue */ 389 u16 nentries; /* #of msg entries in local msg queue */
404 u16 msg_size; /* message queue's message size */ 390 u16 msg_size; /* message queue's message size */
405 u32 assigned_limit; /* limit on #of assigned kthreads */ 391 u32 assigned_limit; /* limit on #of assigned kthreads */
406 u32 idle_limit; /* limit on #of idle kthreads */ 392 u32 idle_limit; /* limit on #of idle kthreads */
407} ____cacheline_aligned; 393} ____cacheline_aligned;
408 394
409
410#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) 395#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)
411 396
412
413/* the following are valid xpc_allocate() flags */ 397/* the following are valid xpc_allocate() flags */
414#define XPC_WAIT 0 /* wait flag */ 398#define XPC_WAIT 0 /* wait flag */
415#define XPC_NOWAIT 1 /* no wait flag */ 399#define XPC_NOWAIT 1 /* no wait flag */
416
417 400
418struct xpc_interface { 401struct xpc_interface {
419 void (*connect)(int); 402 void (*connect) (int);
420 void (*disconnect)(int); 403 void (*disconnect) (int);
421 enum xpc_retval (*allocate)(partid_t, int, u32, void **); 404 enum xpc_retval (*allocate) (partid_t, int, u32, void **);
422 enum xpc_retval (*send)(partid_t, int, void *); 405 enum xpc_retval (*send) (partid_t, int, void *);
423 enum xpc_retval (*send_notify)(partid_t, int, void *, 406 enum xpc_retval (*send_notify) (partid_t, int, void *,
424 xpc_notify_func, void *); 407 xpc_notify_func, void *);
425 void (*received)(partid_t, int, void *); 408 void (*received) (partid_t, int, void *);
426 enum xpc_retval (*partid_to_nasids)(partid_t, void *); 409 enum xpc_retval (*partid_to_nasids) (partid_t, void *);
427}; 410};
428 411
429
430extern struct xpc_interface xpc_interface; 412extern struct xpc_interface xpc_interface;
431 413
432extern void xpc_set_interface(void (*)(int), 414extern void xpc_set_interface(void (*)(int),
433 void (*)(int), 415 void (*)(int),
434 enum xpc_retval (*)(partid_t, int, u32, void **), 416 enum xpc_retval (*)(partid_t, int, u32, void **),
435 enum xpc_retval (*)(partid_t, int, void *), 417 enum xpc_retval (*)(partid_t, int, void *),
436 enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, 418 enum xpc_retval (*)(partid_t, int, void *,
437 void *), 419 xpc_notify_func, void *),
438 void (*)(partid_t, int, void *), 420 void (*)(partid_t, int, void *),
439 enum xpc_retval (*)(partid_t, void *)); 421 enum xpc_retval (*)(partid_t, void *));
440extern void xpc_clear_interface(void); 422extern void xpc_clear_interface(void);
441 423
442
443extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16, 424extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
444 u16, u32, u32); 425 u16, u32, u32);
445extern void xpc_disconnect(int); 426extern void xpc_disconnect(int);
446 427
447static inline enum xpc_retval 428static inline enum xpc_retval
@@ -458,7 +439,7 @@ xpc_send(partid_t partid, int ch_number, void *payload)
458 439
459static inline enum xpc_retval 440static inline enum xpc_retval
460xpc_send_notify(partid_t partid, int ch_number, void *payload, 441xpc_send_notify(partid_t partid, int ch_number, void *payload,
461 xpc_notify_func func, void *key) 442 xpc_notify_func func, void *key)
462{ 443{
463 return xpc_interface.send_notify(partid, ch_number, payload, func, key); 444 return xpc_interface.send_notify(partid, ch_number, payload, func, key);
464} 445}
@@ -475,11 +456,8 @@ xpc_partid_to_nasids(partid_t partid, void *nasids)
475 return xpc_interface.partid_to_nasids(partid, nasids); 456 return xpc_interface.partid_to_nasids(partid, nasids);
476} 457}
477 458
478
479extern u64 xp_nofault_PIOR_target; 459extern u64 xp_nofault_PIOR_target;
480extern int xp_nofault_PIOR(void *); 460extern int xp_nofault_PIOR(void *);
481extern int xp_error_PIOR(void); 461extern int xp_error_PIOR(void);
482 462
483 463#endif /* _DRIVERS_MISC_SGIXP_XP_H */
484#endif /* _ASM_IA64_SN_XP_H */
485
diff --git a/arch/ia64/sn/kernel/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index b7ea46645e12..1fbf99bae963 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition (XP) base. 10 * Cross Partition (XP) base.
12 * 11 *
@@ -15,58 +14,64 @@
15 * 14 *
16 */ 15 */
17 16
18
19#include <linux/kernel.h> 17#include <linux/kernel.h>
20#include <linux/interrupt.h> 18#include <linux/interrupt.h>
21#include <linux/module.h> 19#include <linux/module.h>
22#include <linux/mutex.h> 20#include <linux/mutex.h>
23#include <asm/sn/intr.h> 21#include <asm/sn/intr.h>
24#include <asm/sn/sn_sal.h> 22#include <asm/sn/sn_sal.h>
25#include <asm/sn/xp.h> 23#include "xp.h"
26
27 24
28/* 25/*
29 * Target of nofault PIO read. 26 * The export of xp_nofault_PIOR needs to happen here since it is defined
27 * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
28 * defined here.
30 */ 29 */
31u64 xp_nofault_PIOR_target; 30EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
32 31
32u64 xp_nofault_PIOR_target;
33EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
33 34
34/* 35/*
35 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level 36 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
36 * users of XPC. 37 * users of XPC.
37 */ 38 */
38struct xpc_registration xpc_registrations[XPC_NCHANNELS]; 39struct xpc_registration xpc_registrations[XPC_NCHANNELS];
39 40EXPORT_SYMBOL_GPL(xpc_registrations);
40 41
41/* 42/*
42 * Initialize the XPC interface to indicate that XPC isn't loaded. 43 * Initialize the XPC interface to indicate that XPC isn't loaded.
43 */ 44 */
44static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; } 45static enum xpc_retval
46xpc_notloaded(void)
47{
48 return xpcNotLoaded;
49}
45 50
46struct xpc_interface xpc_interface = { 51struct xpc_interface xpc_interface = {
47 (void (*)(int)) xpc_notloaded, 52 (void (*)(int))xpc_notloaded,
48 (void (*)(int)) xpc_notloaded, 53 (void (*)(int))xpc_notloaded,
49 (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded, 54 (enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
50 (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded, 55 (enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
51 (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *)) 56 (enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
52 xpc_notloaded, 57 xpc_notloaded,
53 (void (*)(partid_t, int, void *)) xpc_notloaded, 58 (void (*)(partid_t, int, void *))xpc_notloaded,
54 (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded 59 (enum xpc_retval(*)(partid_t, void *))xpc_notloaded
55}; 60};
56 61EXPORT_SYMBOL_GPL(xpc_interface);
57 62
58/* 63/*
59 * XPC calls this when it (the XPC module) has been loaded. 64 * XPC calls this when it (the XPC module) has been loaded.
60 */ 65 */
61void 66void
62xpc_set_interface(void (*connect)(int), 67xpc_set_interface(void (*connect) (int),
63 void (*disconnect)(int), 68 void (*disconnect) (int),
64 enum xpc_retval (*allocate)(partid_t, int, u32, void **), 69 enum xpc_retval (*allocate) (partid_t, int, u32, void **),
65 enum xpc_retval (*send)(partid_t, int, void *), 70 enum xpc_retval (*send) (partid_t, int, void *),
66 enum xpc_retval (*send_notify)(partid_t, int, void *, 71 enum xpc_retval (*send_notify) (partid_t, int, void *,
67 xpc_notify_func, void *), 72 xpc_notify_func, void *),
68 void (*received)(partid_t, int, void *), 73 void (*received) (partid_t, int, void *),
69 enum xpc_retval (*partid_to_nasids)(partid_t, void *)) 74 enum xpc_retval (*partid_to_nasids) (partid_t, void *))
70{ 75{
71 xpc_interface.connect = connect; 76 xpc_interface.connect = connect;
72 xpc_interface.disconnect = disconnect; 77 xpc_interface.disconnect = disconnect;
@@ -76,7 +81,7 @@ xpc_set_interface(void (*connect)(int),
76 xpc_interface.received = received; 81 xpc_interface.received = received;
77 xpc_interface.partid_to_nasids = partid_to_nasids; 82 xpc_interface.partid_to_nasids = partid_to_nasids;
78} 83}
79 84EXPORT_SYMBOL_GPL(xpc_set_interface);
80 85
81/* 86/*
82 * XPC calls this when it (the XPC module) is being unloaded. 87 * XPC calls this when it (the XPC module) is being unloaded.
@@ -84,20 +89,21 @@ xpc_set_interface(void (*connect)(int),
84void 89void
85xpc_clear_interface(void) 90xpc_clear_interface(void)
86{ 91{
87 xpc_interface.connect = (void (*)(int)) xpc_notloaded; 92 xpc_interface.connect = (void (*)(int))xpc_notloaded;
88 xpc_interface.disconnect = (void (*)(int)) xpc_notloaded; 93 xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
89 xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32, 94 xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
90 void **)) xpc_notloaded; 95 void **))xpc_notloaded;
91 xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *)) 96 xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
92 xpc_notloaded; 97 xpc_notloaded;
93 xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *, 98 xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
94 xpc_notify_func, void *)) xpc_notloaded; 99 xpc_notify_func,
100 void *))xpc_notloaded;
95 xpc_interface.received = (void (*)(partid_t, int, void *)) 101 xpc_interface.received = (void (*)(partid_t, int, void *))
96 xpc_notloaded; 102 xpc_notloaded;
97 xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *)) 103 xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
98 xpc_notloaded; 104 xpc_notloaded;
99} 105}
100 106EXPORT_SYMBOL_GPL(xpc_clear_interface);
101 107
102/* 108/*
103 * Register for automatic establishment of a channel connection whenever 109 * Register for automatic establishment of a channel connection whenever
@@ -125,11 +131,10 @@ xpc_clear_interface(void)
125 */ 131 */
126enum xpc_retval 132enum xpc_retval
127xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, 133xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
128 u16 nentries, u32 assigned_limit, u32 idle_limit) 134 u16 nentries, u32 assigned_limit, u32 idle_limit)
129{ 135{
130 struct xpc_registration *registration; 136 struct xpc_registration *registration;
131 137
132
133 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 138 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
134 DBUG_ON(payload_size == 0 || nentries == 0); 139 DBUG_ON(payload_size == 0 || nentries == 0);
135 DBUG_ON(func == NULL); 140 DBUG_ON(func == NULL);
@@ -137,9 +142,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
137 142
138 registration = &xpc_registrations[ch_number]; 143 registration = &xpc_registrations[ch_number];
139 144
140 if (mutex_lock_interruptible(&registration->mutex) != 0) { 145 if (mutex_lock_interruptible(&registration->mutex) != 0)
141 return xpcInterrupted; 146 return xpcInterrupted;
142 }
143 147
144 /* if XPC_CHANNEL_REGISTERED(ch_number) */ 148 /* if XPC_CHANNEL_REGISTERED(ch_number) */
145 if (registration->func != NULL) { 149 if (registration->func != NULL) {
@@ -161,7 +165,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
161 165
162 return xpcSuccess; 166 return xpcSuccess;
163} 167}
164 168EXPORT_SYMBOL_GPL(xpc_connect);
165 169
166/* 170/*
167 * Remove the registration for automatic connection of the specified channel 171 * Remove the registration for automatic connection of the specified channel
@@ -181,7 +185,6 @@ xpc_disconnect(int ch_number)
181{ 185{
182 struct xpc_registration *registration; 186 struct xpc_registration *registration;
183 187
184
185 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 188 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
186 189
187 registration = &xpc_registrations[ch_number]; 190 registration = &xpc_registrations[ch_number];
@@ -213,19 +216,17 @@ xpc_disconnect(int ch_number)
213 216
214 return; 217 return;
215} 218}
216 219EXPORT_SYMBOL_GPL(xpc_disconnect);
217 220
218int __init 221int __init
219xp_init(void) 222xp_init(void)
220{ 223{
221 int ret, ch_number; 224 int ret, ch_number;
222 u64 func_addr = *(u64 *) xp_nofault_PIOR; 225 u64 func_addr = *(u64 *)xp_nofault_PIOR;
223 u64 err_func_addr = *(u64 *) xp_error_PIOR; 226 u64 err_func_addr = *(u64 *)xp_error_PIOR;
224 227
225 228 if (!ia64_platform_is("sn2"))
226 if (!ia64_platform_is("sn2")) {
227 return -ENODEV; 229 return -ENODEV;
228 }
229 230
230 /* 231 /*
231 * Register a nofault code region which performs a cross-partition 232 * Register a nofault code region which performs a cross-partition
@@ -236,55 +237,43 @@ xp_init(void)
236 * least some CPUs on Shubs <= v1.2, which unfortunately we have to 237 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
237 * work around). 238 * work around).
238 */ 239 */
239 if ((ret = sn_register_nofault_code(func_addr, err_func_addr, 240 ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
240 err_func_addr, 1, 1)) != 0) { 241 1, 1);
242 if (ret != 0) {
241 printk(KERN_ERR "XP: can't register nofault code, error=%d\n", 243 printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
242 ret); 244 ret);
243 } 245 }
244 /* 246 /*
245 * Setup the nofault PIO read target. (There is no special reason why 247 * Setup the nofault PIO read target. (There is no special reason why
246 * SH_IPI_ACCESS was selected.) 248 * SH_IPI_ACCESS was selected.)
247 */ 249 */
248 if (is_shub2()) { 250 if (is_shub2())
249 xp_nofault_PIOR_target = SH2_IPI_ACCESS0; 251 xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
250 } else { 252 else
251 xp_nofault_PIOR_target = SH1_IPI_ACCESS; 253 xp_nofault_PIOR_target = SH1_IPI_ACCESS;
252 }
253 254
254 /* initialize the connection registration mutex */ 255 /* initialize the connection registration mutex */
255 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { 256 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
256 mutex_init(&xpc_registrations[ch_number].mutex); 257 mutex_init(&xpc_registrations[ch_number].mutex);
257 }
258 258
259 return 0; 259 return 0;
260} 260}
261module_init(xp_init);
262 261
262module_init(xp_init);
263 263
264void __exit 264void __exit
265xp_exit(void) 265xp_exit(void)
266{ 266{
267 u64 func_addr = *(u64 *) xp_nofault_PIOR; 267 u64 func_addr = *(u64 *)xp_nofault_PIOR;
268 u64 err_func_addr = *(u64 *) xp_error_PIOR; 268 u64 err_func_addr = *(u64 *)xp_error_PIOR;
269
270 269
271 /* unregister the PIO read nofault code region */ 270 /* unregister the PIO read nofault code region */
272 (void) sn_register_nofault_code(func_addr, err_func_addr, 271 (void)sn_register_nofault_code(func_addr, err_func_addr,
273 err_func_addr, 1, 0); 272 err_func_addr, 1, 0);
274} 273}
275module_exit(xp_exit);
276 274
275module_exit(xp_exit);
277 276
278MODULE_AUTHOR("Silicon Graphics, Inc."); 277MODULE_AUTHOR("Silicon Graphics, Inc.");
279MODULE_DESCRIPTION("Cross Partition (XP) base"); 278MODULE_DESCRIPTION("Cross Partition (XP) base");
280MODULE_LICENSE("GPL"); 279MODULE_LICENSE("GPL");
281
282EXPORT_SYMBOL(xp_nofault_PIOR);
283EXPORT_SYMBOL(xp_nofault_PIOR_target);
284EXPORT_SYMBOL(xpc_registrations);
285EXPORT_SYMBOL(xpc_interface);
286EXPORT_SYMBOL(xpc_clear_interface);
287EXPORT_SYMBOL(xpc_set_interface);
288EXPORT_SYMBOL(xpc_connect);
289EXPORT_SYMBOL(xpc_disconnect);
290
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
index 98e7c7dbfdd8..e38d43319429 100644
--- a/arch/ia64/sn/kernel/xp_nofault.S
+++ b/drivers/misc/sgi-xp/xp_nofault.S
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * The xp_nofault_PIOR function takes a pointer to a remote PIO register 10 * The xp_nofault_PIOR function takes a pointer to a remote PIO register
12 * and attempts to load and consume a value from it. This function 11 * and attempts to load and consume a value from it. This function
diff --git a/include/asm-ia64/sn/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 3c0900ab8003..9eb6d4a3269c 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -3,17 +3,15 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) structures and macros. 10 * Cross Partition Communication (XPC) structures and macros.
12 */ 11 */
13 12
14#ifndef _ASM_IA64_SN_XPC_H 13#ifndef _DRIVERS_MISC_SGIXP_XPC_H
15#define _ASM_IA64_SN_XPC_H 14#define _DRIVERS_MISC_SGIXP_XPC_H
16
17 15
18#include <linux/interrupt.h> 16#include <linux/interrupt.h>
19#include <linux/sysctl.h> 17#include <linux/sysctl.h>
@@ -27,8 +25,7 @@
27#include <asm/sn/addrs.h> 25#include <asm/sn/addrs.h>
28#include <asm/sn/mspec.h> 26#include <asm/sn/mspec.h>
29#include <asm/sn/shub_mmr.h> 27#include <asm/sn/shub_mmr.h>
30#include <asm/sn/xp.h> 28#include "xp.h"
31
32 29
33/* 30/*
34 * XPC Version numbers consist of a major and minor number. XPC can always 31 * XPC Version numbers consist of a major and minor number. XPC can always
@@ -39,7 +36,6 @@
39#define XPC_VERSION_MAJOR(_v) ((_v) >> 4) 36#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
40#define XPC_VERSION_MINOR(_v) ((_v) & 0xf) 37#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
41 38
42
43/* 39/*
44 * The next macros define word or bit representations for given 40 * The next macros define word or bit representations for given
45 * C-brick nasid in either the SAL provided bit array representing 41 * C-brick nasid in either the SAL provided bit array representing
@@ -67,7 +63,6 @@
67/* define the process name of the discovery thread */ 63/* define the process name of the discovery thread */
68#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery" 64#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
69 65
70
71/* 66/*
72 * the reserved page 67 * the reserved page
73 * 68 *
@@ -115,16 +110,16 @@ struct xpc_rsvd_page {
115 u8 partid; /* SAL: partition ID */ 110 u8 partid; /* SAL: partition ID */
116 u8 version; 111 u8 version;
117 u8 pad1[6]; /* align to next u64 in cacheline */ 112 u8 pad1[6]; /* align to next u64 in cacheline */
118 volatile u64 vars_pa; 113 u64 vars_pa; /* physical address of struct xpc_vars */
119 struct timespec stamp; /* time when reserved page was setup by XPC */ 114 struct timespec stamp; /* time when reserved page was setup by XPC */
120 u64 pad2[9]; /* align to last u64 in cacheline */ 115 u64 pad2[9]; /* align to last u64 in cacheline */
121 u64 nasids_size; /* SAL: size of each nasid mask in bytes */ 116 u64 nasids_size; /* SAL: size of each nasid mask in bytes */
122}; 117};
123 118
124#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */ 119#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */
125 120
126#define XPC_SUPPORTS_RP_STAMP(_version) \ 121#define XPC_SUPPORTS_RP_STAMP(_version) \
127 (_version >= _XPC_VERSION(1,1)) 122 (_version >= _XPC_VERSION(1, 1))
128 123
129/* 124/*
130 * compare stamps - the return value is: 125 * compare stamps - the return value is:
@@ -138,14 +133,13 @@ xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
138{ 133{
139 int ret; 134 int ret;
140 135
141 136 ret = stamp1->tv_sec - stamp2->tv_sec;
142 if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) { 137 if (ret == 0)
143 ret = stamp1->tv_nsec - stamp2->tv_nsec; 138 ret = stamp1->tv_nsec - stamp2->tv_nsec;
144 } 139
145 return ret; 140 return ret;
146} 141}
147 142
148
149/* 143/*
150 * Define the structures by which XPC variables can be exported to other 144 * Define the structures by which XPC variables can be exported to other
151 * partitions. (There are two: struct xpc_vars and struct xpc_vars_part) 145 * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -172,11 +166,10 @@ struct xpc_vars {
172 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ 166 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
173}; 167};
174 168
175#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */ 169#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
176 170
177#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \ 171#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
178 (_version >= _XPC_VERSION(3,1)) 172 (_version >= _XPC_VERSION(3, 1))
179
180 173
181static inline int 174static inline int
182xpc_hb_allowed(partid_t partid, struct xpc_vars *vars) 175xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
@@ -193,7 +186,7 @@ xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
193 old_mask = vars->heartbeating_to_mask; 186 old_mask = vars->heartbeating_to_mask;
194 new_mask = (old_mask | (1UL << partid)); 187 new_mask = (old_mask | (1UL << partid));
195 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != 188 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
196 old_mask); 189 old_mask);
197} 190}
198 191
199static inline void 192static inline void
@@ -205,10 +198,9 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
205 old_mask = vars->heartbeating_to_mask; 198 old_mask = vars->heartbeating_to_mask;
206 new_mask = (old_mask & ~(1UL << partid)); 199 new_mask = (old_mask & ~(1UL << partid));
207 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) != 200 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
208 old_mask); 201 old_mask);
209} 202}
210 203
211
212/* 204/*
213 * The AMOs page consists of a number of AMO variables which are divided into 205 * The AMOs page consists of a number of AMO variables which are divided into
214 * four groups, The first two groups are used to identify an IRQ's sender. 206 * four groups, The first two groups are used to identify an IRQ's sender.
@@ -222,7 +214,6 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
222#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS) 214#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
223#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1) 215#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
224 216
225
226/* 217/*
227 * The following structure describes the per partition specific variables. 218 * The following structure describes the per partition specific variables.
228 * 219 *
@@ -234,7 +225,7 @@ xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
234 * occupies half a cacheline. 225 * occupies half a cacheline.
235 */ 226 */
236struct xpc_vars_part { 227struct xpc_vars_part {
237 volatile u64 magic; 228 u64 magic;
238 229
239 u64 openclose_args_pa; /* physical address of open and close args */ 230 u64 openclose_args_pa; /* physical address of open and close args */
240 u64 GPs_pa; /* physical address of Get/Put values */ 231 u64 GPs_pa; /* physical address of Get/Put values */
@@ -257,20 +248,20 @@ struct xpc_vars_part {
257 * MAGIC2 indicates that this partition has pulled the remote partititions 248 * MAGIC2 indicates that this partition has pulled the remote partititions
258 * per partition variables that pertain to this partition. 249 * per partition variables that pertain to this partition.
259 */ 250 */
260#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ 251#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
261#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ 252#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
262
263 253
264/* the reserved page sizes and offsets */ 254/* the reserved page sizes and offsets */
265 255
266#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) 256#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
267#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars)) 257#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars))
268 258
269#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE) 259#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE))
270#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) 260#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
271#define XPC_RP_VARS(_rp) ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words) 261#define XPC_RP_VARS(_rp) ((struct xpc_vars *)(XPC_RP_MACH_NASIDS(_rp) + \
272#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE) 262 xp_nasid_mask_words))
273 263#define XPC_RP_VARS_PART(_rp) ((struct xpc_vars_part *) \
264 ((u8 *)XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
274 265
275/* 266/*
276 * Functions registered by add_timer() or called by kernel_thread() only 267 * Functions registered by add_timer() or called by kernel_thread() only
@@ -285,21 +276,17 @@ struct xpc_vars_part {
285#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) 276#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
286#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) 277#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
287 278
288
289
290/* 279/*
291 * Define a Get/Put value pair (pointers) used with a message queue. 280 * Define a Get/Put value pair (pointers) used with a message queue.
292 */ 281 */
293struct xpc_gp { 282struct xpc_gp {
294 volatile s64 get; /* Get value */ 283 s64 get; /* Get value */
295 volatile s64 put; /* Put value */ 284 s64 put; /* Put value */
296}; 285};
297 286
298#define XPC_GP_SIZE \ 287#define XPC_GP_SIZE \
299 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) 288 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
300 289
301
302
303/* 290/*
304 * Define a structure that contains arguments associated with opening and 291 * Define a structure that contains arguments associated with opening and
305 * closing a channel. 292 * closing a channel.
@@ -315,20 +302,15 @@ struct xpc_openclose_args {
315#define XPC_OPENCLOSE_ARGS_SIZE \ 302#define XPC_OPENCLOSE_ARGS_SIZE \
316 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) 303 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
317 304
318
319
320/* struct xpc_msg flags */ 305/* struct xpc_msg flags */
321 306
322#define XPC_M_DONE 0x01 /* msg has been received/consumed */ 307#define XPC_M_DONE 0x01 /* msg has been received/consumed */
323#define XPC_M_READY 0x02 /* msg is ready to be sent */ 308#define XPC_M_READY 0x02 /* msg is ready to be sent */
324#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ 309#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */
325 310
326
327#define XPC_MSG_ADDRESS(_payload) \ 311#define XPC_MSG_ADDRESS(_payload) \
328 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) 312 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
329 313
330
331
332/* 314/*
333 * Defines notify entry. 315 * Defines notify entry.
334 * 316 *
@@ -336,19 +318,17 @@ struct xpc_openclose_args {
336 * and consumed by the intended recipient. 318 * and consumed by the intended recipient.
337 */ 319 */
338struct xpc_notify { 320struct xpc_notify {
339 volatile u8 type; /* type of notification */ 321 u8 type; /* type of notification */
340 322
341 /* the following two fields are only used if type == XPC_N_CALL */ 323 /* the following two fields are only used if type == XPC_N_CALL */
342 xpc_notify_func func; /* user's notify function */ 324 xpc_notify_func func; /* user's notify function */
343 void *key; /* pointer to user's key */ 325 void *key; /* pointer to user's key */
344}; 326};
345 327
346/* struct xpc_notify type of notification */ 328/* struct xpc_notify type of notification */
347 329
348#define XPC_N_CALL 0x01 /* notify function provided by user */ 330#define XPC_N_CALL 0x01 /* notify function provided by user */
349 331
350
351
352/* 332/*
353 * Define the structure that manages all the stuff required by a channel. In 333 * Define the structure that manages all the stuff required by a channel. In
354 * particular, they are used to manage the messages sent across the channel. 334 * particular, they are used to manage the messages sent across the channel.
@@ -428,48 +408,48 @@ struct xpc_notify {
428 * messages. 408 * messages.
429 */ 409 */
430struct xpc_channel { 410struct xpc_channel {
431 partid_t partid; /* ID of remote partition connected */ 411 partid_t partid; /* ID of remote partition connected */
432 spinlock_t lock; /* lock for updating this structure */ 412 spinlock_t lock; /* lock for updating this structure */
433 u32 flags; /* general flags */ 413 u32 flags; /* general flags */
434 414
435 enum xpc_retval reason; /* reason why channel is disconnect'g */ 415 enum xpc_retval reason; /* reason why channel is disconnect'g */
436 int reason_line; /* line# disconnect initiated from */ 416 int reason_line; /* line# disconnect initiated from */
437 417
438 u16 number; /* channel # */ 418 u16 number; /* channel # */
439 419
440 u16 msg_size; /* sizeof each msg entry */ 420 u16 msg_size; /* sizeof each msg entry */
441 u16 local_nentries; /* #of msg entries in local msg queue */ 421 u16 local_nentries; /* #of msg entries in local msg queue */
442 u16 remote_nentries; /* #of msg entries in remote msg queue*/ 422 u16 remote_nentries; /* #of msg entries in remote msg queue */
443 423
444 void *local_msgqueue_base; /* base address of kmalloc'd space */ 424 void *local_msgqueue_base; /* base address of kmalloc'd space */
445 struct xpc_msg *local_msgqueue; /* local message queue */ 425 struct xpc_msg *local_msgqueue; /* local message queue */
446 void *remote_msgqueue_base; /* base address of kmalloc'd space */ 426 void *remote_msgqueue_base; /* base address of kmalloc'd space */
447 struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */ 427 struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
448 /* local message queue */ 428 /* local message queue */
449 u64 remote_msgqueue_pa; /* phys addr of remote partition's */ 429 u64 remote_msgqueue_pa; /* phys addr of remote partition's */
450 /* local message queue */ 430 /* local message queue */
451 431
452 atomic_t references; /* #of external references to queues */ 432 atomic_t references; /* #of external references to queues */
453 433
454 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ 434 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
455 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ 435 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
456 436
457 u8 delayed_IPI_flags; /* IPI flags received, but delayed */ 437 u8 delayed_IPI_flags; /* IPI flags received, but delayed */
458 /* action until channel disconnected */ 438 /* action until channel disconnected */
459 439
460 /* queue of msg senders who want to be notified when msg received */ 440 /* queue of msg senders who want to be notified when msg received */
461 441
462 atomic_t n_to_notify; /* #of msg senders to notify */ 442 atomic_t n_to_notify; /* #of msg senders to notify */
463 struct xpc_notify *notify_queue;/* notify queue for messages sent */ 443 struct xpc_notify *notify_queue; /* notify queue for messages sent */
464 444
465 xpc_channel_func func; /* user's channel function */ 445 xpc_channel_func func; /* user's channel function */
466 void *key; /* pointer to user's key */ 446 void *key; /* pointer to user's key */
467 447
468 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */ 448 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
469 struct completion wdisconnect_wait; /* wait for channel disconnect */ 449 struct completion wdisconnect_wait; /* wait for channel disconnect */
470 450
471 struct xpc_openclose_args *local_openclose_args; /* args passed on */ 451 struct xpc_openclose_args *local_openclose_args; /* args passed on */
472 /* opening or closing of channel */ 452 /* opening or closing of channel */
473 453
474 /* various flavors of local and remote Get/Put values */ 454 /* various flavors of local and remote Get/Put values */
475 455
@@ -477,56 +457,48 @@ struct xpc_channel {
477 struct xpc_gp remote_GP; /* remote Get/Put values */ 457 struct xpc_gp remote_GP; /* remote Get/Put values */
478 struct xpc_gp w_local_GP; /* working local Get/Put values */ 458 struct xpc_gp w_local_GP; /* working local Get/Put values */
479 struct xpc_gp w_remote_GP; /* working remote Get/Put values */ 459 struct xpc_gp w_remote_GP; /* working remote Get/Put values */
480 s64 next_msg_to_pull; /* Put value of next msg to pull */ 460 s64 next_msg_to_pull; /* Put value of next msg to pull */
481 461
482 /* kthread management related fields */ 462 /* kthread management related fields */
483 463
484// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
485// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
486// >>> dependent on activity over the last interval of time
487 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ 464 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
488 u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ 465 u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
489 atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ 466 atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
490 u32 kthreads_idle_limit; /* limit on #of kthreads idle */ 467 u32 kthreads_idle_limit; /* limit on #of kthreads idle */
491 atomic_t kthreads_active; /* #of kthreads actively working */ 468 atomic_t kthreads_active; /* #of kthreads actively working */
492 // >>> following field is temporary
493 u32 kthreads_created; /* total #of kthreads created */
494 469
495 wait_queue_head_t idle_wq; /* idle kthread wait queue */ 470 wait_queue_head_t idle_wq; /* idle kthread wait queue */
496 471
497} ____cacheline_aligned; 472} ____cacheline_aligned;
498 473
499
500/* struct xpc_channel flags */ 474/* struct xpc_channel flags */
501 475
502#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ 476#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
503 477
504#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ 478#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
505#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ 479#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
506#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ 480#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
507#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ 481#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
508 482
509#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ 483#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
510#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ 484#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
511#define XPC_C_CONNECTEDCALLOUT_MADE \ 485#define XPC_C_CONNECTEDCALLOUT_MADE \
512 0x00000080 /* connected callout completed */ 486 0x00000080 /* connected callout completed */
513#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ 487#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
514#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ 488#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
515 489
516#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ 490#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
517#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ 491#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
518#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ 492#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
519#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ 493#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
520 494
521#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ 495#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
522#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ 496#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
523#define XPC_C_DISCONNECTINGCALLOUT \ 497#define XPC_C_DISCONNECTINGCALLOUT \
524 0x00010000 /* disconnecting callout initiated */ 498 0x00010000 /* disconnecting callout initiated */
525#define XPC_C_DISCONNECTINGCALLOUT_MADE \ 499#define XPC_C_DISCONNECTINGCALLOUT_MADE \
526 0x00020000 /* disconnecting callout completed */ 500 0x00020000 /* disconnecting callout completed */
527#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ 501#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
528
529
530 502
531/* 503/*
532 * Manages channels on a partition basis. There is one of these structures 504 * Manages channels on a partition basis. There is one of these structures
@@ -537,33 +509,31 @@ struct xpc_partition {
537 509
538 /* XPC HB infrastructure */ 510 /* XPC HB infrastructure */
539 511
540 u8 remote_rp_version; /* version# of partition's rsvd pg */ 512 u8 remote_rp_version; /* version# of partition's rsvd pg */
541 struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */ 513 struct timespec remote_rp_stamp; /* time when rsvd pg was initialized */
542 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ 514 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
543 u64 remote_vars_pa; /* phys addr of partition's vars */ 515 u64 remote_vars_pa; /* phys addr of partition's vars */
544 u64 remote_vars_part_pa; /* phys addr of partition's vars part */ 516 u64 remote_vars_part_pa; /* phys addr of partition's vars part */
545 u64 last_heartbeat; /* HB at last read */ 517 u64 last_heartbeat; /* HB at last read */
546 u64 remote_amos_page_pa; /* phys addr of partition's amos page */ 518 u64 remote_amos_page_pa; /* phys addr of partition's amos page */
547 int remote_act_nasid; /* active part's act/deact nasid */ 519 int remote_act_nasid; /* active part's act/deact nasid */
548 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ 520 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
549 u32 act_IRQ_rcvd; /* IRQs since activation */ 521 u32 act_IRQ_rcvd; /* IRQs since activation */
550 spinlock_t act_lock; /* protect updating of act_state */ 522 spinlock_t act_lock; /* protect updating of act_state */
551 u8 act_state; /* from XPC HB viewpoint */ 523 u8 act_state; /* from XPC HB viewpoint */
552 u8 remote_vars_version; /* version# of partition's vars */ 524 u8 remote_vars_version; /* version# of partition's vars */
553 enum xpc_retval reason; /* reason partition is deactivating */ 525 enum xpc_retval reason; /* reason partition is deactivating */
554 int reason_line; /* line# deactivation initiated from */ 526 int reason_line; /* line# deactivation initiated from */
555 int reactivate_nasid; /* nasid in partition to reactivate */ 527 int reactivate_nasid; /* nasid in partition to reactivate */
556 528
557 unsigned long disengage_request_timeout; /* timeout in jiffies */ 529 unsigned long disengage_request_timeout; /* timeout in jiffies */
558 struct timer_list disengage_request_timer; 530 struct timer_list disengage_request_timer;
559 531
560
561 /* XPC infrastructure referencing and teardown control */ 532 /* XPC infrastructure referencing and teardown control */
562 533
563 volatile u8 setup_state; /* infrastructure setup state */ 534 u8 setup_state; /* infrastructure setup state */
564 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ 535 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
565 atomic_t references; /* #of references to infrastructure */ 536 atomic_t references; /* #of references to infrastructure */
566
567 537
568 /* 538 /*
569 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN 539 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
@@ -572,53 +542,48 @@ struct xpc_partition {
572 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) 542 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
573 */ 543 */
574 544
575 545 u8 nchannels; /* #of defined channels supported */
576 u8 nchannels; /* #of defined channels supported */ 546 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
577 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ 547 atomic_t nchannels_engaged; /* #of channels engaged with remote part */
578 atomic_t nchannels_engaged;/* #of channels engaged with remote part */ 548 struct xpc_channel *channels; /* array of channel structures */
579 struct xpc_channel *channels;/* array of channel structures */ 549
580 550 void *local_GPs_base; /* base address of kmalloc'd space */
581 void *local_GPs_base; /* base address of kmalloc'd space */ 551 struct xpc_gp *local_GPs; /* local Get/Put values */
582 struct xpc_gp *local_GPs; /* local Get/Put values */ 552 void *remote_GPs_base; /* base address of kmalloc'd space */
583 void *remote_GPs_base; /* base address of kmalloc'd space */ 553 struct xpc_gp *remote_GPs; /* copy of remote partition's local */
584 struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ 554 /* Get/Put values */
585 /* values */ 555 u64 remote_GPs_pa; /* phys address of remote partition's local */
586 u64 remote_GPs_pa; /* phys address of remote partition's local */ 556 /* Get/Put values */
587 /* Get/Put values */
588
589 557
590 /* fields used to pass args when opening or closing a channel */ 558 /* fields used to pass args when opening or closing a channel */
591 559
592 void *local_openclose_args_base; /* base address of kmalloc'd space */ 560 void *local_openclose_args_base; /* base address of kmalloc'd space */
593 struct xpc_openclose_args *local_openclose_args; /* local's args */ 561 struct xpc_openclose_args *local_openclose_args; /* local's args */
594 void *remote_openclose_args_base; /* base address of kmalloc'd space */ 562 void *remote_openclose_args_base; /* base address of kmalloc'd space */
595 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ 563 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
596 /* args */ 564 /* args */
597 u64 remote_openclose_args_pa; /* phys addr of remote's args */ 565 u64 remote_openclose_args_pa; /* phys addr of remote's args */
598
599 566
600 /* IPI sending, receiving and handling related fields */ 567 /* IPI sending, receiving and handling related fields */
601 568
602 int remote_IPI_nasid; /* nasid of where to send IPIs */ 569 int remote_IPI_nasid; /* nasid of where to send IPIs */
603 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ 570 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
604 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ 571 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
605
606 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
607 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
608 char IPI_owner[8]; /* IPI owner's name */
609 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
610 572
611 spinlock_t IPI_lock; /* IPI handler lock */ 573 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
574 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
575 char IPI_owner[8]; /* IPI owner's name */
576 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
612 577
578 spinlock_t IPI_lock; /* IPI handler lock */
613 579
614 /* channel manager related fields */ 580 /* channel manager related fields */
615 581
616 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ 582 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
617 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ 583 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
618 584
619} ____cacheline_aligned; 585} ____cacheline_aligned;
620 586
621
622/* struct xpc_partition act_state values (for XPC HB) */ 587/* struct xpc_partition act_state values (for XPC HB) */
623 588
624#define XPC_P_INACTIVE 0x00 /* partition is not active */ 589#define XPC_P_INACTIVE 0x00 /* partition is not active */
@@ -627,11 +592,9 @@ struct xpc_partition {
627#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ 592#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
628#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ 593#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
629 594
630
631#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ 595#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
632 xpc_deactivate_partition(__LINE__, (_p), (_reason)) 596 xpc_deactivate_partition(__LINE__, (_p), (_reason))
633 597
634
635/* struct xpc_partition setup_state values */ 598/* struct xpc_partition setup_state values */
636 599
637#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ 600#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
@@ -639,8 +602,6 @@ struct xpc_partition {
639#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ 602#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
640#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ 603#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
641 604
642
643
644/* 605/*
645 * struct xpc_partition IPI_timer #of seconds to wait before checking for 606 * struct xpc_partition IPI_timer #of seconds to wait before checking for
646 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until 607 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -648,22 +609,17 @@ struct xpc_partition {
648 */ 609 */
649#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) 610#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
650 611
651
652/* number of seconds to wait for other partitions to disengage */ 612/* number of seconds to wait for other partitions to disengage */
653#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90 613#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90
654 614
655/* interval in seconds to print 'waiting disengagement' messages */ 615/* interval in seconds to print 'waiting disengagement' messages */
656#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10 616#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
657 617
658
659#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) 618#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
660 619
661
662
663/* found in xp_main.c */ 620/* found in xp_main.c */
664extern struct xpc_registration xpc_registrations[]; 621extern struct xpc_registration xpc_registrations[];
665 622
666
667/* found in xpc_main.c */ 623/* found in xpc_main.c */
668extern struct device *xpc_part; 624extern struct device *xpc_part;
669extern struct device *xpc_chan; 625extern struct device *xpc_chan;
@@ -676,7 +632,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
676extern void xpc_create_kthreads(struct xpc_channel *, int, int); 632extern void xpc_create_kthreads(struct xpc_channel *, int, int);
677extern void xpc_disconnect_wait(int); 633extern void xpc_disconnect_wait(int);
678 634
679
680/* found in xpc_partition.c */ 635/* found in xpc_partition.c */
681extern int xpc_exiting; 636extern int xpc_exiting;
682extern struct xpc_vars *xpc_vars; 637extern struct xpc_vars *xpc_vars;
@@ -696,10 +651,9 @@ extern void xpc_mark_partition_inactive(struct xpc_partition *);
696extern void xpc_discovery(void); 651extern void xpc_discovery(void);
697extern void xpc_check_remote_hb(void); 652extern void xpc_check_remote_hb(void);
698extern void xpc_deactivate_partition(const int, struct xpc_partition *, 653extern void xpc_deactivate_partition(const int, struct xpc_partition *,
699 enum xpc_retval); 654 enum xpc_retval);
700extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); 655extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
701 656
702
703/* found in xpc_channel.c */ 657/* found in xpc_channel.c */
704extern void xpc_initiate_connect(int); 658extern void xpc_initiate_connect(int);
705extern void xpc_initiate_disconnect(int); 659extern void xpc_initiate_disconnect(int);
@@ -714,23 +668,18 @@ extern void xpc_process_channel_activity(struct xpc_partition *);
714extern void xpc_connected_callout(struct xpc_channel *); 668extern void xpc_connected_callout(struct xpc_channel *);
715extern void xpc_deliver_msg(struct xpc_channel *); 669extern void xpc_deliver_msg(struct xpc_channel *);
716extern void xpc_disconnect_channel(const int, struct xpc_channel *, 670extern void xpc_disconnect_channel(const int, struct xpc_channel *,
717 enum xpc_retval, unsigned long *); 671 enum xpc_retval, unsigned long *);
718extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); 672extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
719extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); 673extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
720extern void xpc_teardown_infrastructure(struct xpc_partition *); 674extern void xpc_teardown_infrastructure(struct xpc_partition *);
721 675
722
723
724static inline void 676static inline void
725xpc_wakeup_channel_mgr(struct xpc_partition *part) 677xpc_wakeup_channel_mgr(struct xpc_partition *part)
726{ 678{
727 if (atomic_inc_return(&part->channel_mgr_requests) == 1) { 679 if (atomic_inc_return(&part->channel_mgr_requests) == 1)
728 wake_up(&part->channel_mgr_wq); 680 wake_up(&part->channel_mgr_wq);
729 }
730} 681}
731 682
732
733
734/* 683/*
735 * These next two inlines are used to keep us from tearing down a channel's 684 * These next two inlines are used to keep us from tearing down a channel's
736 * msg queues while a thread may be referencing them. 685 * msg queues while a thread may be referencing them.
@@ -747,17 +696,13 @@ xpc_msgqueue_deref(struct xpc_channel *ch)
747 s32 refs = atomic_dec_return(&ch->references); 696 s32 refs = atomic_dec_return(&ch->references);
748 697
749 DBUG_ON(refs < 0); 698 DBUG_ON(refs < 0);
750 if (refs == 0) { 699 if (refs == 0)
751 xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); 700 xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
752 }
753} 701}
754 702
755
756
757#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ 703#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
758 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) 704 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
759 705
760
761/* 706/*
762 * These two inlines are used to keep us from tearing down a partition's 707 * These two inlines are used to keep us from tearing down a partition's
763 * setup infrastructure while a thread may be referencing it. 708 * setup infrastructure while a thread may be referencing it.
@@ -767,11 +712,9 @@ xpc_part_deref(struct xpc_partition *part)
767{ 712{
768 s32 refs = atomic_dec_return(&part->references); 713 s32 refs = atomic_dec_return(&part->references);
769 714
770
771 DBUG_ON(refs < 0); 715 DBUG_ON(refs < 0);
772 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { 716 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
773 wake_up(&part->teardown_wq); 717 wake_up(&part->teardown_wq);
774 }
775} 718}
776 719
777static inline int 720static inline int
@@ -779,17 +722,14 @@ xpc_part_ref(struct xpc_partition *part)
779{ 722{
780 int setup; 723 int setup;
781 724
782
783 atomic_inc(&part->references); 725 atomic_inc(&part->references);
784 setup = (part->setup_state == XPC_P_SETUP); 726 setup = (part->setup_state == XPC_P_SETUP);
785 if (!setup) { 727 if (!setup)
786 xpc_part_deref(part); 728 xpc_part_deref(part);
787 } 729
788 return setup; 730 return setup;
789} 731}
790 732
791
792
793/* 733/*
794 * The following macro is to be used for the setting of the reason and 734 * The following macro is to be used for the setting of the reason and
795 * reason_line fields in both the struct xpc_channel and struct xpc_partition 735 * reason_line fields in both the struct xpc_channel and struct xpc_partition
@@ -801,8 +741,6 @@ xpc_part_ref(struct xpc_partition *part)
801 (_p)->reason_line = _line; \ 741 (_p)->reason_line = _line; \
802 } 742 }
803 743
804
805
806/* 744/*
807 * This next set of inlines are used to keep track of when a partition is 745 * This next set of inlines are used to keep track of when a partition is
808 * potentially engaged in accessing memory belonging to another partition. 746 * potentially engaged in accessing memory belonging to another partition.
@@ -812,23 +750,24 @@ static inline void
812xpc_mark_partition_engaged(struct xpc_partition *part) 750xpc_mark_partition_engaged(struct xpc_partition *part)
813{ 751{
814 unsigned long irq_flags; 752 unsigned long irq_flags;
815 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 753 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
816 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); 754 (XPC_ENGAGED_PARTITIONS_AMO *
817 755 sizeof(AMO_t)));
818 756
819 local_irq_save(irq_flags); 757 local_irq_save(irq_flags);
820 758
821 /* set bit corresponding to our partid in remote partition's AMO */ 759 /* set bit corresponding to our partid in remote partition's AMO */
822 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, 760 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
823 (1UL << sn_partition_id)); 761 (1UL << sn_partition_id));
824 /* 762 /*
825 * We must always use the nofault function regardless of whether we 763 * We must always use the nofault function regardless of whether we
826 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 764 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
827 * didn't, we'd never know that the other partition is down and would 765 * didn't, we'd never know that the other partition is down and would
828 * keep sending IPIs and AMOs to it until the heartbeat times out. 766 * keep sending IPIs and AMOs to it until the heartbeat times out.
829 */ 767 */
830 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 768 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
831 variable), xp_nofault_PIOR_target)); 769 variable),
770 xp_nofault_PIOR_target));
832 771
833 local_irq_restore(irq_flags); 772 local_irq_restore(irq_flags);
834} 773}
@@ -837,23 +776,24 @@ static inline void
837xpc_mark_partition_disengaged(struct xpc_partition *part) 776xpc_mark_partition_disengaged(struct xpc_partition *part)
838{ 777{
839 unsigned long irq_flags; 778 unsigned long irq_flags;
840 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 779 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
841 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t))); 780 (XPC_ENGAGED_PARTITIONS_AMO *
842 781 sizeof(AMO_t)));
843 782
844 local_irq_save(irq_flags); 783 local_irq_save(irq_flags);
845 784
846 /* clear bit corresponding to our partid in remote partition's AMO */ 785 /* clear bit corresponding to our partid in remote partition's AMO */
847 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 786 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
848 ~(1UL << sn_partition_id)); 787 ~(1UL << sn_partition_id));
849 /* 788 /*
850 * We must always use the nofault function regardless of whether we 789 * We must always use the nofault function regardless of whether we
851 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 790 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
852 * didn't, we'd never know that the other partition is down and would 791 * didn't, we'd never know that the other partition is down and would
853 * keep sending IPIs and AMOs to it until the heartbeat times out. 792 * keep sending IPIs and AMOs to it until the heartbeat times out.
854 */ 793 */
855 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 794 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
856 variable), xp_nofault_PIOR_target)); 795 variable),
796 xp_nofault_PIOR_target));
857 797
858 local_irq_restore(irq_flags); 798 local_irq_restore(irq_flags);
859} 799}
@@ -862,23 +802,23 @@ static inline void
862xpc_request_partition_disengage(struct xpc_partition *part) 802xpc_request_partition_disengage(struct xpc_partition *part)
863{ 803{
864 unsigned long irq_flags; 804 unsigned long irq_flags;
865 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 805 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
866 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 806 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
867
868 807
869 local_irq_save(irq_flags); 808 local_irq_save(irq_flags);
870 809
871 /* set bit corresponding to our partid in remote partition's AMO */ 810 /* set bit corresponding to our partid in remote partition's AMO */
872 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, 811 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
873 (1UL << sn_partition_id)); 812 (1UL << sn_partition_id));
874 /* 813 /*
875 * We must always use the nofault function regardless of whether we 814 * We must always use the nofault function regardless of whether we
876 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 815 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
877 * didn't, we'd never know that the other partition is down and would 816 * didn't, we'd never know that the other partition is down and would
878 * keep sending IPIs and AMOs to it until the heartbeat times out. 817 * keep sending IPIs and AMOs to it until the heartbeat times out.
879 */ 818 */
880 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 819 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
881 variable), xp_nofault_PIOR_target)); 820 variable),
821 xp_nofault_PIOR_target));
882 822
883 local_irq_restore(irq_flags); 823 local_irq_restore(irq_flags);
884} 824}
@@ -887,23 +827,23 @@ static inline void
887xpc_cancel_partition_disengage_request(struct xpc_partition *part) 827xpc_cancel_partition_disengage_request(struct xpc_partition *part)
888{ 828{
889 unsigned long irq_flags; 829 unsigned long irq_flags;
890 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa + 830 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
891 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t))); 831 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
892
893 832
894 local_irq_save(irq_flags); 833 local_irq_save(irq_flags);
895 834
896 /* clear bit corresponding to our partid in remote partition's AMO */ 835 /* clear bit corresponding to our partid in remote partition's AMO */
897 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 836 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
898 ~(1UL << sn_partition_id)); 837 ~(1UL << sn_partition_id));
899 /* 838 /*
900 * We must always use the nofault function regardless of whether we 839 * We must always use the nofault function regardless of whether we
901 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 840 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
902 * didn't, we'd never know that the other partition is down and would 841 * didn't, we'd never know that the other partition is down and would
903 * keep sending IPIs and AMOs to it until the heartbeat times out. 842 * keep sending IPIs and AMOs to it until the heartbeat times out.
904 */ 843 */
905 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo-> 844 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
906 variable), xp_nofault_PIOR_target)); 845 variable),
846 xp_nofault_PIOR_target));
907 847
908 local_irq_restore(irq_flags); 848 local_irq_restore(irq_flags);
909} 849}
@@ -913,10 +853,9 @@ xpc_partition_engaged(u64 partid_mask)
913{ 853{
914 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 854 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
915 855
916
917 /* return our partition's AMO variable ANDed with partid_mask */ 856 /* return our partition's AMO variable ANDed with partid_mask */
918 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & 857 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
919 partid_mask); 858 partid_mask);
920} 859}
921 860
922static inline u64 861static inline u64
@@ -924,10 +863,9 @@ xpc_partition_disengage_requested(u64 partid_mask)
924{ 863{
925 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; 864 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
926 865
927
928 /* return our partition's AMO variable ANDed with partid_mask */ 866 /* return our partition's AMO variable ANDed with partid_mask */
929 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) & 867 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
930 partid_mask); 868 partid_mask);
931} 869}
932 870
933static inline void 871static inline void
@@ -935,10 +873,9 @@ xpc_clear_partition_engaged(u64 partid_mask)
935{ 873{
936 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 874 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
937 875
938
939 /* clear bit(s) based on partid_mask in our partition's AMO */ 876 /* clear bit(s) based on partid_mask in our partition's AMO */
940 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 877 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
941 ~partid_mask); 878 ~partid_mask);
942} 879}
943 880
944static inline void 881static inline void
@@ -946,14 +883,11 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
946{ 883{
947 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; 884 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
948 885
949
950 /* clear bit(s) based on partid_mask in our partition's AMO */ 886 /* clear bit(s) based on partid_mask in our partition's AMO */
951 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND, 887 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
952 ~partid_mask); 888 ~partid_mask);
953} 889}
954 890
955
956
957/* 891/*
958 * The following set of macros and inlines are used for the sending and 892 * The following set of macros and inlines are used for the sending and
959 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, 893 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
@@ -964,20 +898,18 @@ xpc_clear_partition_disengage_request(u64 partid_mask)
964static inline u64 898static inline u64
965xpc_IPI_receive(AMO_t *amo) 899xpc_IPI_receive(AMO_t *amo)
966{ 900{
967 return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); 901 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
968} 902}
969 903
970
971static inline enum xpc_retval 904static inline enum xpc_retval
972xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) 905xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
973{ 906{
974 int ret = 0; 907 int ret = 0;
975 unsigned long irq_flags; 908 unsigned long irq_flags;
976 909
977
978 local_irq_save(irq_flags); 910 local_irq_save(irq_flags);
979 911
980 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); 912 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
981 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); 913 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
982 914
983 /* 915 /*
@@ -986,15 +918,14 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
986 * didn't, we'd never know that the other partition is down and would 918 * didn't, we'd never know that the other partition is down and would
987 * keep sending IPIs and AMOs to it until the heartbeat times out. 919 * keep sending IPIs and AMOs to it until the heartbeat times out.
988 */ 920 */
989 ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), 921 ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
990 xp_nofault_PIOR_target)); 922 xp_nofault_PIOR_target));
991 923
992 local_irq_restore(irq_flags); 924 local_irq_restore(irq_flags);
993 925
994 return ((ret == 0) ? xpcSuccess : xpcPioReadError); 926 return ((ret == 0) ? xpcSuccess : xpcPioReadError);
995} 927}
996 928
997
998/* 929/*
999 * IPIs associated with SGI_XPC_ACTIVATE IRQ. 930 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
1000 */ 931 */
@@ -1004,47 +935,47 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
1004 */ 935 */
1005static inline void 936static inline void
1006xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid, 937xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
1007 int to_phys_cpuid) 938 int to_phys_cpuid)
1008{ 939{
1009 int w_index = XPC_NASID_W_INDEX(from_nasid); 940 int w_index = XPC_NASID_W_INDEX(from_nasid);
1010 int b_index = XPC_NASID_B_INDEX(from_nasid); 941 int b_index = XPC_NASID_B_INDEX(from_nasid);
1011 AMO_t *amos = (AMO_t *) __va(amos_page_pa + 942 AMO_t *amos = (AMO_t *)__va(amos_page_pa +
1012 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); 943 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
1013 944
1014 945 (void)xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
1015 (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, 946 to_phys_cpuid, SGI_XPC_ACTIVATE);
1016 to_phys_cpuid, SGI_XPC_ACTIVATE);
1017} 947}
1018 948
1019static inline void 949static inline void
1020xpc_IPI_send_activate(struct xpc_vars *vars) 950xpc_IPI_send_activate(struct xpc_vars *vars)
1021{ 951{
1022 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), 952 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
1023 vars->act_nasid, vars->act_phys_cpuid); 953 vars->act_nasid, vars->act_phys_cpuid);
1024} 954}
1025 955
1026static inline void 956static inline void
1027xpc_IPI_send_activated(struct xpc_partition *part) 957xpc_IPI_send_activated(struct xpc_partition *part)
1028{ 958{
1029 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), 959 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1030 part->remote_act_nasid, part->remote_act_phys_cpuid); 960 part->remote_act_nasid,
961 part->remote_act_phys_cpuid);
1031} 962}
1032 963
1033static inline void 964static inline void
1034xpc_IPI_send_reactivate(struct xpc_partition *part) 965xpc_IPI_send_reactivate(struct xpc_partition *part)
1035{ 966{
1036 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, 967 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
1037 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); 968 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
1038} 969}
1039 970
1040static inline void 971static inline void
1041xpc_IPI_send_disengage(struct xpc_partition *part) 972xpc_IPI_send_disengage(struct xpc_partition *part)
1042{ 973{
1043 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), 974 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1044 part->remote_act_nasid, part->remote_act_phys_cpuid); 975 part->remote_act_nasid,
976 part->remote_act_phys_cpuid);
1045} 977}
1046 978
1047
1048/* 979/*
1049 * IPIs associated with SGI_XPC_NOTIFY IRQ. 980 * IPIs associated with SGI_XPC_NOTIFY IRQ.
1050 */ 981 */
@@ -1058,33 +989,28 @@ xpc_IPI_send_disengage(struct xpc_partition *part)
1058 989
1059static inline void 990static inline void
1060xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, 991xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
1061 unsigned long *irq_flags) 992 unsigned long *irq_flags)
1062{ 993{
1063 struct xpc_partition *part = &xpc_partitions[ch->partid]; 994 struct xpc_partition *part = &xpc_partitions[ch->partid];
1064 enum xpc_retval ret; 995 enum xpc_retval ret;
1065 996
1066
1067 if (likely(part->act_state != XPC_P_DEACTIVATING)) { 997 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
1068 ret = xpc_IPI_send(part->remote_IPI_amo_va, 998 ret = xpc_IPI_send(part->remote_IPI_amo_va,
1069 (u64) ipi_flag << (ch->number * 8), 999 (u64)ipi_flag << (ch->number * 8),
1070 part->remote_IPI_nasid, 1000 part->remote_IPI_nasid,
1071 part->remote_IPI_phys_cpuid, 1001 part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
1072 SGI_XPC_NOTIFY);
1073 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", 1002 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
1074 ipi_flag_string, ch->partid, ch->number, ret); 1003 ipi_flag_string, ch->partid, ch->number, ret);
1075 if (unlikely(ret != xpcSuccess)) { 1004 if (unlikely(ret != xpcSuccess)) {
1076 if (irq_flags != NULL) { 1005 if (irq_flags != NULL)
1077 spin_unlock_irqrestore(&ch->lock, *irq_flags); 1006 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1078 }
1079 XPC_DEACTIVATE_PARTITION(part, ret); 1007 XPC_DEACTIVATE_PARTITION(part, ret);
1080 if (irq_flags != NULL) { 1008 if (irq_flags != NULL)
1081 spin_lock_irqsave(&ch->lock, *irq_flags); 1009 spin_lock_irqsave(&ch->lock, *irq_flags);
1082 }
1083 } 1010 }
1084 } 1011 }
1085} 1012}
1086 1013
1087
1088/* 1014/*
1089 * Make it look like the remote partition, which is associated with the 1015 * Make it look like the remote partition, which is associated with the
1090 * specified channel, sent us an IPI. This faked IPI will be handled 1016 * specified channel, sent us an IPI. This faked IPI will be handled
@@ -1095,18 +1021,16 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
1095 1021
1096static inline void 1022static inline void
1097xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, 1023xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1098 char *ipi_flag_string) 1024 char *ipi_flag_string)
1099{ 1025{
1100 struct xpc_partition *part = &xpc_partitions[ch->partid]; 1026 struct xpc_partition *part = &xpc_partitions[ch->partid];
1101 1027
1102 1028 FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable),
1103 FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), 1029 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
1104 FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
1105 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", 1030 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
1106 ipi_flag_string, ch->partid, ch->number); 1031 ipi_flag_string, ch->partid, ch->number);
1107} 1032}
1108 1033
1109
1110/* 1034/*
1111 * The sending and receiving of IPIs includes the setting of an AMO variable 1035 * The sending and receiving of IPIs includes the setting of an AMO variable
1112 * to indicate the reason the IPI was sent. The 64-bit variable is divided 1036 * to indicate the reason the IPI was sent. The 64-bit variable is divided
@@ -1121,21 +1045,18 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1121#define XPC_IPI_OPENREPLY 0x08 1045#define XPC_IPI_OPENREPLY 0x08
1122#define XPC_IPI_MSGREQUEST 0x10 1046#define XPC_IPI_MSGREQUEST 0x10
1123 1047
1124
1125/* given an AMO variable and a channel#, get its associated IPI flags */ 1048/* given an AMO variable and a channel#, get its associated IPI flags */
1126#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) 1049#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
1127#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) 1050#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
1128 1051
1129#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) 1052#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0fUL)
1130#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) 1053#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010UL)
1131
1132 1054
1133static inline void 1055static inline void
1134xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) 1056xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
1135{ 1057{
1136 struct xpc_openclose_args *args = ch->local_openclose_args; 1058 struct xpc_openclose_args *args = ch->local_openclose_args;
1137 1059
1138
1139 args->reason = ch->reason; 1060 args->reason = ch->reason;
1140 1061
1141 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); 1062 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
@@ -1152,7 +1073,6 @@ xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
1152{ 1073{
1153 struct xpc_openclose_args *args = ch->local_openclose_args; 1074 struct xpc_openclose_args *args = ch->local_openclose_args;
1154 1075
1155
1156 args->msg_size = ch->msg_size; 1076 args->msg_size = ch->msg_size;
1157 args->local_nentries = ch->local_nentries; 1077 args->local_nentries = ch->local_nentries;
1158 1078
@@ -1164,7 +1084,6 @@ xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
1164{ 1084{
1165 struct xpc_openclose_args *args = ch->local_openclose_args; 1085 struct xpc_openclose_args *args = ch->local_openclose_args;
1166 1086
1167
1168 args->remote_nentries = ch->remote_nentries; 1087 args->remote_nentries = ch->remote_nentries;
1169 args->local_nentries = ch->local_nentries; 1088 args->local_nentries = ch->local_nentries;
1170 args->local_msgqueue_pa = __pa(ch->local_msgqueue); 1089 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
@@ -1184,7 +1103,6 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
1184 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); 1103 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
1185} 1104}
1186 1105
1187
1188/* 1106/*
1189 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These 1107 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
1190 * pages are located in the lowest granule. The lowest granule uses 4k pages 1108 * pages are located in the lowest granule. The lowest granule uses 4k pages
@@ -1201,13 +1119,10 @@ xpc_IPI_init(int index)
1201{ 1119{
1202 AMO_t *amo = xpc_vars->amos_page + index; 1120 AMO_t *amo = xpc_vars->amos_page + index;
1203 1121
1204 1122 (void)xpc_IPI_receive(amo); /* clear AMO variable */
1205 (void) xpc_IPI_receive(amo); /* clear AMO variable */
1206 return amo; 1123 return amo;
1207} 1124}
1208 1125
1209
1210
1211static inline enum xpc_retval 1126static inline enum xpc_retval
1212xpc_map_bte_errors(bte_result_t error) 1127xpc_map_bte_errors(bte_result_t error)
1213{ 1128{
@@ -1220,22 +1135,31 @@ xpc_map_bte_errors(bte_result_t error)
1220 return xpcBteUnmappedError; 1135 return xpcBteUnmappedError;
1221 } 1136 }
1222 switch (error) { 1137 switch (error) {
1223 case BTE_SUCCESS: return xpcSuccess; 1138 case BTE_SUCCESS:
1224 case BTEFAIL_DIR: return xpcBteDirectoryError; 1139 return xpcSuccess;
1225 case BTEFAIL_POISON: return xpcBtePoisonError; 1140 case BTEFAIL_DIR:
1226 case BTEFAIL_WERR: return xpcBteWriteError; 1141 return xpcBteDirectoryError;
1227 case BTEFAIL_ACCESS: return xpcBteAccessError; 1142 case BTEFAIL_POISON:
1228 case BTEFAIL_PWERR: return xpcBtePWriteError; 1143 return xpcBtePoisonError;
1229 case BTEFAIL_PRERR: return xpcBtePReadError; 1144 case BTEFAIL_WERR:
1230 case BTEFAIL_TOUT: return xpcBteTimeOutError; 1145 return xpcBteWriteError;
1231 case BTEFAIL_XTERR: return xpcBteXtalkError; 1146 case BTEFAIL_ACCESS:
1232 case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; 1147 return xpcBteAccessError;
1233 default: return xpcBteUnmappedError; 1148 case BTEFAIL_PWERR:
1149 return xpcBtePWriteError;
1150 case BTEFAIL_PRERR:
1151 return xpcBtePReadError;
1152 case BTEFAIL_TOUT:
1153 return xpcBteTimeOutError;
1154 case BTEFAIL_XTERR:
1155 return xpcBteXtalkError;
1156 case BTEFAIL_NOTAVAIL:
1157 return xpcBteNotAvailable;
1158 default:
1159 return xpcBteUnmappedError;
1234 } 1160 }
1235} 1161}
1236 1162
1237
1238
1239/* 1163/*
1240 * Check to see if there is any channel activity to/from the specified 1164 * Check to see if there is any channel activity to/from the specified
1241 * partition. 1165 * partition.
@@ -1246,11 +1170,9 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
1246 u64 IPI_amo; 1170 u64 IPI_amo;
1247 unsigned long irq_flags; 1171 unsigned long irq_flags;
1248 1172
1249
1250 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); 1173 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
1251 if (IPI_amo == 0) { 1174 if (IPI_amo == 0)
1252 return; 1175 return;
1253 }
1254 1176
1255 spin_lock_irqsave(&part->IPI_lock, irq_flags); 1177 spin_lock_irqsave(&part->IPI_lock, irq_flags);
1256 part->local_IPI_amo |= IPI_amo; 1178 part->local_IPI_amo |= IPI_amo;
@@ -1262,6 +1184,4 @@ xpc_check_for_channel_activity(struct xpc_partition *part)
1262 xpc_wakeup_channel_mgr(part); 1184 xpc_wakeup_channel_mgr(part);
1263} 1185}
1264 1186
1265 1187#endif /* _DRIVERS_MISC_SGIXP_XPC_H */
1266#endif /* _ASM_IA64_SN_XPC_H */
1267
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 44ccc0d789c9..bfcb9ea968e9 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) channel support. 10 * Cross Partition Communication (XPC) channel support.
12 * 11 *
@@ -15,7 +14,6 @@
15 * 14 *
16 */ 15 */
17 16
18
19#include <linux/kernel.h> 17#include <linux/kernel.h>
20#include <linux/init.h> 18#include <linux/init.h>
21#include <linux/sched.h> 19#include <linux/sched.h>
@@ -25,8 +23,7 @@
25#include <linux/completion.h> 23#include <linux/completion.h>
26#include <asm/sn/bte.h> 24#include <asm/sn/bte.h>
27#include <asm/sn/sn_sal.h> 25#include <asm/sn/sn_sal.h>
28#include <asm/sn/xpc.h> 26#include "xpc.h"
29
30 27
31/* 28/*
32 * Guarantee that the kzalloc'd memory is cacheline aligned. 29 * Guarantee that the kzalloc'd memory is cacheline aligned.
@@ -36,22 +33,21 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
36{ 33{
37 /* see if kzalloc will give us cachline aligned memory by default */ 34 /* see if kzalloc will give us cachline aligned memory by default */
38 *base = kzalloc(size, flags); 35 *base = kzalloc(size, flags);
39 if (*base == NULL) { 36 if (*base == NULL)
40 return NULL; 37 return NULL;
41 } 38
42 if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { 39 if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
43 return *base; 40 return *base;
44 } 41
45 kfree(*base); 42 kfree(*base);
46 43
47 /* nope, we'll have to do it ourselves */ 44 /* nope, we'll have to do it ourselves */
48 *base = kzalloc(size + L1_CACHE_BYTES, flags); 45 *base = kzalloc(size + L1_CACHE_BYTES, flags);
49 if (*base == NULL) { 46 if (*base == NULL)
50 return NULL; 47 return NULL;
51 }
52 return (void *) L1_CACHE_ALIGN((u64) *base);
53}
54 48
49 return (void *)L1_CACHE_ALIGN((u64)*base);
50}
55 51
56/* 52/*
57 * Set up the initial values for the XPartition Communication channels. 53 * Set up the initial values for the XPartition Communication channels.
@@ -62,7 +58,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
62 int ch_number; 58 int ch_number;
63 struct xpc_channel *ch; 59 struct xpc_channel *ch;
64 60
65
66 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 61 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
67 ch = &part->channels[ch_number]; 62 ch = &part->channels[ch_number];
68 63
@@ -72,7 +67,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
72 67
73 ch->local_GP = &part->local_GPs[ch_number]; 68 ch->local_GP = &part->local_GPs[ch_number];
74 ch->local_openclose_args = 69 ch->local_openclose_args =
75 &part->local_openclose_args[ch_number]; 70 &part->local_openclose_args[ch_number];
76 71
77 atomic_set(&ch->kthreads_assigned, 0); 72 atomic_set(&ch->kthreads_assigned, 0);
78 atomic_set(&ch->kthreads_idle, 0); 73 atomic_set(&ch->kthreads_idle, 0);
@@ -91,7 +86,6 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
91 } 86 }
92} 87}
93 88
94
95/* 89/*
96 * Setup the infrastructure necessary to support XPartition Communication 90 * Setup the infrastructure necessary to support XPartition Communication
97 * between the specified remote partition and the local one. 91 * between the specified remote partition and the local one.
@@ -103,7 +97,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
103 struct timer_list *timer; 97 struct timer_list *timer;
104 partid_t partid = XPC_PARTID(part); 98 partid_t partid = XPC_PARTID(part);
105 99
106
107 /* 100 /*
108 * Zero out MOST of the entry for this partition. Only the fields 101 * Zero out MOST of the entry for this partition. Only the fields
109 * starting with `nchannels' will be zeroed. The preceding fields must 102 * starting with `nchannels' will be zeroed. The preceding fields must
@@ -111,14 +104,14 @@ xpc_setup_infrastructure(struct xpc_partition *part)
111 * referenced during this memset() operation. 104 * referenced during this memset() operation.
112 */ 105 */
113 memset(&part->nchannels, 0, sizeof(struct xpc_partition) - 106 memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
114 offsetof(struct xpc_partition, nchannels)); 107 offsetof(struct xpc_partition, nchannels));
115 108
116 /* 109 /*
117 * Allocate all of the channel structures as a contiguous chunk of 110 * Allocate all of the channel structures as a contiguous chunk of
118 * memory. 111 * memory.
119 */ 112 */
120 part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, 113 part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
121 GFP_KERNEL); 114 GFP_KERNEL);
122 if (part->channels == NULL) { 115 if (part->channels == NULL) {
123 dev_err(xpc_chan, "can't get memory for channels\n"); 116 dev_err(xpc_chan, "can't get memory for channels\n");
124 return xpcNoMemory; 117 return xpcNoMemory;
@@ -126,11 +119,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
126 119
127 part->nchannels = XPC_NCHANNELS; 120 part->nchannels = XPC_NCHANNELS;
128 121
129
130 /* allocate all the required GET/PUT values */ 122 /* allocate all the required GET/PUT values */
131 123
132 part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, 124 part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
133 GFP_KERNEL, &part->local_GPs_base); 125 GFP_KERNEL,
126 &part->local_GPs_base);
134 if (part->local_GPs == NULL) { 127 if (part->local_GPs == NULL) {
135 kfree(part->channels); 128 kfree(part->channels);
136 part->channels = NULL; 129 part->channels = NULL;
@@ -140,7 +133,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
140 } 133 }
141 134
142 part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, 135 part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
143 GFP_KERNEL, &part->remote_GPs_base); 136 GFP_KERNEL,
137 &part->
138 remote_GPs_base);
144 if (part->remote_GPs == NULL) { 139 if (part->remote_GPs == NULL) {
145 dev_err(xpc_chan, "can't get memory for remote get/put " 140 dev_err(xpc_chan, "can't get memory for remote get/put "
146 "values\n"); 141 "values\n");
@@ -151,12 +146,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
151 return xpcNoMemory; 146 return xpcNoMemory;
152 } 147 }
153 148
154
155 /* allocate all the required open and close args */ 149 /* allocate all the required open and close args */
156 150
157 part->local_openclose_args = xpc_kzalloc_cacheline_aligned( 151 part->local_openclose_args =
158 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, 152 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
159 &part->local_openclose_args_base); 153 &part->local_openclose_args_base);
160 if (part->local_openclose_args == NULL) { 154 if (part->local_openclose_args == NULL) {
161 dev_err(xpc_chan, "can't get memory for local connect args\n"); 155 dev_err(xpc_chan, "can't get memory for local connect args\n");
162 kfree(part->remote_GPs_base); 156 kfree(part->remote_GPs_base);
@@ -168,9 +162,9 @@ xpc_setup_infrastructure(struct xpc_partition *part)
168 return xpcNoMemory; 162 return xpcNoMemory;
169 } 163 }
170 164
171 part->remote_openclose_args = xpc_kzalloc_cacheline_aligned( 165 part->remote_openclose_args =
172 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, 166 xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
173 &part->remote_openclose_args_base); 167 &part->remote_openclose_args_base);
174 if (part->remote_openclose_args == NULL) { 168 if (part->remote_openclose_args == NULL) {
175 dev_err(xpc_chan, "can't get memory for remote connect args\n"); 169 dev_err(xpc_chan, "can't get memory for remote connect args\n");
176 kfree(part->local_openclose_args_base); 170 kfree(part->local_openclose_args_base);
@@ -184,13 +178,11 @@ xpc_setup_infrastructure(struct xpc_partition *part)
184 return xpcNoMemory; 178 return xpcNoMemory;
185 } 179 }
186 180
187
188 xpc_initialize_channels(part, partid); 181 xpc_initialize_channels(part, partid);
189 182
190 atomic_set(&part->nchannels_active, 0); 183 atomic_set(&part->nchannels_active, 0);
191 atomic_set(&part->nchannels_engaged, 0); 184 atomic_set(&part->nchannels_engaged, 0);
192 185
193
194 /* local_IPI_amo were set to 0 by an earlier memset() */ 186 /* local_IPI_amo were set to 0 by an earlier memset() */
195 187
196 /* Initialize this partitions AMO_t structure */ 188 /* Initialize this partitions AMO_t structure */
@@ -203,7 +195,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
203 195
204 sprintf(part->IPI_owner, "xpc%02d", partid); 196 sprintf(part->IPI_owner, "xpc%02d", partid);
205 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, 197 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
206 part->IPI_owner, (void *) (u64) partid); 198 part->IPI_owner, (void *)(u64)partid);
207 if (ret != 0) { 199 if (ret != 0) {
208 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " 200 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
209 "errno=%d\n", -ret); 201 "errno=%d\n", -ret);
@@ -223,8 +215,8 @@ xpc_setup_infrastructure(struct xpc_partition *part)
223 /* Setup a timer to check for dropped IPIs */ 215 /* Setup a timer to check for dropped IPIs */
224 timer = &part->dropped_IPI_timer; 216 timer = &part->dropped_IPI_timer;
225 init_timer(timer); 217 init_timer(timer);
226 timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; 218 timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
227 timer->data = (unsigned long) part; 219 timer->data = (unsigned long)part;
228 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; 220 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
229 add_timer(timer); 221 add_timer(timer);
230 222
@@ -234,7 +226,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
234 */ 226 */
235 part->setup_state = XPC_P_SETUP; 227 part->setup_state = XPC_P_SETUP;
236 228
237
238 /* 229 /*
239 * Setup the per partition specific variables required by the 230 * Setup the per partition specific variables required by the
240 * remote partition to establish channel connections with us. 231 * remote partition to establish channel connections with us.
@@ -244,7 +235,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
244 */ 235 */
245 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); 236 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
246 xpc_vars_part[partid].openclose_args_pa = 237 xpc_vars_part[partid].openclose_args_pa =
247 __pa(part->local_openclose_args); 238 __pa(part->local_openclose_args);
248 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); 239 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
249 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ 240 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
250 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); 241 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
@@ -255,7 +246,6 @@ xpc_setup_infrastructure(struct xpc_partition *part)
255 return xpcSuccess; 246 return xpcSuccess;
256} 247}
257 248
258
259/* 249/*
260 * Create a wrapper that hides the underlying mechanism for pulling a cacheline 250 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
261 * (or multiple cachelines) from a remote partition. 251 * (or multiple cachelines) from a remote partition.
@@ -266,24 +256,21 @@ xpc_setup_infrastructure(struct xpc_partition *part)
266 */ 256 */
267static enum xpc_retval 257static enum xpc_retval
268xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, 258xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
269 const void *src, size_t cnt) 259 const void *src, size_t cnt)
270{ 260{
271 bte_result_t bte_ret; 261 bte_result_t bte_ret;
272 262
273 263 DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
274 DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src)); 264 DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
275 DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
276 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); 265 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
277 266
278 if (part->act_state == XPC_P_DEACTIVATING) { 267 if (part->act_state == XPC_P_DEACTIVATING)
279 return part->reason; 268 return part->reason;
280 }
281 269
282 bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt, 270 bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
283 (BTE_NORMAL | BTE_WACQUIRE), NULL); 271 (BTE_NORMAL | BTE_WACQUIRE), NULL);
284 if (bte_ret == BTE_SUCCESS) { 272 if (bte_ret == BTE_SUCCESS)
285 return xpcSuccess; 273 return xpcSuccess;
286 }
287 274
288 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", 275 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
289 XPC_PARTID(part), bte_ret); 276 XPC_PARTID(part), bte_ret);
@@ -291,7 +278,6 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
291 return xpc_map_bte_errors(bte_ret); 278 return xpc_map_bte_errors(bte_ret);
292} 279}
293 280
294
295/* 281/*
296 * Pull the remote per partition specific variables from the specified 282 * Pull the remote per partition specific variables from the specified
297 * partition. 283 * partition.
@@ -301,41 +287,40 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
301{ 287{
302 u8 buffer[L1_CACHE_BYTES * 2]; 288 u8 buffer[L1_CACHE_BYTES * 2];
303 struct xpc_vars_part *pulled_entry_cacheline = 289 struct xpc_vars_part *pulled_entry_cacheline =
304 (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer); 290 (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
305 struct xpc_vars_part *pulled_entry; 291 struct xpc_vars_part *pulled_entry;
306 u64 remote_entry_cacheline_pa, remote_entry_pa; 292 u64 remote_entry_cacheline_pa, remote_entry_pa;
307 partid_t partid = XPC_PARTID(part); 293 partid_t partid = XPC_PARTID(part);
308 enum xpc_retval ret; 294 enum xpc_retval ret;
309 295
310
311 /* pull the cacheline that contains the variables we're interested in */ 296 /* pull the cacheline that contains the variables we're interested in */
312 297
313 DBUG_ON(part->remote_vars_part_pa != 298 DBUG_ON(part->remote_vars_part_pa !=
314 L1_CACHE_ALIGN(part->remote_vars_part_pa)); 299 L1_CACHE_ALIGN(part->remote_vars_part_pa));
315 DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); 300 DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
316 301
317 remote_entry_pa = part->remote_vars_part_pa + 302 remote_entry_pa = part->remote_vars_part_pa +
318 sn_partition_id * sizeof(struct xpc_vars_part); 303 sn_partition_id * sizeof(struct xpc_vars_part);
319 304
320 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); 305 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
321 306
322 pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline + 307 pulled_entry = (struct xpc_vars_part *)((u64)pulled_entry_cacheline +
323 (remote_entry_pa & (L1_CACHE_BYTES - 1))); 308 (remote_entry_pa &
309 (L1_CACHE_BYTES - 1)));
324 310
325 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, 311 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
326 (void *) remote_entry_cacheline_pa, 312 (void *)remote_entry_cacheline_pa,
327 L1_CACHE_BYTES); 313 L1_CACHE_BYTES);
328 if (ret != xpcSuccess) { 314 if (ret != xpcSuccess) {
329 dev_dbg(xpc_chan, "failed to pull XPC vars_part from " 315 dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
330 "partition %d, ret=%d\n", partid, ret); 316 "partition %d, ret=%d\n", partid, ret);
331 return ret; 317 return ret;
332 } 318 }
333 319
334
335 /* see if they've been set up yet */ 320 /* see if they've been set up yet */
336 321
337 if (pulled_entry->magic != XPC_VP_MAGIC1 && 322 if (pulled_entry->magic != XPC_VP_MAGIC1 &&
338 pulled_entry->magic != XPC_VP_MAGIC2) { 323 pulled_entry->magic != XPC_VP_MAGIC2) {
339 324
340 if (pulled_entry->magic != 0) { 325 if (pulled_entry->magic != 0) {
341 dev_dbg(xpc_chan, "partition %d's XPC vars_part for " 326 dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
@@ -353,8 +338,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
353 /* validate the variables */ 338 /* validate the variables */
354 339
355 if (pulled_entry->GPs_pa == 0 || 340 if (pulled_entry->GPs_pa == 0 ||
356 pulled_entry->openclose_args_pa == 0 || 341 pulled_entry->openclose_args_pa == 0 ||
357 pulled_entry->IPI_amo_pa == 0) { 342 pulled_entry->IPI_amo_pa == 0) {
358 343
359 dev_err(xpc_chan, "partition %d's XPC vars_part for " 344 dev_err(xpc_chan, "partition %d's XPC vars_part for "
360 "partition %d are not valid\n", partid, 345 "partition %d are not valid\n", partid,
@@ -366,29 +351,26 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
366 351
367 part->remote_GPs_pa = pulled_entry->GPs_pa; 352 part->remote_GPs_pa = pulled_entry->GPs_pa;
368 part->remote_openclose_args_pa = 353 part->remote_openclose_args_pa =
369 pulled_entry->openclose_args_pa; 354 pulled_entry->openclose_args_pa;
370 part->remote_IPI_amo_va = 355 part->remote_IPI_amo_va =
371 (AMO_t *) __va(pulled_entry->IPI_amo_pa); 356 (AMO_t *)__va(pulled_entry->IPI_amo_pa);
372 part->remote_IPI_nasid = pulled_entry->IPI_nasid; 357 part->remote_IPI_nasid = pulled_entry->IPI_nasid;
373 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; 358 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
374 359
375 if (part->nchannels > pulled_entry->nchannels) { 360 if (part->nchannels > pulled_entry->nchannels)
376 part->nchannels = pulled_entry->nchannels; 361 part->nchannels = pulled_entry->nchannels;
377 }
378 362
379 /* let the other side know that we've pulled their variables */ 363 /* let the other side know that we've pulled their variables */
380 364
381 xpc_vars_part[partid].magic = XPC_VP_MAGIC2; 365 xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
382 } 366 }
383 367
384 if (pulled_entry->magic == XPC_VP_MAGIC1) { 368 if (pulled_entry->magic == XPC_VP_MAGIC1)
385 return xpcRetry; 369 return xpcRetry;
386 }
387 370
388 return xpcSuccess; 371 return xpcSuccess;
389} 372}
390 373
391
392/* 374/*
393 * Get the IPI flags and pull the openclose args and/or remote GPs as needed. 375 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
394 */ 376 */
@@ -399,23 +381,23 @@ xpc_get_IPI_flags(struct xpc_partition *part)
399 u64 IPI_amo; 381 u64 IPI_amo;
400 enum xpc_retval ret; 382 enum xpc_retval ret;
401 383
402
403 /* 384 /*
404 * See if there are any IPI flags to be handled. 385 * See if there are any IPI flags to be handled.
405 */ 386 */
406 387
407 spin_lock_irqsave(&part->IPI_lock, irq_flags); 388 spin_lock_irqsave(&part->IPI_lock, irq_flags);
408 if ((IPI_amo = part->local_IPI_amo) != 0) { 389 IPI_amo = part->local_IPI_amo;
390 if (IPI_amo != 0)
409 part->local_IPI_amo = 0; 391 part->local_IPI_amo = 0;
410 }
411 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
412 392
393 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
413 394
414 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { 395 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
415 ret = xpc_pull_remote_cachelines(part, 396 ret = xpc_pull_remote_cachelines(part,
416 part->remote_openclose_args, 397 part->remote_openclose_args,
417 (void *) part->remote_openclose_args_pa, 398 (void *)part->
418 XPC_OPENCLOSE_ARGS_SIZE); 399 remote_openclose_args_pa,
400 XPC_OPENCLOSE_ARGS_SIZE);
419 if (ret != xpcSuccess) { 401 if (ret != xpcSuccess) {
420 XPC_DEACTIVATE_PARTITION(part, ret); 402 XPC_DEACTIVATE_PARTITION(part, ret);
421 403
@@ -430,8 +412,8 @@ xpc_get_IPI_flags(struct xpc_partition *part)
430 412
431 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { 413 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
432 ret = xpc_pull_remote_cachelines(part, part->remote_GPs, 414 ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
433 (void *) part->remote_GPs_pa, 415 (void *)part->remote_GPs_pa,
434 XPC_GP_SIZE); 416 XPC_GP_SIZE);
435 if (ret != xpcSuccess) { 417 if (ret != xpcSuccess) {
436 XPC_DEACTIVATE_PARTITION(part, ret); 418 XPC_DEACTIVATE_PARTITION(part, ret);
437 419
@@ -446,7 +428,6 @@ xpc_get_IPI_flags(struct xpc_partition *part)
446 return IPI_amo; 428 return IPI_amo;
447} 429}
448 430
449
450/* 431/*
451 * Allocate the local message queue and the notify queue. 432 * Allocate the local message queue and the notify queue.
452 */ 433 */
@@ -457,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
457 int nentries; 438 int nentries;
458 size_t nbytes; 439 size_t nbytes;
459 440
460
461 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
462 // >>> iterations of the for-loop, bail if set?
463
464 // >>> should we impose a minimum #of entries? like 4 or 8?
465 for (nentries = ch->local_nentries; nentries > 0; nentries--) { 441 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
466 442
467 nbytes = nentries * ch->msg_size; 443 nbytes = nentries * ch->msg_size;
468 ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, 444 ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
469 GFP_KERNEL, 445 GFP_KERNEL,
470 &ch->local_msgqueue_base); 446 &ch->local_msgqueue_base);
471 if (ch->local_msgqueue == NULL) { 447 if (ch->local_msgqueue == NULL)
472 continue; 448 continue;
473 }
474 449
475 nbytes = nentries * sizeof(struct xpc_notify); 450 nbytes = nentries * sizeof(struct xpc_notify);
476 ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); 451 ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
@@ -497,7 +472,6 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
497 return xpcNoMemory; 472 return xpcNoMemory;
498} 473}
499 474
500
501/* 475/*
502 * Allocate the cached remote message queue. 476 * Allocate the cached remote message queue.
503 */ 477 */
@@ -508,22 +482,16 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
508 int nentries; 482 int nentries;
509 size_t nbytes; 483 size_t nbytes;
510 484
511
512 DBUG_ON(ch->remote_nentries <= 0); 485 DBUG_ON(ch->remote_nentries <= 0);
513 486
514 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
515 // >>> iterations of the for-loop, bail if set?
516
517 // >>> should we impose a minimum #of entries? like 4 or 8?
518 for (nentries = ch->remote_nentries; nentries > 0; nentries--) { 487 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
519 488
520 nbytes = nentries * ch->msg_size; 489 nbytes = nentries * ch->msg_size;
521 ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, 490 ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
522 GFP_KERNEL, 491 GFP_KERNEL,
523 &ch->remote_msgqueue_base); 492 &ch->remote_msgqueue_base);
524 if (ch->remote_msgqueue == NULL) { 493 if (ch->remote_msgqueue == NULL)
525 continue; 494 continue;
526 }
527 495
528 spin_lock_irqsave(&ch->lock, irq_flags); 496 spin_lock_irqsave(&ch->lock, irq_flags);
529 if (nentries < ch->remote_nentries) { 497 if (nentries < ch->remote_nentries) {
@@ -542,7 +510,6 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
542 return xpcNoMemory; 510 return xpcNoMemory;
543} 511}
544 512
545
546/* 513/*
547 * Allocate message queues and other stuff associated with a channel. 514 * Allocate message queues and other stuff associated with a channel.
548 * 515 *
@@ -554,14 +521,14 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
554 unsigned long irq_flags; 521 unsigned long irq_flags;
555 enum xpc_retval ret; 522 enum xpc_retval ret;
556 523
557
558 DBUG_ON(ch->flags & XPC_C_SETUP); 524 DBUG_ON(ch->flags & XPC_C_SETUP);
559 525
560 if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { 526 ret = xpc_allocate_local_msgqueue(ch);
527 if (ret != xpcSuccess)
561 return ret; 528 return ret;
562 }
563 529
564 if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { 530 ret = xpc_allocate_remote_msgqueue(ch);
531 if (ret != xpcSuccess) {
565 kfree(ch->local_msgqueue_base); 532 kfree(ch->local_msgqueue_base);
566 ch->local_msgqueue = NULL; 533 ch->local_msgqueue = NULL;
567 kfree(ch->notify_queue); 534 kfree(ch->notify_queue);
@@ -576,7 +543,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
576 return xpcSuccess; 543 return xpcSuccess;
577} 544}
578 545
579
580/* 546/*
581 * Process a connect message from a remote partition. 547 * Process a connect message from a remote partition.
582 * 548 *
@@ -588,11 +554,10 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
588{ 554{
589 enum xpc_retval ret; 555 enum xpc_retval ret;
590 556
591
592 DBUG_ON(!spin_is_locked(&ch->lock)); 557 DBUG_ON(!spin_is_locked(&ch->lock));
593 558
594 if (!(ch->flags & XPC_C_OPENREQUEST) || 559 if (!(ch->flags & XPC_C_OPENREQUEST) ||
595 !(ch->flags & XPC_C_ROPENREQUEST)) { 560 !(ch->flags & XPC_C_ROPENREQUEST)) {
596 /* nothing more to do for now */ 561 /* nothing more to do for now */
597 return; 562 return;
598 } 563 }
@@ -603,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
603 ret = xpc_allocate_msgqueues(ch); 568 ret = xpc_allocate_msgqueues(ch);
604 spin_lock_irqsave(&ch->lock, *irq_flags); 569 spin_lock_irqsave(&ch->lock, *irq_flags);
605 570
606 if (ret != xpcSuccess) { 571 if (ret != xpcSuccess)
607 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); 572 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
608 } 573
609 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { 574 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
610 return; 575 return;
611 }
612 576
613 DBUG_ON(!(ch->flags & XPC_C_SETUP)); 577 DBUG_ON(!(ch->flags & XPC_C_SETUP));
614 DBUG_ON(ch->local_msgqueue == NULL); 578 DBUG_ON(ch->local_msgqueue == NULL);
@@ -620,23 +584,21 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
620 xpc_IPI_send_openreply(ch, irq_flags); 584 xpc_IPI_send_openreply(ch, irq_flags);
621 } 585 }
622 586
623 if (!(ch->flags & XPC_C_ROPENREPLY)) { 587 if (!(ch->flags & XPC_C_ROPENREPLY))
624 return; 588 return;
625 }
626 589
627 DBUG_ON(ch->remote_msgqueue_pa == 0); 590 DBUG_ON(ch->remote_msgqueue_pa == 0);
628 591
629 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ 592 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
630 593
631 dev_info(xpc_chan, "channel %d to partition %d connected\n", 594 dev_info(xpc_chan, "channel %d to partition %d connected\n",
632 ch->number, ch->partid); 595 ch->number, ch->partid);
633 596
634 spin_unlock_irqrestore(&ch->lock, *irq_flags); 597 spin_unlock_irqrestore(&ch->lock, *irq_flags);
635 xpc_create_kthreads(ch, 1, 0); 598 xpc_create_kthreads(ch, 1, 0);
636 spin_lock_irqsave(&ch->lock, *irq_flags); 599 spin_lock_irqsave(&ch->lock, *irq_flags);
637} 600}
638 601
639
640/* 602/*
641 * Notify those who wanted to be notified upon delivery of their message. 603 * Notify those who wanted to be notified upon delivery of their message.
642 */ 604 */
@@ -647,7 +609,6 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
647 u8 notify_type; 609 u8 notify_type;
648 s64 get = ch->w_remote_GP.get - 1; 610 s64 get = ch->w_remote_GP.get - 1;
649 611
650
651 while (++get < put && atomic_read(&ch->n_to_notify) > 0) { 612 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
652 613
653 notify = &ch->notify_queue[get % ch->local_nentries]; 614 notify = &ch->notify_queue[get % ch->local_nentries];
@@ -660,8 +621,7 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
660 */ 621 */
661 notify_type = notify->type; 622 notify_type = notify->type;
662 if (notify_type == 0 || 623 if (notify_type == 0 ||
663 cmpxchg(&notify->type, notify_type, 0) != 624 cmpxchg(&notify->type, notify_type, 0) != notify_type) {
664 notify_type) {
665 continue; 625 continue;
666 } 626 }
667 627
@@ -672,20 +632,19 @@ xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
672 if (notify->func != NULL) { 632 if (notify->func != NULL) {
673 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " 633 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
674 "msg_number=%ld, partid=%d, channel=%d\n", 634 "msg_number=%ld, partid=%d, channel=%d\n",
675 (void *) notify, get, ch->partid, ch->number); 635 (void *)notify, get, ch->partid, ch->number);
676 636
677 notify->func(reason, ch->partid, ch->number, 637 notify->func(reason, ch->partid, ch->number,
678 notify->key); 638 notify->key);
679 639
680 dev_dbg(xpc_chan, "notify->func() returned, " 640 dev_dbg(xpc_chan, "notify->func() returned, "
681 "notify=0x%p, msg_number=%ld, partid=%d, " 641 "notify=0x%p, msg_number=%ld, partid=%d, "
682 "channel=%d\n", (void *) notify, get, 642 "channel=%d\n", (void *)notify, get,
683 ch->partid, ch->number); 643 ch->partid, ch->number);
684 } 644 }
685 } 645 }
686} 646}
687 647
688
689/* 648/*
690 * Free up message queues and other stuff that were allocated for the specified 649 * Free up message queues and other stuff that were allocated for the specified
691 * channel. 650 * channel.
@@ -733,7 +692,6 @@ xpc_free_msgqueues(struct xpc_channel *ch)
733 } 692 }
734} 693}
735 694
736
737/* 695/*
738 * spin_lock_irqsave() is expected to be held on entry. 696 * spin_lock_irqsave() is expected to be held on entry.
739 */ 697 */
@@ -743,46 +701,41 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
743 struct xpc_partition *part = &xpc_partitions[ch->partid]; 701 struct xpc_partition *part = &xpc_partitions[ch->partid];
744 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); 702 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
745 703
746
747 DBUG_ON(!spin_is_locked(&ch->lock)); 704 DBUG_ON(!spin_is_locked(&ch->lock));
748 705
749 if (!(ch->flags & XPC_C_DISCONNECTING)) { 706 if (!(ch->flags & XPC_C_DISCONNECTING))
750 return; 707 return;
751 }
752 708
753 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); 709 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
754 710
755 /* make sure all activity has settled down first */ 711 /* make sure all activity has settled down first */
756 712
757 if (atomic_read(&ch->kthreads_assigned) > 0 || 713 if (atomic_read(&ch->kthreads_assigned) > 0 ||
758 atomic_read(&ch->references) > 0) { 714 atomic_read(&ch->references) > 0) {
759 return; 715 return;
760 } 716 }
761 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 717 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
762 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); 718 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
763 719
764 if (part->act_state == XPC_P_DEACTIVATING) { 720 if (part->act_state == XPC_P_DEACTIVATING) {
765 /* can't proceed until the other side disengages from us */ 721 /* can't proceed until the other side disengages from us */
766 if (xpc_partition_engaged(1UL << ch->partid)) { 722 if (xpc_partition_engaged(1UL << ch->partid))
767 return; 723 return;
768 }
769 724
770 } else { 725 } else {
771 726
772 /* as long as the other side is up do the full protocol */ 727 /* as long as the other side is up do the full protocol */
773 728
774 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { 729 if (!(ch->flags & XPC_C_RCLOSEREQUEST))
775 return; 730 return;
776 }
777 731
778 if (!(ch->flags & XPC_C_CLOSEREPLY)) { 732 if (!(ch->flags & XPC_C_CLOSEREPLY)) {
779 ch->flags |= XPC_C_CLOSEREPLY; 733 ch->flags |= XPC_C_CLOSEREPLY;
780 xpc_IPI_send_closereply(ch, irq_flags); 734 xpc_IPI_send_closereply(ch, irq_flags);
781 } 735 }
782 736
783 if (!(ch->flags & XPC_C_RCLOSEREPLY)) { 737 if (!(ch->flags & XPC_C_RCLOSEREPLY))
784 return; 738 return;
785 }
786 } 739 }
787 740
788 /* wake those waiting for notify completion */ 741 /* wake those waiting for notify completion */
@@ -809,7 +762,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
809 762
810 if (channel_was_connected) { 763 if (channel_was_connected) {
811 dev_info(xpc_chan, "channel %d to partition %d disconnected, " 764 dev_info(xpc_chan, "channel %d to partition %d disconnected, "
812 "reason=%d\n", ch->number, ch->partid, ch->reason); 765 "reason=%d\n", ch->number, ch->partid, ch->reason);
813 } 766 }
814 767
815 if (ch->flags & XPC_C_WDISCONNECT) { 768 if (ch->flags & XPC_C_WDISCONNECT) {
@@ -820,35 +773,32 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
820 /* time to take action on any delayed IPI flags */ 773 /* time to take action on any delayed IPI flags */
821 spin_lock(&part->IPI_lock); 774 spin_lock(&part->IPI_lock);
822 XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number, 775 XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
823 ch->delayed_IPI_flags); 776 ch->delayed_IPI_flags);
824 spin_unlock(&part->IPI_lock); 777 spin_unlock(&part->IPI_lock);
825 } 778 }
826 ch->delayed_IPI_flags = 0; 779 ch->delayed_IPI_flags = 0;
827 } 780 }
828} 781}
829 782
830
831/* 783/*
832 * Process a change in the channel's remote connection state. 784 * Process a change in the channel's remote connection state.
833 */ 785 */
834static void 786static void
835xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, 787xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
836 u8 IPI_flags) 788 u8 IPI_flags)
837{ 789{
838 unsigned long irq_flags; 790 unsigned long irq_flags;
839 struct xpc_openclose_args *args = 791 struct xpc_openclose_args *args =
840 &part->remote_openclose_args[ch_number]; 792 &part->remote_openclose_args[ch_number];
841 struct xpc_channel *ch = &part->channels[ch_number]; 793 struct xpc_channel *ch = &part->channels[ch_number];
842 enum xpc_retval reason; 794 enum xpc_retval reason;
843 795
844
845
846 spin_lock_irqsave(&ch->lock, irq_flags); 796 spin_lock_irqsave(&ch->lock, irq_flags);
847 797
848again: 798again:
849 799
850 if ((ch->flags & XPC_C_DISCONNECTED) && 800 if ((ch->flags & XPC_C_DISCONNECTED) &&
851 (ch->flags & XPC_C_WDISCONNECT)) { 801 (ch->flags & XPC_C_WDISCONNECT)) {
852 /* 802 /*
853 * Delay processing IPI flags until thread waiting disconnect 803 * Delay processing IPI flags until thread waiting disconnect
854 * has had a chance to see that the channel is disconnected. 804 * has had a chance to see that the channel is disconnected.
@@ -858,7 +808,6 @@ again:
858 return; 808 return;
859 } 809 }
860 810
861
862 if (IPI_flags & XPC_IPI_CLOSEREQUEST) { 811 if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
863 812
864 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " 813 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
@@ -890,13 +839,14 @@ again:
890 if (ch->flags & XPC_C_DISCONNECTED) { 839 if (ch->flags & XPC_C_DISCONNECTED) {
891 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { 840 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
892 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, 841 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
893 ch_number) & XPC_IPI_OPENREQUEST)) { 842 ch_number) &
843 XPC_IPI_OPENREQUEST)) {
894 844
895 DBUG_ON(ch->delayed_IPI_flags != 0); 845 DBUG_ON(ch->delayed_IPI_flags != 0);
896 spin_lock(&part->IPI_lock); 846 spin_lock(&part->IPI_lock);
897 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 847 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
898 ch_number, 848 ch_number,
899 XPC_IPI_CLOSEREQUEST); 849 XPC_IPI_CLOSEREQUEST);
900 spin_unlock(&part->IPI_lock); 850 spin_unlock(&part->IPI_lock);
901 } 851 }
902 spin_unlock_irqrestore(&ch->lock, irq_flags); 852 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -921,11 +871,10 @@ again:
921 871
922 if (!(ch->flags & XPC_C_DISCONNECTING)) { 872 if (!(ch->flags & XPC_C_DISCONNECTING)) {
923 reason = args->reason; 873 reason = args->reason;
924 if (reason <= xpcSuccess || reason > xpcUnknownReason) { 874 if (reason <= xpcSuccess || reason > xpcUnknownReason)
925 reason = xpcUnknownReason; 875 reason = xpcUnknownReason;
926 } else if (reason == xpcUnregistering) { 876 else if (reason == xpcUnregistering)
927 reason = xpcOtherUnregistering; 877 reason = xpcOtherUnregistering;
928 }
929 878
930 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); 879 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
931 880
@@ -937,7 +886,6 @@ again:
937 xpc_process_disconnect(ch, &irq_flags); 886 xpc_process_disconnect(ch, &irq_flags);
938 } 887 }
939 888
940
941 if (IPI_flags & XPC_IPI_CLOSEREPLY) { 889 if (IPI_flags & XPC_IPI_CLOSEREPLY) {
942 890
943 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," 891 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
@@ -953,12 +901,13 @@ again:
953 901
954 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { 902 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
955 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number) 903 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
956 & XPC_IPI_CLOSEREQUEST)) { 904 & XPC_IPI_CLOSEREQUEST)) {
957 905
958 DBUG_ON(ch->delayed_IPI_flags != 0); 906 DBUG_ON(ch->delayed_IPI_flags != 0);
959 spin_lock(&part->IPI_lock); 907 spin_lock(&part->IPI_lock);
960 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 908 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
961 ch_number, XPC_IPI_CLOSEREPLY); 909 ch_number,
910 XPC_IPI_CLOSEREPLY);
962 spin_unlock(&part->IPI_lock); 911 spin_unlock(&part->IPI_lock);
963 } 912 }
964 spin_unlock_irqrestore(&ch->lock, irq_flags); 913 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -973,7 +922,6 @@ again:
973 } 922 }
974 } 923 }
975 924
976
977 if (IPI_flags & XPC_IPI_OPENREQUEST) { 925 if (IPI_flags & XPC_IPI_OPENREQUEST) {
978 926
979 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " 927 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
@@ -982,7 +930,7 @@ again:
982 ch->partid, ch->number); 930 ch->partid, ch->number);
983 931
984 if (part->act_state == XPC_P_DEACTIVATING || 932 if (part->act_state == XPC_P_DEACTIVATING ||
985 (ch->flags & XPC_C_ROPENREQUEST)) { 933 (ch->flags & XPC_C_ROPENREQUEST)) {
986 spin_unlock_irqrestore(&ch->lock, irq_flags); 934 spin_unlock_irqrestore(&ch->lock, irq_flags);
987 return; 935 return;
988 } 936 }
@@ -993,9 +941,9 @@ again:
993 return; 941 return;
994 } 942 }
995 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | 943 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
996 XPC_C_OPENREQUEST))); 944 XPC_C_OPENREQUEST)));
997 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | 945 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
998 XPC_C_OPENREPLY | XPC_C_CONNECTED)); 946 XPC_C_OPENREPLY | XPC_C_CONNECTED));
999 947
1000 /* 948 /*
1001 * The meaningful OPENREQUEST connection state fields are: 949 * The meaningful OPENREQUEST connection state fields are:
@@ -1011,11 +959,10 @@ again:
1011 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); 959 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
1012 ch->remote_nentries = args->local_nentries; 960 ch->remote_nentries = args->local_nentries;
1013 961
1014
1015 if (ch->flags & XPC_C_OPENREQUEST) { 962 if (ch->flags & XPC_C_OPENREQUEST) {
1016 if (args->msg_size != ch->msg_size) { 963 if (args->msg_size != ch->msg_size) {
1017 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 964 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1018 &irq_flags); 965 &irq_flags);
1019 spin_unlock_irqrestore(&ch->lock, irq_flags); 966 spin_unlock_irqrestore(&ch->lock, irq_flags);
1020 return; 967 return;
1021 } 968 }
@@ -1031,7 +978,6 @@ again:
1031 xpc_process_connect(ch, &irq_flags); 978 xpc_process_connect(ch, &irq_flags);
1032 } 979 }
1033 980
1034
1035 if (IPI_flags & XPC_IPI_OPENREPLY) { 981 if (IPI_flags & XPC_IPI_OPENREPLY) {
1036 982
1037 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " 983 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
@@ -1046,7 +992,7 @@ again:
1046 } 992 }
1047 if (!(ch->flags & XPC_C_OPENREQUEST)) { 993 if (!(ch->flags & XPC_C_OPENREQUEST)) {
1048 XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError, 994 XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
1049 &irq_flags); 995 &irq_flags);
1050 spin_unlock_irqrestore(&ch->lock, irq_flags); 996 spin_unlock_irqrestore(&ch->lock, irq_flags);
1051 return; 997 return;
1052 } 998 }
@@ -1057,7 +1003,7 @@ again:
1057 /* 1003 /*
1058 * The meaningful OPENREPLY connection state fields are: 1004 * The meaningful OPENREPLY connection state fields are:
1059 * local_msgqueue_pa = physical address of remote 1005 * local_msgqueue_pa = physical address of remote
1060 * partition's local_msgqueue 1006 * partition's local_msgqueue
1061 * local_nentries = remote partition's local_nentries 1007 * local_nentries = remote partition's local_nentries
1062 * remote_nentries = remote partition's remote_nentries 1008 * remote_nentries = remote partition's remote_nentries
1063 */ 1009 */
@@ -1093,7 +1039,6 @@ again:
1093 spin_unlock_irqrestore(&ch->lock, irq_flags); 1039 spin_unlock_irqrestore(&ch->lock, irq_flags);
1094} 1040}
1095 1041
1096
1097/* 1042/*
1098 * Attempt to establish a channel connection to a remote partition. 1043 * Attempt to establish a channel connection to a remote partition.
1099 */ 1044 */
@@ -1103,10 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch)
1103 unsigned long irq_flags; 1048 unsigned long irq_flags;
1104 struct xpc_registration *registration = &xpc_registrations[ch->number]; 1049 struct xpc_registration *registration = &xpc_registrations[ch->number];
1105 1050
1106 1051 if (mutex_trylock(&registration->mutex) == 0)
1107 if (mutex_trylock(&registration->mutex) == 0) {
1108 return xpcRetry; 1052 return xpcRetry;
1109 }
1110 1053
1111 if (!XPC_CHANNEL_REGISTERED(ch->number)) { 1054 if (!XPC_CHANNEL_REGISTERED(ch->number)) {
1112 mutex_unlock(&registration->mutex); 1055 mutex_unlock(&registration->mutex);
@@ -1124,7 +1067,6 @@ xpc_connect_channel(struct xpc_channel *ch)
1124 return ch->reason; 1067 return ch->reason;
1125 } 1068 }
1126 1069
1127
1128 /* add info from the channel connect registration to the channel */ 1070 /* add info from the channel connect registration to the channel */
1129 1071
1130 ch->kthreads_assigned_limit = registration->assigned_limit; 1072 ch->kthreads_assigned_limit = registration->assigned_limit;
@@ -1154,7 +1096,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1154 */ 1096 */
1155 mutex_unlock(&registration->mutex); 1097 mutex_unlock(&registration->mutex);
1156 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 1098 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1157 &irq_flags); 1099 &irq_flags);
1158 spin_unlock_irqrestore(&ch->lock, irq_flags); 1100 spin_unlock_irqrestore(&ch->lock, irq_flags);
1159 return xpcUnequalMsgSizes; 1101 return xpcUnequalMsgSizes;
1160 } 1102 }
@@ -1169,7 +1111,6 @@ xpc_connect_channel(struct xpc_channel *ch)
1169 1111
1170 mutex_unlock(&registration->mutex); 1112 mutex_unlock(&registration->mutex);
1171 1113
1172
1173 /* initiate the connection */ 1114 /* initiate the connection */
1174 1115
1175 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); 1116 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
@@ -1182,7 +1123,6 @@ xpc_connect_channel(struct xpc_channel *ch)
1182 return xpcSuccess; 1123 return xpcSuccess;
1183} 1124}
1184 1125
1185
1186/* 1126/*
1187 * Clear some of the msg flags in the local message queue. 1127 * Clear some of the msg flags in the local message queue.
1188 */ 1128 */
@@ -1192,16 +1132,15 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
1192 struct xpc_msg *msg; 1132 struct xpc_msg *msg;
1193 s64 get; 1133 s64 get;
1194 1134
1195
1196 get = ch->w_remote_GP.get; 1135 get = ch->w_remote_GP.get;
1197 do { 1136 do {
1198 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1137 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1199 (get % ch->local_nentries) * ch->msg_size); 1138 (get % ch->local_nentries) *
1139 ch->msg_size);
1200 msg->flags = 0; 1140 msg->flags = 0;
1201 } while (++get < (volatile s64) ch->remote_GP.get); 1141 } while (++get < ch->remote_GP.get);
1202} 1142}
1203 1143
1204
1205/* 1144/*
1206 * Clear some of the msg flags in the remote message queue. 1145 * Clear some of the msg flags in the remote message queue.
1207 */ 1146 */
@@ -1211,43 +1150,39 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
1211 struct xpc_msg *msg; 1150 struct xpc_msg *msg;
1212 s64 put; 1151 s64 put;
1213 1152
1214
1215 put = ch->w_remote_GP.put; 1153 put = ch->w_remote_GP.put;
1216 do { 1154 do {
1217 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 1155 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
1218 (put % ch->remote_nentries) * ch->msg_size); 1156 (put % ch->remote_nentries) *
1157 ch->msg_size);
1219 msg->flags = 0; 1158 msg->flags = 0;
1220 } while (++put < (volatile s64) ch->remote_GP.put); 1159 } while (++put < ch->remote_GP.put);
1221} 1160}
1222 1161
1223
1224static void 1162static void
1225xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) 1163xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1226{ 1164{
1227 struct xpc_channel *ch = &part->channels[ch_number]; 1165 struct xpc_channel *ch = &part->channels[ch_number];
1228 int nmsgs_sent; 1166 int nmsgs_sent;
1229 1167
1230
1231 ch->remote_GP = part->remote_GPs[ch_number]; 1168 ch->remote_GP = part->remote_GPs[ch_number];
1232 1169
1233
1234 /* See what, if anything, has changed for each connected channel */ 1170 /* See what, if anything, has changed for each connected channel */
1235 1171
1236 xpc_msgqueue_ref(ch); 1172 xpc_msgqueue_ref(ch);
1237 1173
1238 if (ch->w_remote_GP.get == ch->remote_GP.get && 1174 if (ch->w_remote_GP.get == ch->remote_GP.get &&
1239 ch->w_remote_GP.put == ch->remote_GP.put) { 1175 ch->w_remote_GP.put == ch->remote_GP.put) {
1240 /* nothing changed since GPs were last pulled */ 1176 /* nothing changed since GPs were last pulled */
1241 xpc_msgqueue_deref(ch); 1177 xpc_msgqueue_deref(ch);
1242 return; 1178 return;
1243 } 1179 }
1244 1180
1245 if (!(ch->flags & XPC_C_CONNECTED)){ 1181 if (!(ch->flags & XPC_C_CONNECTED)) {
1246 xpc_msgqueue_deref(ch); 1182 xpc_msgqueue_deref(ch);
1247 return; 1183 return;
1248 } 1184 }
1249 1185
1250
1251 /* 1186 /*
1252 * First check to see if messages recently sent by us have been 1187 * First check to see if messages recently sent by us have been
1253 * received by the other side. (The remote GET value will have 1188 * received by the other side. (The remote GET value will have
@@ -1269,7 +1204,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1269 * received and delivered by the other side. 1204 * received and delivered by the other side.
1270 */ 1205 */
1271 xpc_notify_senders(ch, xpcMsgDelivered, 1206 xpc_notify_senders(ch, xpcMsgDelivered,
1272 ch->remote_GP.get); 1207 ch->remote_GP.get);
1273 } 1208 }
1274 1209
1275 /* 1210 /*
@@ -1288,12 +1223,10 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1288 * If anyone was waiting for message queue entries to become 1223 * If anyone was waiting for message queue entries to become
1289 * available, wake them up. 1224 * available, wake them up.
1290 */ 1225 */
1291 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { 1226 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1292 wake_up(&ch->msg_allocate_wq); 1227 wake_up(&ch->msg_allocate_wq);
1293 }
1294 } 1228 }
1295 1229
1296
1297 /* 1230 /*
1298 * Now check for newly sent messages by the other side. (The remote 1231 * Now check for newly sent messages by the other side. (The remote
1299 * PUT value will have changed since we last looked at it.) 1232 * PUT value will have changed since we last looked at it.)
@@ -1318,16 +1251,14 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1318 "delivered=%d, partid=%d, channel=%d\n", 1251 "delivered=%d, partid=%d, channel=%d\n",
1319 nmsgs_sent, ch->partid, ch->number); 1252 nmsgs_sent, ch->partid, ch->number);
1320 1253
1321 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { 1254 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1322 xpc_activate_kthreads(ch, nmsgs_sent); 1255 xpc_activate_kthreads(ch, nmsgs_sent);
1323 }
1324 } 1256 }
1325 } 1257 }
1326 1258
1327 xpc_msgqueue_deref(ch); 1259 xpc_msgqueue_deref(ch);
1328} 1260}
1329 1261
1330
1331void 1262void
1332xpc_process_channel_activity(struct xpc_partition *part) 1263xpc_process_channel_activity(struct xpc_partition *part)
1333{ 1264{
@@ -1337,7 +1268,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
1337 int ch_number; 1268 int ch_number;
1338 u32 ch_flags; 1269 u32 ch_flags;
1339 1270
1340
1341 IPI_amo = xpc_get_IPI_flags(part); 1271 IPI_amo = xpc_get_IPI_flags(part);
1342 1272
1343 /* 1273 /*
@@ -1350,7 +1280,6 @@ xpc_process_channel_activity(struct xpc_partition *part)
1350 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 1280 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1351 ch = &part->channels[ch_number]; 1281 ch = &part->channels[ch_number];
1352 1282
1353
1354 /* 1283 /*
1355 * Process any open or close related IPI flags, and then deal 1284 * Process any open or close related IPI flags, and then deal
1356 * with connecting or disconnecting the channel as required. 1285 * with connecting or disconnecting the channel as required.
@@ -1358,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
1358 1287
1359 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); 1288 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
1360 1289
1361 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { 1290 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags))
1362 xpc_process_openclose_IPI(part, ch_number, IPI_flags); 1291 xpc_process_openclose_IPI(part, ch_number, IPI_flags);
1363 }
1364 1292
1365 ch_flags = ch->flags; /* need an atomic snapshot of flags */ 1293 ch_flags = ch->flags; /* need an atomic snapshot of flags */
1366 1294
@@ -1371,14 +1299,13 @@ xpc_process_channel_activity(struct xpc_partition *part)
1371 continue; 1299 continue;
1372 } 1300 }
1373 1301
1374 if (part->act_state == XPC_P_DEACTIVATING) { 1302 if (part->act_state == XPC_P_DEACTIVATING)
1375 continue; 1303 continue;
1376 }
1377 1304
1378 if (!(ch_flags & XPC_C_CONNECTED)) { 1305 if (!(ch_flags & XPC_C_CONNECTED)) {
1379 if (!(ch_flags & XPC_C_OPENREQUEST)) { 1306 if (!(ch_flags & XPC_C_OPENREQUEST)) {
1380 DBUG_ON(ch_flags & XPC_C_SETUP); 1307 DBUG_ON(ch_flags & XPC_C_SETUP);
1381 (void) xpc_connect_channel(ch); 1308 (void)xpc_connect_channel(ch);
1382 } else { 1309 } else {
1383 spin_lock_irqsave(&ch->lock, irq_flags); 1310 spin_lock_irqsave(&ch->lock, irq_flags);
1384 xpc_process_connect(ch, &irq_flags); 1311 xpc_process_connect(ch, &irq_flags);
@@ -1387,20 +1314,17 @@ xpc_process_channel_activity(struct xpc_partition *part)
1387 continue; 1314 continue;
1388 } 1315 }
1389 1316
1390
1391 /* 1317 /*
1392 * Process any message related IPI flags, this may involve the 1318 * Process any message related IPI flags, this may involve the
1393 * activation of kthreads to deliver any pending messages sent 1319 * activation of kthreads to deliver any pending messages sent
1394 * from the other partition. 1320 * from the other partition.
1395 */ 1321 */
1396 1322
1397 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { 1323 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags))
1398 xpc_process_msg_IPI(part, ch_number); 1324 xpc_process_msg_IPI(part, ch_number);
1399 }
1400 } 1325 }
1401} 1326}
1402 1327
1403
1404/* 1328/*
1405 * XPC's heartbeat code calls this function to inform XPC that a partition is 1329 * XPC's heartbeat code calls this function to inform XPC that a partition is
1406 * going down. XPC responds by tearing down the XPartition Communication 1330 * going down. XPC responds by tearing down the XPartition Communication
@@ -1417,7 +1341,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
1417 int ch_number; 1341 int ch_number;
1418 struct xpc_channel *ch; 1342 struct xpc_channel *ch;
1419 1343
1420
1421 dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", 1344 dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
1422 XPC_PARTID(part), reason); 1345 XPC_PARTID(part), reason);
1423 1346
@@ -1426,7 +1349,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
1426 return; 1349 return;
1427 } 1350 }
1428 1351
1429
1430 /* disconnect channels associated with the partition going down */ 1352 /* disconnect channels associated with the partition going down */
1431 1353
1432 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { 1354 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
@@ -1446,7 +1368,6 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
1446 xpc_part_deref(part); 1368 xpc_part_deref(part);
1447} 1369}
1448 1370
1449
1450/* 1371/*
1451 * Teardown the infrastructure necessary to support XPartition Communication 1372 * Teardown the infrastructure necessary to support XPartition Communication
1452 * between the specified remote partition and the local one. 1373 * between the specified remote partition and the local one.
@@ -1456,7 +1377,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1456{ 1377{
1457 partid_t partid = XPC_PARTID(part); 1378 partid_t partid = XPC_PARTID(part);
1458 1379
1459
1460 /* 1380 /*
1461 * We start off by making this partition inaccessible to local 1381 * We start off by making this partition inaccessible to local
1462 * processes by marking it as no longer setup. Then we make it 1382 * processes by marking it as no longer setup. Then we make it
@@ -1473,9 +1393,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1473 1393
1474 xpc_vars_part[partid].magic = 0; 1394 xpc_vars_part[partid].magic = 0;
1475 1395
1476 1396 free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
1477 free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
1478
1479 1397
1480 /* 1398 /*
1481 * Before proceeding with the teardown we have to wait until all 1399 * Before proceeding with the teardown we have to wait until all
@@ -1483,7 +1401,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1483 */ 1401 */
1484 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); 1402 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
1485 1403
1486
1487 /* now we can begin tearing down the infrastructure */ 1404 /* now we can begin tearing down the infrastructure */
1488 1405
1489 part->setup_state = XPC_P_TORNDOWN; 1406 part->setup_state = XPC_P_TORNDOWN;
@@ -1504,7 +1421,6 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
1504 part->local_IPI_amo_va = NULL; 1421 part->local_IPI_amo_va = NULL;
1505} 1422}
1506 1423
1507
1508/* 1424/*
1509 * Called by XP at the time of channel connection registration to cause 1425 * Called by XP at the time of channel connection registration to cause
1510 * XPC to establish connections to all currently active partitions. 1426 * XPC to establish connections to all currently active partitions.
@@ -1516,7 +1432,6 @@ xpc_initiate_connect(int ch_number)
1516 struct xpc_partition *part; 1432 struct xpc_partition *part;
1517 struct xpc_channel *ch; 1433 struct xpc_channel *ch;
1518 1434
1519
1520 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 1435 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1521 1436
1522 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1437 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
@@ -1535,7 +1450,6 @@ xpc_initiate_connect(int ch_number)
1535 } 1450 }
1536} 1451}
1537 1452
1538
1539void 1453void
1540xpc_connected_callout(struct xpc_channel *ch) 1454xpc_connected_callout(struct xpc_channel *ch)
1541{ 1455{
@@ -1546,14 +1460,13 @@ xpc_connected_callout(struct xpc_channel *ch)
1546 "partid=%d, channel=%d\n", ch->partid, ch->number); 1460 "partid=%d, channel=%d\n", ch->partid, ch->number);
1547 1461
1548 ch->func(xpcConnected, ch->partid, ch->number, 1462 ch->func(xpcConnected, ch->partid, ch->number,
1549 (void *) (u64) ch->local_nentries, ch->key); 1463 (void *)(u64)ch->local_nentries, ch->key);
1550 1464
1551 dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " 1465 dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
1552 "partid=%d, channel=%d\n", ch->partid, ch->number); 1466 "partid=%d, channel=%d\n", ch->partid, ch->number);
1553 } 1467 }
1554} 1468}
1555 1469
1556
1557/* 1470/*
1558 * Called by XP at the time of channel connection unregistration to cause 1471 * Called by XP at the time of channel connection unregistration to cause
1559 * XPC to teardown all current connections for the specified channel. 1472 * XPC to teardown all current connections for the specified channel.
@@ -1575,7 +1488,6 @@ xpc_initiate_disconnect(int ch_number)
1575 struct xpc_partition *part; 1488 struct xpc_partition *part;
1576 struct xpc_channel *ch; 1489 struct xpc_channel *ch;
1577 1490
1578
1579 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); 1491 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1580 1492
1581 /* initiate the channel disconnect for every active partition */ 1493 /* initiate the channel disconnect for every active partition */
@@ -1592,7 +1504,7 @@ xpc_initiate_disconnect(int ch_number)
1592 ch->flags |= XPC_C_WDISCONNECT; 1504 ch->flags |= XPC_C_WDISCONNECT;
1593 1505
1594 XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, 1506 XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
1595 &irq_flags); 1507 &irq_flags);
1596 } 1508 }
1597 1509
1598 spin_unlock_irqrestore(&ch->lock, irq_flags); 1510 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1605,7 +1517,6 @@ xpc_initiate_disconnect(int ch_number)
1605 xpc_disconnect_wait(ch_number); 1517 xpc_disconnect_wait(ch_number);
1606} 1518}
1607 1519
1608
1609/* 1520/*
1610 * To disconnect a channel, and reflect it back to all who may be waiting. 1521 * To disconnect a channel, and reflect it back to all who may be waiting.
1611 * 1522 *
@@ -1617,16 +1528,15 @@ xpc_initiate_disconnect(int ch_number)
1617 */ 1528 */
1618void 1529void
1619xpc_disconnect_channel(const int line, struct xpc_channel *ch, 1530xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1620 enum xpc_retval reason, unsigned long *irq_flags) 1531 enum xpc_retval reason, unsigned long *irq_flags)
1621{ 1532{
1622 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); 1533 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
1623 1534
1624
1625 DBUG_ON(!spin_is_locked(&ch->lock)); 1535 DBUG_ON(!spin_is_locked(&ch->lock));
1626 1536
1627 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { 1537 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
1628 return; 1538 return;
1629 } 1539
1630 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); 1540 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
1631 1541
1632 dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", 1542 dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
@@ -1637,14 +1547,13 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1637 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); 1547 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
1638 /* some of these may not have been set */ 1548 /* some of these may not have been set */
1639 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | 1549 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
1640 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | 1550 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
1641 XPC_C_CONNECTING | XPC_C_CONNECTED); 1551 XPC_C_CONNECTING | XPC_C_CONNECTED);
1642 1552
1643 xpc_IPI_send_closerequest(ch, irq_flags); 1553 xpc_IPI_send_closerequest(ch, irq_flags);
1644 1554
1645 if (channel_was_connected) { 1555 if (channel_was_connected)
1646 ch->flags |= XPC_C_WASCONNECTED; 1556 ch->flags |= XPC_C_WASCONNECTED;
1647 }
1648 1557
1649 spin_unlock_irqrestore(&ch->lock, *irq_flags); 1558 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1650 1559
@@ -1653,20 +1562,18 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1653 wake_up_all(&ch->idle_wq); 1562 wake_up_all(&ch->idle_wq);
1654 1563
1655 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 1564 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
1656 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { 1565 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
1657 /* start a kthread that will do the xpcDisconnecting callout */ 1566 /* start a kthread that will do the xpcDisconnecting callout */
1658 xpc_create_kthreads(ch, 1, 1); 1567 xpc_create_kthreads(ch, 1, 1);
1659 } 1568 }
1660 1569
1661 /* wake those waiting to allocate an entry from the local msg queue */ 1570 /* wake those waiting to allocate an entry from the local msg queue */
1662 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { 1571 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1663 wake_up(&ch->msg_allocate_wq); 1572 wake_up(&ch->msg_allocate_wq);
1664 }
1665 1573
1666 spin_lock_irqsave(&ch->lock, *irq_flags); 1574 spin_lock_irqsave(&ch->lock, *irq_flags);
1667} 1575}
1668 1576
1669
1670void 1577void
1671xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason) 1578xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
1672{ 1579{
@@ -1687,7 +1594,6 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
1687 } 1594 }
1688} 1595}
1689 1596
1690
1691/* 1597/*
1692 * Wait for a message entry to become available for the specified channel, 1598 * Wait for a message entry to become available for the specified channel,
1693 * but don't wait any longer than 1 jiffy. 1599 * but don't wait any longer than 1 jiffy.
@@ -1697,9 +1603,8 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
1697{ 1603{
1698 enum xpc_retval ret; 1604 enum xpc_retval ret;
1699 1605
1700
1701 if (ch->flags & XPC_C_DISCONNECTING) { 1606 if (ch->flags & XPC_C_DISCONNECTING) {
1702 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? 1607 DBUG_ON(ch->reason == xpcInterrupted);
1703 return ch->reason; 1608 return ch->reason;
1704 } 1609 }
1705 1610
@@ -1709,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
1709 1614
1710 if (ch->flags & XPC_C_DISCONNECTING) { 1615 if (ch->flags & XPC_C_DISCONNECTING) {
1711 ret = ch->reason; 1616 ret = ch->reason;
1712 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? 1617 DBUG_ON(ch->reason == xpcInterrupted);
1713 } else if (ret == 0) { 1618 } else if (ret == 0) {
1714 ret = xpcTimeout; 1619 ret = xpcTimeout;
1715 } else { 1620 } else {
@@ -1719,20 +1624,18 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
1719 return ret; 1624 return ret;
1720} 1625}
1721 1626
1722
1723/* 1627/*
1724 * Allocate an entry for a message from the message queue associated with the 1628 * Allocate an entry for a message from the message queue associated with the
1725 * specified channel. 1629 * specified channel.
1726 */ 1630 */
1727static enum xpc_retval 1631static enum xpc_retval
1728xpc_allocate_msg(struct xpc_channel *ch, u32 flags, 1632xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1729 struct xpc_msg **address_of_msg) 1633 struct xpc_msg **address_of_msg)
1730{ 1634{
1731 struct xpc_msg *msg; 1635 struct xpc_msg *msg;
1732 enum xpc_retval ret; 1636 enum xpc_retval ret;
1733 s64 put; 1637 s64 put;
1734 1638
1735
1736 /* this reference will be dropped in xpc_send_msg() */ 1639 /* this reference will be dropped in xpc_send_msg() */
1737 xpc_msgqueue_ref(ch); 1640 xpc_msgqueue_ref(ch);
1738 1641
@@ -1745,7 +1648,6 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1745 return xpcNotConnected; 1648 return xpcNotConnected;
1746 } 1649 }
1747 1650
1748
1749 /* 1651 /*
1750 * Get the next available message entry from the local message queue. 1652 * Get the next available message entry from the local message queue.
1751 * If none are available, we'll make sure that we grab the latest 1653 * If none are available, we'll make sure that we grab the latest
@@ -1755,25 +1657,23 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1755 1657
1756 while (1) { 1658 while (1) {
1757 1659
1758 put = (volatile s64) ch->w_local_GP.put; 1660 put = ch->w_local_GP.put;
1759 if (put - (volatile s64) ch->w_remote_GP.get < 1661 rmb(); /* guarantee that .put loads before .get */
1760 ch->local_nentries) { 1662 if (put - ch->w_remote_GP.get < ch->local_nentries) {
1761 1663
1762 /* There are available message entries. We need to try 1664 /* There are available message entries. We need to try
1763 * to secure one for ourselves. We'll do this by trying 1665 * to secure one for ourselves. We'll do this by trying
1764 * to increment w_local_GP.put as long as someone else 1666 * to increment w_local_GP.put as long as someone else
1765 * doesn't beat us to it. If they do, we'll have to 1667 * doesn't beat us to it. If they do, we'll have to
1766 * try again. 1668 * try again.
1767 */ 1669 */
1768 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == 1670 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) {
1769 put) {
1770 /* we got the entry referenced by put */ 1671 /* we got the entry referenced by put */
1771 break; 1672 break;
1772 } 1673 }
1773 continue; /* try again */ 1674 continue; /* try again */
1774 } 1675 }
1775 1676
1776
1777 /* 1677 /*
1778 * There aren't any available msg entries at this time. 1678 * There aren't any available msg entries at this time.
1779 * 1679 *
@@ -1783,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1783 * that will cause the IPI handler to fetch the latest 1683 * that will cause the IPI handler to fetch the latest
1784 * GP values as if an IPI was sent by the other side. 1684 * GP values as if an IPI was sent by the other side.
1785 */ 1685 */
1786 if (ret == xpcTimeout) { 1686 if (ret == xpcTimeout)
1787 xpc_IPI_send_local_msgrequest(ch); 1687 xpc_IPI_send_local_msgrequest(ch);
1788 }
1789 1688
1790 if (flags & XPC_NOWAIT) { 1689 if (flags & XPC_NOWAIT) {
1791 xpc_msgqueue_deref(ch); 1690 xpc_msgqueue_deref(ch);
@@ -1799,25 +1698,22 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1799 } 1698 }
1800 } 1699 }
1801 1700
1802
1803 /* get the message's address and initialize it */ 1701 /* get the message's address and initialize it */
1804 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1702 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1805 (put % ch->local_nentries) * ch->msg_size); 1703 (put % ch->local_nentries) * ch->msg_size);
1806
1807 1704
1808 DBUG_ON(msg->flags != 0); 1705 DBUG_ON(msg->flags != 0);
1809 msg->number = put; 1706 msg->number = put;
1810 1707
1811 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " 1708 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
1812 "msg_number=%ld, partid=%d, channel=%d\n", put + 1, 1709 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
1813 (void *) msg, msg->number, ch->partid, ch->number); 1710 (void *)msg, msg->number, ch->partid, ch->number);
1814 1711
1815 *address_of_msg = msg; 1712 *address_of_msg = msg;
1816 1713
1817 return xpcSuccess; 1714 return xpcSuccess;
1818} 1715}
1819 1716
1820
1821/* 1717/*
1822 * Allocate an entry for a message from the message queue associated with the 1718 * Allocate an entry for a message from the message queue associated with the
1823 * specified channel. NOTE that this routine can sleep waiting for a message 1719 * specified channel. NOTE that this routine can sleep waiting for a message
@@ -1838,7 +1734,6 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1838 enum xpc_retval ret = xpcUnknownReason; 1734 enum xpc_retval ret = xpcUnknownReason;
1839 struct xpc_msg *msg = NULL; 1735 struct xpc_msg *msg = NULL;
1840 1736
1841
1842 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1737 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
1843 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 1738 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1844 1739
@@ -1848,15 +1743,13 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1848 ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); 1743 ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
1849 xpc_part_deref(part); 1744 xpc_part_deref(part);
1850 1745
1851 if (msg != NULL) { 1746 if (msg != NULL)
1852 *payload = &msg->payload; 1747 *payload = &msg->payload;
1853 }
1854 } 1748 }
1855 1749
1856 return ret; 1750 return ret;
1857} 1751}
1858 1752
1859
1860/* 1753/*
1861 * Now we actually send the messages that are ready to be sent by advancing 1754 * Now we actually send the messages that are ready to be sent by advancing
1862 * the local message queue's Put value and then send an IPI to the recipient 1755 * the local message queue's Put value and then send an IPI to the recipient
@@ -1869,20 +1762,18 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1869 s64 put = initial_put + 1; 1762 s64 put = initial_put + 1;
1870 int send_IPI = 0; 1763 int send_IPI = 0;
1871 1764
1872
1873 while (1) { 1765 while (1) {
1874 1766
1875 while (1) { 1767 while (1) {
1876 if (put == (volatile s64) ch->w_local_GP.put) { 1768 if (put == ch->w_local_GP.put)
1877 break; 1769 break;
1878 }
1879 1770
1880 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + 1771 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1881 (put % ch->local_nentries) * ch->msg_size); 1772 (put % ch->local_nentries) *
1773 ch->msg_size);
1882 1774
1883 if (!(msg->flags & XPC_M_READY)) { 1775 if (!(msg->flags & XPC_M_READY))
1884 break; 1776 break;
1885 }
1886 1777
1887 put++; 1778 put++;
1888 } 1779 }
@@ -1893,9 +1784,9 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1893 } 1784 }
1894 1785
1895 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != 1786 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
1896 initial_put) { 1787 initial_put) {
1897 /* someone else beat us to it */ 1788 /* someone else beat us to it */
1898 DBUG_ON((volatile s64) ch->local_GP->put < initial_put); 1789 DBUG_ON(ch->local_GP->put < initial_put);
1899 break; 1790 break;
1900 } 1791 }
1901 1792
@@ -1914,12 +1805,10 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1914 initial_put = put; 1805 initial_put = put;
1915 } 1806 }
1916 1807
1917 if (send_IPI) { 1808 if (send_IPI)
1918 xpc_IPI_send_msgrequest(ch); 1809 xpc_IPI_send_msgrequest(ch);
1919 }
1920} 1810}
1921 1811
1922
1923/* 1812/*
1924 * Common code that does the actual sending of the message by advancing the 1813 * Common code that does the actual sending of the message by advancing the
1925 * local message queue's Put value and sends an IPI to the partition the 1814 * local message queue's Put value and sends an IPI to the partition the
@@ -1927,16 +1816,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1927 */ 1816 */
1928static enum xpc_retval 1817static enum xpc_retval
1929xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, 1818xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1930 xpc_notify_func func, void *key) 1819 xpc_notify_func func, void *key)
1931{ 1820{
1932 enum xpc_retval ret = xpcSuccess; 1821 enum xpc_retval ret = xpcSuccess;
1933 struct xpc_notify *notify = notify; 1822 struct xpc_notify *notify = notify;
1934 s64 put, msg_number = msg->number; 1823 s64 put, msg_number = msg->number;
1935 1824
1936
1937 DBUG_ON(notify_type == XPC_N_CALL && func == NULL); 1825 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
1938 DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != 1826 DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
1939 msg_number % ch->local_nentries); 1827 msg_number % ch->local_nentries);
1940 DBUG_ON(msg->flags & XPC_M_READY); 1828 DBUG_ON(msg->flags & XPC_M_READY);
1941 1829
1942 if (ch->flags & XPC_C_DISCONNECTING) { 1830 if (ch->flags & XPC_C_DISCONNECTING) {
@@ -1959,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1959 notify->key = key; 1847 notify->key = key;
1960 notify->type = notify_type; 1848 notify->type = notify_type;
1961 1849
1962 // >>> is a mb() needed here? 1850 /* >>> is a mb() needed here? */
1963 1851
1964 if (ch->flags & XPC_C_DISCONNECTING) { 1852 if (ch->flags & XPC_C_DISCONNECTING) {
1965 /* 1853 /*
@@ -1970,7 +1858,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1970 * the notify entry. 1858 * the notify entry.
1971 */ 1859 */
1972 if (cmpxchg(&notify->type, notify_type, 0) == 1860 if (cmpxchg(&notify->type, notify_type, 0) ==
1973 notify_type) { 1861 notify_type) {
1974 atomic_dec(&ch->n_to_notify); 1862 atomic_dec(&ch->n_to_notify);
1975 ret = ch->reason; 1863 ret = ch->reason;
1976 } 1864 }
@@ -1992,16 +1880,14 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1992 /* see if the message is next in line to be sent, if so send it */ 1880 /* see if the message is next in line to be sent, if so send it */
1993 1881
1994 put = ch->local_GP->put; 1882 put = ch->local_GP->put;
1995 if (put == msg_number) { 1883 if (put == msg_number)
1996 xpc_send_msgs(ch, put); 1884 xpc_send_msgs(ch, put);
1997 }
1998 1885
1999 /* drop the reference grabbed in xpc_allocate_msg() */ 1886 /* drop the reference grabbed in xpc_allocate_msg() */
2000 xpc_msgqueue_deref(ch); 1887 xpc_msgqueue_deref(ch);
2001 return ret; 1888 return ret;
2002} 1889}
2003 1890
2004
2005/* 1891/*
2006 * Send a message previously allocated using xpc_initiate_allocate() on the 1892 * Send a message previously allocated using xpc_initiate_allocate() on the
2007 * specified channel connected to the specified partition. 1893 * specified channel connected to the specified partition.
@@ -2029,8 +1915,7 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2029 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1915 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2030 enum xpc_retval ret; 1916 enum xpc_retval ret;
2031 1917
2032 1918 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
2033 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2034 partid, ch_number); 1919 partid, ch_number);
2035 1920
2036 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1921 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2042,7 +1927,6 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2042 return ret; 1927 return ret;
2043} 1928}
2044 1929
2045
2046/* 1930/*
2047 * Send a message previously allocated using xpc_initiate_allocate on the 1931 * Send a message previously allocated using xpc_initiate_allocate on the
2048 * specified channel connected to the specified partition. 1932 * specified channel connected to the specified partition.
@@ -2075,14 +1959,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2075 */ 1959 */
2076enum xpc_retval 1960enum xpc_retval
2077xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, 1961xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
2078 xpc_notify_func func, void *key) 1962 xpc_notify_func func, void *key)
2079{ 1963{
2080 struct xpc_partition *part = &xpc_partitions[partid]; 1964 struct xpc_partition *part = &xpc_partitions[partid];
2081 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 1965 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2082 enum xpc_retval ret; 1966 enum xpc_retval ret;
2083 1967
2084 1968 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
2085 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2086 partid, ch_number); 1969 partid, ch_number);
2087 1970
2088 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 1971 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -2091,11 +1974,10 @@ xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
2091 DBUG_ON(func == NULL); 1974 DBUG_ON(func == NULL);
2092 1975
2093 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, 1976 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
2094 func, key); 1977 func, key);
2095 return ret; 1978 return ret;
2096} 1979}
2097 1980
2098
2099static struct xpc_msg * 1981static struct xpc_msg *
2100xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) 1982xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2101{ 1983{
@@ -2105,7 +1987,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2105 u64 msg_offset; 1987 u64 msg_offset;
2106 enum xpc_retval ret; 1988 enum xpc_retval ret;
2107 1989
2108
2109 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { 1990 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
2110 /* we were interrupted by a signal */ 1991 /* we were interrupted by a signal */
2111 return NULL; 1992 return NULL;
@@ -2117,23 +1998,21 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2117 1998
2118 msg_index = ch->next_msg_to_pull % ch->remote_nentries; 1999 msg_index = ch->next_msg_to_pull % ch->remote_nentries;
2119 2000
2120 DBUG_ON(ch->next_msg_to_pull >= 2001 DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
2121 (volatile s64) ch->w_remote_GP.put); 2002 nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
2122 nmsgs = (volatile s64) ch->w_remote_GP.put -
2123 ch->next_msg_to_pull;
2124 if (msg_index + nmsgs > ch->remote_nentries) { 2003 if (msg_index + nmsgs > ch->remote_nentries) {
2125 /* ignore the ones that wrap the msg queue for now */ 2004 /* ignore the ones that wrap the msg queue for now */
2126 nmsgs = ch->remote_nentries - msg_index; 2005 nmsgs = ch->remote_nentries - msg_index;
2127 } 2006 }
2128 2007
2129 msg_offset = msg_index * ch->msg_size; 2008 msg_offset = msg_index * ch->msg_size;
2130 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2009 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2131 msg_offset); 2010 remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
2132 remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + 2011 msg_offset);
2133 msg_offset);
2134 2012
2135 if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, 2013 ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
2136 nmsgs * ch->msg_size)) != xpcSuccess) { 2014 nmsgs * ch->msg_size);
2015 if (ret != xpcSuccess) {
2137 2016
2138 dev_dbg(xpc_chan, "failed to pull %d msgs starting with" 2017 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
2139 " msg %ld from partition %d, channel=%d, " 2018 " msg %ld from partition %d, channel=%d, "
@@ -2146,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2146 return NULL; 2025 return NULL;
2147 } 2026 }
2148 2027
2149 mb(); /* >>> this may not be needed, we're not sure */
2150
2151 ch->next_msg_to_pull += nmsgs; 2028 ch->next_msg_to_pull += nmsgs;
2152 } 2029 }
2153 2030
@@ -2155,12 +2032,11 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2155 2032
2156 /* return the message we were looking for */ 2033 /* return the message we were looking for */
2157 msg_offset = (get % ch->remote_nentries) * ch->msg_size; 2034 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
2158 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); 2035 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2159 2036
2160 return msg; 2037 return msg;
2161} 2038}
2162 2039
2163
2164/* 2040/*
2165 * Get a message to be delivered. 2041 * Get a message to be delivered.
2166 */ 2042 */
@@ -2170,23 +2046,21 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
2170 struct xpc_msg *msg = NULL; 2046 struct xpc_msg *msg = NULL;
2171 s64 get; 2047 s64 get;
2172 2048
2173
2174 do { 2049 do {
2175 if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { 2050 if (ch->flags & XPC_C_DISCONNECTING)
2176 break; 2051 break;
2177 }
2178 2052
2179 get = (volatile s64) ch->w_local_GP.get; 2053 get = ch->w_local_GP.get;
2180 if (get == (volatile s64) ch->w_remote_GP.put) { 2054 rmb(); /* guarantee that .get loads before .put */
2055 if (get == ch->w_remote_GP.put)
2181 break; 2056 break;
2182 }
2183 2057
2184 /* There are messages waiting to be pulled and delivered. 2058 /* There are messages waiting to be pulled and delivered.
2185 * We need to try to secure one for ourselves. We'll do this 2059 * We need to try to secure one for ourselves. We'll do this
2186 * by trying to increment w_local_GP.get and hope that no one 2060 * by trying to increment w_local_GP.get and hope that no one
2187 * else beats us to it. If they do, we'll we'll simply have 2061 * else beats us to it. If they do, we'll we'll simply have
2188 * to try again for the next one. 2062 * to try again for the next one.
2189 */ 2063 */
2190 2064
2191 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { 2065 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
2192 /* we got the entry referenced by get */ 2066 /* we got the entry referenced by get */
@@ -2211,7 +2085,6 @@ xpc_get_deliverable_msg(struct xpc_channel *ch)
2211 return msg; 2085 return msg;
2212} 2086}
2213 2087
2214
2215/* 2088/*
2216 * Deliver a message to its intended recipient. 2089 * Deliver a message to its intended recipient.
2217 */ 2090 */
@@ -2220,8 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch)
2220{ 2093{
2221 struct xpc_msg *msg; 2094 struct xpc_msg *msg;
2222 2095
2223 2096 msg = xpc_get_deliverable_msg(ch);
2224 if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { 2097 if (msg != NULL) {
2225 2098
2226 /* 2099 /*
2227 * This ref is taken to protect the payload itself from being 2100 * This ref is taken to protect the payload itself from being
@@ -2235,16 +2108,16 @@ xpc_deliver_msg(struct xpc_channel *ch)
2235 if (ch->func != NULL) { 2108 if (ch->func != NULL) {
2236 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " 2109 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
2237 "msg_number=%ld, partid=%d, channel=%d\n", 2110 "msg_number=%ld, partid=%d, channel=%d\n",
2238 (void *) msg, msg->number, ch->partid, 2111 (void *)msg, msg->number, ch->partid,
2239 ch->number); 2112 ch->number);
2240 2113
2241 /* deliver the message to its intended recipient */ 2114 /* deliver the message to its intended recipient */
2242 ch->func(xpcMsgReceived, ch->partid, ch->number, 2115 ch->func(xpcMsgReceived, ch->partid, ch->number,
2243 &msg->payload, ch->key); 2116 &msg->payload, ch->key);
2244 2117
2245 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " 2118 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
2246 "msg_number=%ld, partid=%d, channel=%d\n", 2119 "msg_number=%ld, partid=%d, channel=%d\n",
2247 (void *) msg, msg->number, ch->partid, 2120 (void *)msg, msg->number, ch->partid,
2248 ch->number); 2121 ch->number);
2249 } 2122 }
2250 2123
@@ -2252,7 +2125,6 @@ xpc_deliver_msg(struct xpc_channel *ch)
2252 } 2125 }
2253} 2126}
2254 2127
2255
2256/* 2128/*
2257 * Now we actually acknowledge the messages that have been delivered and ack'd 2129 * Now we actually acknowledge the messages that have been delivered and ack'd
2258 * by advancing the cached remote message queue's Get value and if requested 2130 * by advancing the cached remote message queue's Get value and if requested
@@ -2265,20 +2137,18 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2265 s64 get = initial_get + 1; 2137 s64 get = initial_get + 1;
2266 int send_IPI = 0; 2138 int send_IPI = 0;
2267 2139
2268
2269 while (1) { 2140 while (1) {
2270 2141
2271 while (1) { 2142 while (1) {
2272 if (get == (volatile s64) ch->w_local_GP.get) { 2143 if (get == ch->w_local_GP.get)
2273 break; 2144 break;
2274 }
2275 2145
2276 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + 2146 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
2277 (get % ch->remote_nentries) * ch->msg_size); 2147 (get % ch->remote_nentries) *
2148 ch->msg_size);
2278 2149
2279 if (!(msg->flags & XPC_M_DONE)) { 2150 if (!(msg->flags & XPC_M_DONE))
2280 break; 2151 break;
2281 }
2282 2152
2283 msg_flags |= msg->flags; 2153 msg_flags |= msg->flags;
2284 get++; 2154 get++;
@@ -2290,10 +2160,9 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2290 } 2160 }
2291 2161
2292 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != 2162 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
2293 initial_get) { 2163 initial_get) {
2294 /* someone else beat us to it */ 2164 /* someone else beat us to it */
2295 DBUG_ON((volatile s64) ch->local_GP->get <= 2165 DBUG_ON(ch->local_GP->get <= initial_get);
2296 initial_get);
2297 break; 2166 break;
2298 } 2167 }
2299 2168
@@ -2312,12 +2181,10 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2312 initial_get = get; 2181 initial_get = get;
2313 } 2182 }
2314 2183
2315 if (send_IPI) { 2184 if (send_IPI)
2316 xpc_IPI_send_msgrequest(ch); 2185 xpc_IPI_send_msgrequest(ch);
2317 }
2318} 2186}
2319 2187
2320
2321/* 2188/*
2322 * Acknowledge receipt of a delivered message. 2189 * Acknowledge receipt of a delivered message.
2323 * 2190 *
@@ -2343,17 +2210,16 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2343 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); 2210 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2344 s64 get, msg_number = msg->number; 2211 s64 get, msg_number = msg->number;
2345 2212
2346
2347 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 2213 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2348 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); 2214 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2349 2215
2350 ch = &part->channels[ch_number]; 2216 ch = &part->channels[ch_number];
2351 2217
2352 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", 2218 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2353 (void *) msg, msg_number, ch->partid, ch->number); 2219 (void *)msg, msg_number, ch->partid, ch->number);
2354 2220
2355 DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != 2221 DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
2356 msg_number % ch->remote_nentries); 2222 msg_number % ch->remote_nentries);
2357 DBUG_ON(msg->flags & XPC_M_DONE); 2223 DBUG_ON(msg->flags & XPC_M_DONE);
2358 2224
2359 msg->flags |= XPC_M_DONE; 2225 msg->flags |= XPC_M_DONE;
@@ -2369,11 +2235,9 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2369 * been delivered. 2235 * been delivered.
2370 */ 2236 */
2371 get = ch->local_GP->get; 2237 get = ch->local_GP->get;
2372 if (get == msg_number) { 2238 if (get == msg_number)
2373 xpc_acknowledge_msgs(ch, get, msg->flags); 2239 xpc_acknowledge_msgs(ch, get, msg->flags);
2374 }
2375 2240
2376 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ 2241 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
2377 xpc_msgqueue_deref(ch); 2242 xpc_msgqueue_deref(ch);
2378} 2243}
2379
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 9e0b164da9c2..f673ba90eb0e 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) support - standard version. 10 * Cross Partition Communication (XPC) support - standard version.
12 * 11 *
@@ -44,23 +43,20 @@
44 * 43 *
45 */ 44 */
46 45
47
48#include <linux/kernel.h> 46#include <linux/kernel.h>
49#include <linux/module.h> 47#include <linux/module.h>
50#include <linux/init.h> 48#include <linux/init.h>
51#include <linux/sched.h>
52#include <linux/syscalls.h>
53#include <linux/cache.h> 49#include <linux/cache.h>
54#include <linux/interrupt.h> 50#include <linux/interrupt.h>
55#include <linux/delay.h> 51#include <linux/delay.h>
56#include <linux/reboot.h> 52#include <linux/reboot.h>
57#include <linux/completion.h> 53#include <linux/completion.h>
58#include <linux/kdebug.h> 54#include <linux/kdebug.h>
55#include <linux/kthread.h>
56#include <linux/uaccess.h>
59#include <asm/sn/intr.h> 57#include <asm/sn/intr.h>
60#include <asm/sn/sn_sal.h> 58#include <asm/sn/sn_sal.h>
61#include <asm/uaccess.h> 59#include "xpc.h"
62#include <asm/sn/xpc.h>
63
64 60
65/* define two XPC debug device structures to be used with dev_dbg() et al */ 61/* define two XPC debug device structures to be used with dev_dbg() et al */
66 62
@@ -81,10 +77,8 @@ struct device xpc_chan_dbg_subname = {
81struct device *xpc_part = &xpc_part_dbg_subname; 77struct device *xpc_part = &xpc_part_dbg_subname;
82struct device *xpc_chan = &xpc_chan_dbg_subname; 78struct device *xpc_chan = &xpc_chan_dbg_subname;
83 79
84
85static int xpc_kdebug_ignore; 80static int xpc_kdebug_ignore;
86 81
87
88/* systune related variables for /proc/sys directories */ 82/* systune related variables for /proc/sys directories */
89 83
90static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; 84static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -96,61 +90,56 @@ static int xpc_hb_check_min_interval = 10;
96static int xpc_hb_check_max_interval = 120; 90static int xpc_hb_check_max_interval = 120;
97 91
98int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT; 92int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
99static int xpc_disengage_request_min_timelimit = 0; 93static int xpc_disengage_request_min_timelimit; /* = 0 */
100static int xpc_disengage_request_max_timelimit = 120; 94static int xpc_disengage_request_max_timelimit = 120;
101 95
102static ctl_table xpc_sys_xpc_hb_dir[] = { 96static ctl_table xpc_sys_xpc_hb_dir[] = {
103 { 97 {
104 .ctl_name = CTL_UNNUMBERED, 98 .ctl_name = CTL_UNNUMBERED,
105 .procname = "hb_interval", 99 .procname = "hb_interval",
106 .data = &xpc_hb_interval, 100 .data = &xpc_hb_interval,
107 .maxlen = sizeof(int), 101 .maxlen = sizeof(int),
108 .mode = 0644, 102 .mode = 0644,
109 .proc_handler = &proc_dointvec_minmax, 103 .proc_handler = &proc_dointvec_minmax,
110 .strategy = &sysctl_intvec, 104 .strategy = &sysctl_intvec,
111 .extra1 = &xpc_hb_min_interval, 105 .extra1 = &xpc_hb_min_interval,
112 .extra2 = &xpc_hb_max_interval 106 .extra2 = &xpc_hb_max_interval},
113 },
114 { 107 {
115 .ctl_name = CTL_UNNUMBERED, 108 .ctl_name = CTL_UNNUMBERED,
116 .procname = "hb_check_interval", 109 .procname = "hb_check_interval",
117 .data = &xpc_hb_check_interval, 110 .data = &xpc_hb_check_interval,
118 .maxlen = sizeof(int), 111 .maxlen = sizeof(int),
119 .mode = 0644, 112 .mode = 0644,
120 .proc_handler = &proc_dointvec_minmax, 113 .proc_handler = &proc_dointvec_minmax,
121 .strategy = &sysctl_intvec, 114 .strategy = &sysctl_intvec,
122 .extra1 = &xpc_hb_check_min_interval, 115 .extra1 = &xpc_hb_check_min_interval,
123 .extra2 = &xpc_hb_check_max_interval 116 .extra2 = &xpc_hb_check_max_interval},
124 },
125 {} 117 {}
126}; 118};
127static ctl_table xpc_sys_xpc_dir[] = { 119static ctl_table xpc_sys_xpc_dir[] = {
128 { 120 {
129 .ctl_name = CTL_UNNUMBERED, 121 .ctl_name = CTL_UNNUMBERED,
130 .procname = "hb", 122 .procname = "hb",
131 .mode = 0555, 123 .mode = 0555,
132 .child = xpc_sys_xpc_hb_dir 124 .child = xpc_sys_xpc_hb_dir},
133 },
134 { 125 {
135 .ctl_name = CTL_UNNUMBERED, 126 .ctl_name = CTL_UNNUMBERED,
136 .procname = "disengage_request_timelimit", 127 .procname = "disengage_request_timelimit",
137 .data = &xpc_disengage_request_timelimit, 128 .data = &xpc_disengage_request_timelimit,
138 .maxlen = sizeof(int), 129 .maxlen = sizeof(int),
139 .mode = 0644, 130 .mode = 0644,
140 .proc_handler = &proc_dointvec_minmax, 131 .proc_handler = &proc_dointvec_minmax,
141 .strategy = &sysctl_intvec, 132 .strategy = &sysctl_intvec,
142 .extra1 = &xpc_disengage_request_min_timelimit, 133 .extra1 = &xpc_disengage_request_min_timelimit,
143 .extra2 = &xpc_disengage_request_max_timelimit 134 .extra2 = &xpc_disengage_request_max_timelimit},
144 },
145 {} 135 {}
146}; 136};
147static ctl_table xpc_sys_dir[] = { 137static ctl_table xpc_sys_dir[] = {
148 { 138 {
149 .ctl_name = CTL_UNNUMBERED, 139 .ctl_name = CTL_UNNUMBERED,
150 .procname = "xpc", 140 .procname = "xpc",
151 .mode = 0555, 141 .mode = 0555,
152 .child = xpc_sys_xpc_dir 142 .child = xpc_sys_xpc_dir},
153 },
154 {} 143 {}
155}; 144};
156static struct ctl_table_header *xpc_sysctl; 145static struct ctl_table_header *xpc_sysctl;
@@ -172,13 +161,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited);
172/* notification that the xpc_discovery thread has exited */ 161/* notification that the xpc_discovery thread has exited */
173static DECLARE_COMPLETION(xpc_discovery_exited); 162static DECLARE_COMPLETION(xpc_discovery_exited);
174 163
175
176static struct timer_list xpc_hb_timer; 164static struct timer_list xpc_hb_timer;
177 165
178
179static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); 166static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
180 167
181
182static int xpc_system_reboot(struct notifier_block *, unsigned long, void *); 168static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
183static struct notifier_block xpc_reboot_notifier = { 169static struct notifier_block xpc_reboot_notifier = {
184 .notifier_call = xpc_system_reboot, 170 .notifier_call = xpc_system_reboot,
@@ -189,25 +175,22 @@ static struct notifier_block xpc_die_notifier = {
189 .notifier_call = xpc_system_die, 175 .notifier_call = xpc_system_die,
190}; 176};
191 177
192
193/* 178/*
194 * Timer function to enforce the timelimit on the partition disengage request. 179 * Timer function to enforce the timelimit on the partition disengage request.
195 */ 180 */
196static void 181static void
197xpc_timeout_partition_disengage_request(unsigned long data) 182xpc_timeout_partition_disengage_request(unsigned long data)
198{ 183{
199 struct xpc_partition *part = (struct xpc_partition *) data; 184 struct xpc_partition *part = (struct xpc_partition *)data;
200
201 185
202 DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); 186 DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
203 187
204 (void) xpc_partition_disengaged(part); 188 (void)xpc_partition_disengaged(part);
205 189
206 DBUG_ON(part->disengage_request_timeout != 0); 190 DBUG_ON(part->disengage_request_timeout != 0);
207 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); 191 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
208} 192}
209 193
210
211/* 194/*
212 * Notify the heartbeat check thread that an IRQ has been received. 195 * Notify the heartbeat check thread that an IRQ has been received.
213 */ 196 */
@@ -219,7 +202,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id)
219 return IRQ_HANDLED; 202 return IRQ_HANDLED;
220} 203}
221 204
222
223/* 205/*
224 * Timer to produce the heartbeat. The timer structures function is 206 * Timer to produce the heartbeat. The timer structures function is
225 * already set when this is initially called. A tunable is used to 207 * already set when this is initially called. A tunable is used to
@@ -230,15 +212,13 @@ xpc_hb_beater(unsigned long dummy)
230{ 212{
231 xpc_vars->heartbeat++; 213 xpc_vars->heartbeat++;
232 214
233 if (time_after_eq(jiffies, xpc_hb_check_timeout)) { 215 if (time_after_eq(jiffies, xpc_hb_check_timeout))
234 wake_up_interruptible(&xpc_act_IRQ_wq); 216 wake_up_interruptible(&xpc_act_IRQ_wq);
235 }
236 217
237 xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); 218 xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
238 add_timer(&xpc_hb_timer); 219 add_timer(&xpc_hb_timer);
239} 220}
240 221
241
242/* 222/*
243 * This thread is responsible for nearly all of the partition 223 * This thread is responsible for nearly all of the partition
244 * activation/deactivation. 224 * activation/deactivation.
@@ -248,27 +228,23 @@ xpc_hb_checker(void *ignore)
248{ 228{
249 int last_IRQ_count = 0; 229 int last_IRQ_count = 0;
250 int new_IRQ_count; 230 int new_IRQ_count;
251 int force_IRQ=0; 231 int force_IRQ = 0;
252
253 232
254 /* this thread was marked active by xpc_hb_init() */ 233 /* this thread was marked active by xpc_hb_init() */
255 234
256 daemonize(XPC_HB_CHECK_THREAD_NAME);
257
258 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); 235 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
259 236
260 /* set our heartbeating to other partitions into motion */ 237 /* set our heartbeating to other partitions into motion */
261 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 238 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
262 xpc_hb_beater(0); 239 xpc_hb_beater(0);
263 240
264 while (!(volatile int) xpc_exiting) { 241 while (!xpc_exiting) {
265 242
266 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " 243 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
267 "been received\n", 244 "been received\n",
268 (int) (xpc_hb_check_timeout - jiffies), 245 (int)(xpc_hb_check_timeout - jiffies),
269 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); 246 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
270 247
271
272 /* checking of remote heartbeats is skewed by IRQ handling */ 248 /* checking of remote heartbeats is skewed by IRQ handling */
273 if (time_after_eq(jiffies, xpc_hb_check_timeout)) { 249 if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
274 dev_dbg(xpc_part, "checking remote heartbeats\n"); 250 dev_dbg(xpc_part, "checking remote heartbeats\n");
@@ -282,7 +258,6 @@ xpc_hb_checker(void *ignore)
282 force_IRQ = 1; 258 force_IRQ = 1;
283 } 259 }
284 260
285
286 /* check for outstanding IRQs */ 261 /* check for outstanding IRQs */
287 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); 262 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
288 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { 263 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
@@ -294,30 +269,30 @@ xpc_hb_checker(void *ignore)
294 last_IRQ_count += xpc_identify_act_IRQ_sender(); 269 last_IRQ_count += xpc_identify_act_IRQ_sender();
295 if (last_IRQ_count < new_IRQ_count) { 270 if (last_IRQ_count < new_IRQ_count) {
296 /* retry once to help avoid missing AMO */ 271 /* retry once to help avoid missing AMO */
297 (void) xpc_identify_act_IRQ_sender(); 272 (void)xpc_identify_act_IRQ_sender();
298 } 273 }
299 last_IRQ_count = new_IRQ_count; 274 last_IRQ_count = new_IRQ_count;
300 275
301 xpc_hb_check_timeout = jiffies + 276 xpc_hb_check_timeout = jiffies +
302 (xpc_hb_check_interval * HZ); 277 (xpc_hb_check_interval * HZ);
303 } 278 }
304 279
305 /* wait for IRQ or timeout */ 280 /* wait for IRQ or timeout */
306 (void) wait_event_interruptible(xpc_act_IRQ_wq, 281 (void)wait_event_interruptible(xpc_act_IRQ_wq,
307 (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || 282 (last_IRQ_count <
308 time_after_eq(jiffies, xpc_hb_check_timeout) || 283 atomic_read(&xpc_act_IRQ_rcvd)
309 (volatile int) xpc_exiting)); 284 || time_after_eq(jiffies,
285 xpc_hb_check_timeout) ||
286 xpc_exiting));
310 } 287 }
311 288
312 dev_dbg(xpc_part, "heartbeat checker is exiting\n"); 289 dev_dbg(xpc_part, "heartbeat checker is exiting\n");
313 290
314
315 /* mark this thread as having exited */ 291 /* mark this thread as having exited */
316 complete(&xpc_hb_checker_exited); 292 complete(&xpc_hb_checker_exited);
317 return 0; 293 return 0;
318} 294}
319 295
320
321/* 296/*
322 * This thread will attempt to discover other partitions to activate 297 * This thread will attempt to discover other partitions to activate
323 * based on info provided by SAL. This new thread is short lived and 298 * based on info provided by SAL. This new thread is short lived and
@@ -326,8 +301,6 @@ xpc_hb_checker(void *ignore)
326static int 301static int
327xpc_initiate_discovery(void *ignore) 302xpc_initiate_discovery(void *ignore)
328{ 303{
329 daemonize(XPC_DISCOVERY_THREAD_NAME);
330
331 xpc_discovery(); 304 xpc_discovery();
332 305
333 dev_dbg(xpc_part, "discovery thread is exiting\n"); 306 dev_dbg(xpc_part, "discovery thread is exiting\n");
@@ -337,7 +310,6 @@ xpc_initiate_discovery(void *ignore)
337 return 0; 310 return 0;
338} 311}
339 312
340
341/* 313/*
342 * Establish first contact with the remote partititon. This involves pulling 314 * Establish first contact with the remote partititon. This involves pulling
343 * the XPC per partition variables from the remote partition and waiting for 315 * the XPC per partition variables from the remote partition and waiting for
@@ -348,7 +320,6 @@ xpc_make_first_contact(struct xpc_partition *part)
348{ 320{
349 enum xpc_retval ret; 321 enum xpc_retval ret;
350 322
351
352 while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { 323 while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
353 if (ret != xpcRetry) { 324 if (ret != xpcRetry) {
354 XPC_DEACTIVATE_PARTITION(part, ret); 325 XPC_DEACTIVATE_PARTITION(part, ret);
@@ -359,17 +330,15 @@ xpc_make_first_contact(struct xpc_partition *part)
359 "partition %d\n", XPC_PARTID(part)); 330 "partition %d\n", XPC_PARTID(part));
360 331
361 /* wait a 1/4 of a second or so */ 332 /* wait a 1/4 of a second or so */
362 (void) msleep_interruptible(250); 333 (void)msleep_interruptible(250);
363 334
364 if (part->act_state == XPC_P_DEACTIVATING) { 335 if (part->act_state == XPC_P_DEACTIVATING)
365 return part->reason; 336 return part->reason;
366 }
367 } 337 }
368 338
369 return xpc_mark_partition_active(part); 339 return xpc_mark_partition_active(part);
370} 340}
371 341
372
373/* 342/*
374 * The first kthread assigned to a newly activated partition is the one 343 * The first kthread assigned to a newly activated partition is the one
375 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to 344 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
@@ -386,12 +355,11 @@ static void
386xpc_channel_mgr(struct xpc_partition *part) 355xpc_channel_mgr(struct xpc_partition *part)
387{ 356{
388 while (part->act_state != XPC_P_DEACTIVATING || 357 while (part->act_state != XPC_P_DEACTIVATING ||
389 atomic_read(&part->nchannels_active) > 0 || 358 atomic_read(&part->nchannels_active) > 0 ||
390 !xpc_partition_disengaged(part)) { 359 !xpc_partition_disengaged(part)) {
391 360
392 xpc_process_channel_activity(part); 361 xpc_process_channel_activity(part);
393 362
394
395 /* 363 /*
396 * Wait until we've been requested to activate kthreads or 364 * Wait until we've been requested to activate kthreads or
397 * all of the channel's message queues have been torn down or 365 * all of the channel's message queues have been torn down or
@@ -406,21 +374,16 @@ xpc_channel_mgr(struct xpc_partition *part)
406 * wake him up. 374 * wake him up.
407 */ 375 */
408 atomic_dec(&part->channel_mgr_requests); 376 atomic_dec(&part->channel_mgr_requests);
409 (void) wait_event_interruptible(part->channel_mgr_wq, 377 (void)wait_event_interruptible(part->channel_mgr_wq,
410 (atomic_read(&part->channel_mgr_requests) > 0 || 378 (atomic_read(&part->channel_mgr_requests) > 0 ||
411 (volatile u64) part->local_IPI_amo != 0 || 379 part->local_IPI_amo != 0 ||
412 ((volatile u8) part->act_state == 380 (part->act_state == XPC_P_DEACTIVATING &&
413 XPC_P_DEACTIVATING && 381 atomic_read(&part->nchannels_active) == 0 &&
414 atomic_read(&part->nchannels_active) == 0 && 382 xpc_partition_disengaged(part))));
415 xpc_partition_disengaged(part))));
416 atomic_set(&part->channel_mgr_requests, 1); 383 atomic_set(&part->channel_mgr_requests, 1);
417
418 // >>> Does it need to wakeup periodically as well? In case we
419 // >>> miscalculated the #of kthreads to wakeup or create?
420 } 384 }
421} 385}
422 386
423
424/* 387/*
425 * When XPC HB determines that a partition has come up, it will create a new 388 * When XPC HB determines that a partition has come up, it will create a new
426 * kthread and that kthread will call this function to attempt to set up the 389 * kthread and that kthread will call this function to attempt to set up the
@@ -443,9 +406,8 @@ xpc_partition_up(struct xpc_partition *part)
443 406
444 dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); 407 dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
445 408
446 if (xpc_setup_infrastructure(part) != xpcSuccess) { 409 if (xpc_setup_infrastructure(part) != xpcSuccess)
447 return; 410 return;
448 }
449 411
450 /* 412 /*
451 * The kthread that XPC HB called us with will become the 413 * The kthread that XPC HB called us with will become the
@@ -454,27 +416,22 @@ xpc_partition_up(struct xpc_partition *part)
454 * has been dismantled. 416 * has been dismantled.
455 */ 417 */
456 418
457 (void) xpc_part_ref(part); /* this will always succeed */ 419 (void)xpc_part_ref(part); /* this will always succeed */
458 420
459 if (xpc_make_first_contact(part) == xpcSuccess) { 421 if (xpc_make_first_contact(part) == xpcSuccess)
460 xpc_channel_mgr(part); 422 xpc_channel_mgr(part);
461 }
462 423
463 xpc_part_deref(part); 424 xpc_part_deref(part);
464 425
465 xpc_teardown_infrastructure(part); 426 xpc_teardown_infrastructure(part);
466} 427}
467 428
468
469static int 429static int
470xpc_activating(void *__partid) 430xpc_activating(void *__partid)
471{ 431{
472 partid_t partid = (u64) __partid; 432 partid_t partid = (u64)__partid;
473 struct xpc_partition *part = &xpc_partitions[partid]; 433 struct xpc_partition *part = &xpc_partitions[partid];
474 unsigned long irq_flags; 434 unsigned long irq_flags;
475 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
476 int ret;
477
478 435
479 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 436 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
480 437
@@ -496,21 +453,6 @@ xpc_activating(void *__partid)
496 453
497 dev_dbg(xpc_part, "bringing partition %d up\n", partid); 454 dev_dbg(xpc_part, "bringing partition %d up\n", partid);
498 455
499 daemonize("xpc%02d", partid);
500
501 /*
502 * This thread needs to run at a realtime priority to prevent a
503 * significant performance degradation.
504 */
505 ret = sched_setscheduler(current, SCHED_FIFO, &param);
506 if (ret != 0) {
507 dev_warn(xpc_part, "unable to set pid %d to a realtime "
508 "priority, ret=%d\n", current->pid, ret);
509 }
510
511 /* allow this thread and its children to run on any CPU */
512 set_cpus_allowed(current, CPU_MASK_ALL);
513
514 /* 456 /*
515 * Register the remote partition's AMOs with SAL so it can handle 457 * Register the remote partition's AMOs with SAL so it can handle
516 * and cleanup errors within that address range should the remote 458 * and cleanup errors within that address range should the remote
@@ -522,9 +464,9 @@ xpc_activating(void *__partid)
522 * reloads and system reboots. 464 * reloads and system reboots.
523 */ 465 */
524 if (sn_register_xp_addr_region(part->remote_amos_page_pa, 466 if (sn_register_xp_addr_region(part->remote_amos_page_pa,
525 PAGE_SIZE, 1) < 0) { 467 PAGE_SIZE, 1) < 0) {
526 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " 468 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
527 "xp_addr region\n", partid); 469 "xp_addr region\n", partid);
528 470
529 spin_lock_irqsave(&part->act_lock, irq_flags); 471 spin_lock_irqsave(&part->act_lock, irq_flags);
530 part->act_state = XPC_P_INACTIVE; 472 part->act_state = XPC_P_INACTIVE;
@@ -537,12 +479,11 @@ xpc_activating(void *__partid)
537 xpc_allow_hb(partid, xpc_vars); 479 xpc_allow_hb(partid, xpc_vars);
538 xpc_IPI_send_activated(part); 480 xpc_IPI_send_activated(part);
539 481
540
541 /* 482 /*
542 * xpc_partition_up() holds this thread and marks this partition as 483 * xpc_partition_up() holds this thread and marks this partition as
543 * XPC_P_ACTIVE by calling xpc_hb_mark_active(). 484 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
544 */ 485 */
545 (void) xpc_partition_up(part); 486 (void)xpc_partition_up(part);
546 487
547 xpc_disallow_hb(partid, xpc_vars); 488 xpc_disallow_hb(partid, xpc_vars);
548 xpc_mark_partition_inactive(part); 489 xpc_mark_partition_inactive(part);
@@ -555,14 +496,12 @@ xpc_activating(void *__partid)
555 return 0; 496 return 0;
556} 497}
557 498
558
559void 499void
560xpc_activate_partition(struct xpc_partition *part) 500xpc_activate_partition(struct xpc_partition *part)
561{ 501{
562 partid_t partid = XPC_PARTID(part); 502 partid_t partid = XPC_PARTID(part);
563 unsigned long irq_flags; 503 unsigned long irq_flags;
564 pid_t pid; 504 struct task_struct *kthread;
565
566 505
567 spin_lock_irqsave(&part->act_lock, irq_flags); 506 spin_lock_irqsave(&part->act_lock, irq_flags);
568 507
@@ -573,9 +512,9 @@ xpc_activate_partition(struct xpc_partition *part)
573 512
574 spin_unlock_irqrestore(&part->act_lock, irq_flags); 513 spin_unlock_irqrestore(&part->act_lock, irq_flags);
575 514
576 pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); 515 kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
577 516 partid);
578 if (unlikely(pid <= 0)) { 517 if (IS_ERR(kthread)) {
579 spin_lock_irqsave(&part->act_lock, irq_flags); 518 spin_lock_irqsave(&part->act_lock, irq_flags);
580 part->act_state = XPC_P_INACTIVE; 519 part->act_state = XPC_P_INACTIVE;
581 XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); 520 XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
@@ -583,12 +522,11 @@ xpc_activate_partition(struct xpc_partition *part)
583 } 522 }
584} 523}
585 524
586
587/* 525/*
588 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified 526 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
589 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more 527 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
590 * than one partition, we use an AMO_t structure per partition to indicate 528 * than one partition, we use an AMO_t structure per partition to indicate
591 * whether a partition has sent an IPI or not. >>> If it has, then wake up the 529 * whether a partition has sent an IPI or not. If it has, then wake up the
592 * associated kthread to handle it. 530 * associated kthread to handle it.
593 * 531 *
594 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC 532 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
@@ -603,10 +541,9 @@ xpc_activate_partition(struct xpc_partition *part)
603irqreturn_t 541irqreturn_t
604xpc_notify_IRQ_handler(int irq, void *dev_id) 542xpc_notify_IRQ_handler(int irq, void *dev_id)
605{ 543{
606 partid_t partid = (partid_t) (u64) dev_id; 544 partid_t partid = (partid_t) (u64)dev_id;
607 struct xpc_partition *part = &xpc_partitions[partid]; 545 struct xpc_partition *part = &xpc_partitions[partid];
608 546
609
610 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 547 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
611 548
612 if (xpc_part_ref(part)) { 549 if (xpc_part_ref(part)) {
@@ -617,7 +554,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id)
617 return IRQ_HANDLED; 554 return IRQ_HANDLED;
618} 555}
619 556
620
621/* 557/*
622 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor 558 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
623 * because the write to their associated IPI amo completed after the IRQ/IPI 559 * because the write to their associated IPI amo completed after the IRQ/IPI
@@ -630,13 +566,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part)
630 xpc_check_for_channel_activity(part); 566 xpc_check_for_channel_activity(part);
631 567
632 part->dropped_IPI_timer.expires = jiffies + 568 part->dropped_IPI_timer.expires = jiffies +
633 XPC_P_DROPPED_IPI_WAIT; 569 XPC_P_DROPPED_IPI_WAIT;
634 add_timer(&part->dropped_IPI_timer); 570 add_timer(&part->dropped_IPI_timer);
635 xpc_part_deref(part); 571 xpc_part_deref(part);
636 } 572 }
637} 573}
638 574
639
640void 575void
641xpc_activate_kthreads(struct xpc_channel *ch, int needed) 576xpc_activate_kthreads(struct xpc_channel *ch, int needed)
642{ 577{
@@ -644,7 +579,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
644 int assigned = atomic_read(&ch->kthreads_assigned); 579 int assigned = atomic_read(&ch->kthreads_assigned);
645 int wakeup; 580 int wakeup;
646 581
647
648 DBUG_ON(needed <= 0); 582 DBUG_ON(needed <= 0);
649 583
650 if (idle > 0) { 584 if (idle > 0) {
@@ -658,16 +592,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
658 wake_up_nr(&ch->idle_wq, wakeup); 592 wake_up_nr(&ch->idle_wq, wakeup);
659 } 593 }
660 594
661 if (needed <= 0) { 595 if (needed <= 0)
662 return; 596 return;
663 }
664 597
665 if (needed + assigned > ch->kthreads_assigned_limit) { 598 if (needed + assigned > ch->kthreads_assigned_limit) {
666 needed = ch->kthreads_assigned_limit - assigned; 599 needed = ch->kthreads_assigned_limit - assigned;
667 // >>>should never be less than 0 600 if (needed <= 0)
668 if (needed <= 0) {
669 return; 601 return;
670 }
671 } 602 }
672 603
673 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", 604 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
@@ -676,7 +607,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
676 xpc_create_kthreads(ch, needed, 0); 607 xpc_create_kthreads(ch, needed, 0);
677} 608}
678 609
679
680/* 610/*
681 * This function is where XPC's kthreads wait for messages to deliver. 611 * This function is where XPC's kthreads wait for messages to deliver.
682 */ 612 */
@@ -686,15 +616,13 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
686 do { 616 do {
687 /* deliver messages to their intended recipients */ 617 /* deliver messages to their intended recipients */
688 618
689 while ((volatile s64) ch->w_local_GP.get < 619 while (ch->w_local_GP.get < ch->w_remote_GP.put &&
690 (volatile s64) ch->w_remote_GP.put && 620 !(ch->flags & XPC_C_DISCONNECTING)) {
691 !((volatile u32) ch->flags &
692 XPC_C_DISCONNECTING)) {
693 xpc_deliver_msg(ch); 621 xpc_deliver_msg(ch);
694 } 622 }
695 623
696 if (atomic_inc_return(&ch->kthreads_idle) > 624 if (atomic_inc_return(&ch->kthreads_idle) >
697 ch->kthreads_idle_limit) { 625 ch->kthreads_idle_limit) {
698 /* too many idle kthreads on this channel */ 626 /* too many idle kthreads on this channel */
699 atomic_dec(&ch->kthreads_idle); 627 atomic_dec(&ch->kthreads_idle);
700 break; 628 break;
@@ -703,20 +631,17 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
703 dev_dbg(xpc_chan, "idle kthread calling " 631 dev_dbg(xpc_chan, "idle kthread calling "
704 "wait_event_interruptible_exclusive()\n"); 632 "wait_event_interruptible_exclusive()\n");
705 633
706 (void) wait_event_interruptible_exclusive(ch->idle_wq, 634 (void)wait_event_interruptible_exclusive(ch->idle_wq,
707 ((volatile s64) ch->w_local_GP.get < 635 (ch->w_local_GP.get < ch->w_remote_GP.put ||
708 (volatile s64) ch->w_remote_GP.put || 636 (ch->flags & XPC_C_DISCONNECTING)));
709 ((volatile u32) ch->flags &
710 XPC_C_DISCONNECTING)));
711 637
712 atomic_dec(&ch->kthreads_idle); 638 atomic_dec(&ch->kthreads_idle);
713 639
714 } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); 640 } while (!(ch->flags & XPC_C_DISCONNECTING));
715} 641}
716 642
717
718static int 643static int
719xpc_daemonize_kthread(void *args) 644xpc_kthread_start(void *args)
720{ 645{
721 partid_t partid = XPC_UNPACK_ARG1(args); 646 partid_t partid = XPC_UNPACK_ARG1(args);
722 u16 ch_number = XPC_UNPACK_ARG2(args); 647 u16 ch_number = XPC_UNPACK_ARG2(args);
@@ -725,9 +650,6 @@ xpc_daemonize_kthread(void *args)
725 int n_needed; 650 int n_needed;
726 unsigned long irq_flags; 651 unsigned long irq_flags;
727 652
728
729 daemonize("xpc%02dc%d", partid, ch_number);
730
731 dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", 653 dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
732 partid, ch_number); 654 partid, ch_number);
733 655
@@ -756,10 +678,9 @@ xpc_daemonize_kthread(void *args)
756 * need one less than total #of messages to deliver. 678 * need one less than total #of messages to deliver.
757 */ 679 */
758 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; 680 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
759 if (n_needed > 0 && 681 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
760 !(ch->flags & XPC_C_DISCONNECTING)) {
761 xpc_activate_kthreads(ch, n_needed); 682 xpc_activate_kthreads(ch, n_needed);
762 } 683
763 } else { 684 } else {
764 spin_unlock_irqrestore(&ch->lock, irq_flags); 685 spin_unlock_irqrestore(&ch->lock, irq_flags);
765 } 686 }
@@ -771,7 +692,7 @@ xpc_daemonize_kthread(void *args)
771 692
772 spin_lock_irqsave(&ch->lock, irq_flags); 693 spin_lock_irqsave(&ch->lock, irq_flags);
773 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && 694 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
774 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { 695 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
775 ch->flags |= XPC_C_DISCONNECTINGCALLOUT; 696 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
776 spin_unlock_irqrestore(&ch->lock, irq_flags); 697 spin_unlock_irqrestore(&ch->lock, irq_flags);
777 698
@@ -798,7 +719,6 @@ xpc_daemonize_kthread(void *args)
798 return 0; 719 return 0;
799} 720}
800 721
801
802/* 722/*
803 * For each partition that XPC has established communications with, there is 723 * For each partition that XPC has established communications with, there is
804 * a minimum of one kernel thread assigned to perform any operation that 724 * a minimum of one kernel thread assigned to perform any operation that
@@ -813,13 +733,12 @@ xpc_daemonize_kthread(void *args)
813 */ 733 */
814void 734void
815xpc_create_kthreads(struct xpc_channel *ch, int needed, 735xpc_create_kthreads(struct xpc_channel *ch, int needed,
816 int ignore_disconnecting) 736 int ignore_disconnecting)
817{ 737{
818 unsigned long irq_flags; 738 unsigned long irq_flags;
819 pid_t pid;
820 u64 args = XPC_PACK_ARGS(ch->partid, ch->number); 739 u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
821 struct xpc_partition *part = &xpc_partitions[ch->partid]; 740 struct xpc_partition *part = &xpc_partitions[ch->partid];
822 741 struct task_struct *kthread;
823 742
824 while (needed-- > 0) { 743 while (needed-- > 0) {
825 744
@@ -832,7 +751,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
832 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { 751 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
833 /* kthreads assigned had gone to zero */ 752 /* kthreads assigned had gone to zero */
834 BUG_ON(!(ch->flags & 753 BUG_ON(!(ch->flags &
835 XPC_C_DISCONNECTINGCALLOUT_MADE)); 754 XPC_C_DISCONNECTINGCALLOUT_MADE));
836 break; 755 break;
837 } 756 }
838 757
@@ -843,11 +762,12 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
843 if (atomic_inc_return(&part->nchannels_engaged) == 1) 762 if (atomic_inc_return(&part->nchannels_engaged) == 1)
844 xpc_mark_partition_engaged(part); 763 xpc_mark_partition_engaged(part);
845 } 764 }
846 (void) xpc_part_ref(part); 765 (void)xpc_part_ref(part);
847 xpc_msgqueue_ref(ch); 766 xpc_msgqueue_ref(ch);
848 767
849 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); 768 kthread = kthread_run(xpc_kthread_start, (void *)args,
850 if (pid < 0) { 769 "xpc%02dc%d", ch->partid, ch->number);
770 if (IS_ERR(kthread)) {
851 /* the fork failed */ 771 /* the fork failed */
852 772
853 /* 773 /*
@@ -857,7 +777,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
857 * to this channel are blocked in the channel's 777 * to this channel are blocked in the channel's
858 * registerer, because the only thing that will unblock 778 * registerer, because the only thing that will unblock
859 * them is the xpcDisconnecting callout that this 779 * them is the xpcDisconnecting callout that this
860 * failed kernel_thread would have made. 780 * failed kthread_run() would have made.
861 */ 781 */
862 782
863 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && 783 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
@@ -869,7 +789,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
869 xpc_part_deref(part); 789 xpc_part_deref(part);
870 790
871 if (atomic_read(&ch->kthreads_assigned) < 791 if (atomic_read(&ch->kthreads_assigned) <
872 ch->kthreads_idle_limit) { 792 ch->kthreads_idle_limit) {
873 /* 793 /*
874 * Flag this as an error only if we have an 794 * Flag this as an error only if we have an
875 * insufficient #of kthreads for the channel 795 * insufficient #of kthreads for the channel
@@ -877,17 +797,14 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
877 */ 797 */
878 spin_lock_irqsave(&ch->lock, irq_flags); 798 spin_lock_irqsave(&ch->lock, irq_flags);
879 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, 799 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
880 &irq_flags); 800 &irq_flags);
881 spin_unlock_irqrestore(&ch->lock, irq_flags); 801 spin_unlock_irqrestore(&ch->lock, irq_flags);
882 } 802 }
883 break; 803 break;
884 } 804 }
885
886 ch->kthreads_created++; // >>> temporary debug only!!!
887 } 805 }
888} 806}
889 807
890
891void 808void
892xpc_disconnect_wait(int ch_number) 809xpc_disconnect_wait(int ch_number)
893{ 810{
@@ -897,14 +814,12 @@ xpc_disconnect_wait(int ch_number)
897 struct xpc_channel *ch; 814 struct xpc_channel *ch;
898 int wakeup_channel_mgr; 815 int wakeup_channel_mgr;
899 816
900
901 /* now wait for all callouts to the caller's function to cease */ 817 /* now wait for all callouts to the caller's function to cease */
902 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 818 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
903 part = &xpc_partitions[partid]; 819 part = &xpc_partitions[partid];
904 820
905 if (!xpc_part_ref(part)) { 821 if (!xpc_part_ref(part))
906 continue; 822 continue;
907 }
908 823
909 ch = &part->channels[ch_number]; 824 ch = &part->channels[ch_number];
910 825
@@ -923,7 +838,8 @@ xpc_disconnect_wait(int ch_number)
923 if (part->act_state != XPC_P_DEACTIVATING) { 838 if (part->act_state != XPC_P_DEACTIVATING) {
924 spin_lock(&part->IPI_lock); 839 spin_lock(&part->IPI_lock);
925 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 840 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
926 ch->number, ch->delayed_IPI_flags); 841 ch->number,
842 ch->delayed_IPI_flags);
927 spin_unlock(&part->IPI_lock); 843 spin_unlock(&part->IPI_lock);
928 wakeup_channel_mgr = 1; 844 wakeup_channel_mgr = 1;
929 } 845 }
@@ -933,15 +849,13 @@ xpc_disconnect_wait(int ch_number)
933 ch->flags &= ~XPC_C_WDISCONNECT; 849 ch->flags &= ~XPC_C_WDISCONNECT;
934 spin_unlock_irqrestore(&ch->lock, irq_flags); 850 spin_unlock_irqrestore(&ch->lock, irq_flags);
935 851
936 if (wakeup_channel_mgr) { 852 if (wakeup_channel_mgr)
937 xpc_wakeup_channel_mgr(part); 853 xpc_wakeup_channel_mgr(part);
938 }
939 854
940 xpc_part_deref(part); 855 xpc_part_deref(part);
941 } 856 }
942} 857}
943 858
944
945static void 859static void
946xpc_do_exit(enum xpc_retval reason) 860xpc_do_exit(enum xpc_retval reason)
947{ 861{
@@ -950,7 +864,6 @@ xpc_do_exit(enum xpc_retval reason)
950 struct xpc_partition *part; 864 struct xpc_partition *part;
951 unsigned long printmsg_time, disengage_request_timeout = 0; 865 unsigned long printmsg_time, disengage_request_timeout = 0;
952 866
953
954 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ 867 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
955 DBUG_ON(xpc_exiting == 1); 868 DBUG_ON(xpc_exiting == 1);
956 869
@@ -971,10 +884,8 @@ xpc_do_exit(enum xpc_retval reason)
971 /* wait for the heartbeat checker thread to exit */ 884 /* wait for the heartbeat checker thread to exit */
972 wait_for_completion(&xpc_hb_checker_exited); 885 wait_for_completion(&xpc_hb_checker_exited);
973 886
974
975 /* sleep for a 1/3 of a second or so */ 887 /* sleep for a 1/3 of a second or so */
976 (void) msleep_interruptible(300); 888 (void)msleep_interruptible(300);
977
978 889
979 /* wait for all partitions to become inactive */ 890 /* wait for all partitions to become inactive */
980 891
@@ -988,7 +899,7 @@ xpc_do_exit(enum xpc_retval reason)
988 part = &xpc_partitions[partid]; 899 part = &xpc_partitions[partid];
989 900
990 if (xpc_partition_disengaged(part) && 901 if (xpc_partition_disengaged(part) &&
991 part->act_state == XPC_P_INACTIVE) { 902 part->act_state == XPC_P_INACTIVE) {
992 continue; 903 continue;
993 } 904 }
994 905
@@ -997,47 +908,46 @@ xpc_do_exit(enum xpc_retval reason)
997 XPC_DEACTIVATE_PARTITION(part, reason); 908 XPC_DEACTIVATE_PARTITION(part, reason);
998 909
999 if (part->disengage_request_timeout > 910 if (part->disengage_request_timeout >
1000 disengage_request_timeout) { 911 disengage_request_timeout) {
1001 disengage_request_timeout = 912 disengage_request_timeout =
1002 part->disengage_request_timeout; 913 part->disengage_request_timeout;
1003 } 914 }
1004 } 915 }
1005 916
1006 if (xpc_partition_engaged(-1UL)) { 917 if (xpc_partition_engaged(-1UL)) {
1007 if (time_after(jiffies, printmsg_time)) { 918 if (time_after(jiffies, printmsg_time)) {
1008 dev_info(xpc_part, "waiting for remote " 919 dev_info(xpc_part, "waiting for remote "
1009 "partitions to disengage, timeout in " 920 "partitions to disengage, timeout in "
1010 "%ld seconds\n", 921 "%ld seconds\n",
1011 (disengage_request_timeout - jiffies) 922 (disengage_request_timeout - jiffies)
1012 / HZ); 923 / HZ);
1013 printmsg_time = jiffies + 924 printmsg_time = jiffies +
1014 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 925 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
1015 printed_waiting_msg = 1; 926 printed_waiting_msg = 1;
1016 } 927 }
1017 928
1018 } else if (active_part_count > 0) { 929 } else if (active_part_count > 0) {
1019 if (printed_waiting_msg) { 930 if (printed_waiting_msg) {
1020 dev_info(xpc_part, "waiting for local partition" 931 dev_info(xpc_part, "waiting for local partition"
1021 " to disengage\n"); 932 " to disengage\n");
1022 printed_waiting_msg = 0; 933 printed_waiting_msg = 0;
1023 } 934 }
1024 935
1025 } else { 936 } else {
1026 if (!xpc_disengage_request_timedout) { 937 if (!xpc_disengage_request_timedout) {
1027 dev_info(xpc_part, "all partitions have " 938 dev_info(xpc_part, "all partitions have "
1028 "disengaged\n"); 939 "disengaged\n");
1029 } 940 }
1030 break; 941 break;
1031 } 942 }
1032 943
1033 /* sleep for a 1/3 of a second or so */ 944 /* sleep for a 1/3 of a second or so */
1034 (void) msleep_interruptible(300); 945 (void)msleep_interruptible(300);
1035 946
1036 } while (1); 947 } while (1);
1037 948
1038 DBUG_ON(xpc_partition_engaged(-1UL)); 949 DBUG_ON(xpc_partition_engaged(-1UL));
1039 950
1040
1041 /* indicate to others that our reserved page is uninitialized */ 951 /* indicate to others that our reserved page is uninitialized */
1042 xpc_rsvd_page->vars_pa = 0; 952 xpc_rsvd_page->vars_pa = 0;
1043 953
@@ -1047,27 +957,24 @@ xpc_do_exit(enum xpc_retval reason)
1047 957
1048 if (reason == xpcUnloading) { 958 if (reason == xpcUnloading) {
1049 /* take ourselves off of the reboot_notifier_list */ 959 /* take ourselves off of the reboot_notifier_list */
1050 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 960 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1051 961
1052 /* take ourselves off of the die_notifier list */ 962 /* take ourselves off of the die_notifier list */
1053 (void) unregister_die_notifier(&xpc_die_notifier); 963 (void)unregister_die_notifier(&xpc_die_notifier);
1054 } 964 }
1055 965
1056 /* close down protections for IPI operations */ 966 /* close down protections for IPI operations */
1057 xpc_restrict_IPI_ops(); 967 xpc_restrict_IPI_ops();
1058 968
1059
1060 /* clear the interface to XPC's functions */ 969 /* clear the interface to XPC's functions */
1061 xpc_clear_interface(); 970 xpc_clear_interface();
1062 971
1063 if (xpc_sysctl) { 972 if (xpc_sysctl)
1064 unregister_sysctl_table(xpc_sysctl); 973 unregister_sysctl_table(xpc_sysctl);
1065 }
1066 974
1067 kfree(xpc_remote_copy_buffer_base); 975 kfree(xpc_remote_copy_buffer_base);
1068} 976}
1069 977
1070
1071/* 978/*
1072 * This function is called when the system is being rebooted. 979 * This function is called when the system is being rebooted.
1073 */ 980 */
@@ -1076,7 +983,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1076{ 983{
1077 enum xpc_retval reason; 984 enum xpc_retval reason;
1078 985
1079
1080 switch (event) { 986 switch (event) {
1081 case SYS_RESTART: 987 case SYS_RESTART:
1082 reason = xpcSystemReboot; 988 reason = xpcSystemReboot;
@@ -1095,7 +1001,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1095 return NOTIFY_DONE; 1001 return NOTIFY_DONE;
1096} 1002}
1097 1003
1098
1099/* 1004/*
1100 * Notify other partitions to disengage from all references to our memory. 1005 * Notify other partitions to disengage from all references to our memory.
1101 */ 1006 */
@@ -1107,17 +1012,16 @@ xpc_die_disengage(void)
1107 unsigned long engaged; 1012 unsigned long engaged;
1108 long time, printmsg_time, disengage_request_timeout; 1013 long time, printmsg_time, disengage_request_timeout;
1109 1014
1110
1111 /* keep xpc_hb_checker thread from doing anything (just in case) */ 1015 /* keep xpc_hb_checker thread from doing anything (just in case) */
1112 xpc_exiting = 1; 1016 xpc_exiting = 1;
1113 1017
1114 xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ 1018 xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */
1115 1019
1116 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1020 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1117 part = &xpc_partitions[partid]; 1021 part = &xpc_partitions[partid];
1118 1022
1119 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 1023 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
1120 remote_vars_version)) { 1024 remote_vars_version)) {
1121 1025
1122 /* just in case it was left set by an earlier XPC */ 1026 /* just in case it was left set by an earlier XPC */
1123 xpc_clear_partition_engaged(1UL << partid); 1027 xpc_clear_partition_engaged(1UL << partid);
@@ -1125,7 +1029,7 @@ xpc_die_disengage(void)
1125 } 1029 }
1126 1030
1127 if (xpc_partition_engaged(1UL << partid) || 1031 if (xpc_partition_engaged(1UL << partid) ||
1128 part->act_state != XPC_P_INACTIVE) { 1032 part->act_state != XPC_P_INACTIVE) {
1129 xpc_request_partition_disengage(part); 1033 xpc_request_partition_disengage(part);
1130 xpc_mark_partition_disengaged(part); 1034 xpc_mark_partition_disengaged(part);
1131 xpc_IPI_send_disengage(part); 1035 xpc_IPI_send_disengage(part);
@@ -1134,9 +1038,9 @@ xpc_die_disengage(void)
1134 1038
1135 time = rtc_time(); 1039 time = rtc_time();
1136 printmsg_time = time + 1040 printmsg_time = time +
1137 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); 1041 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
1138 disengage_request_timeout = time + 1042 disengage_request_timeout = time +
1139 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); 1043 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
1140 1044
1141 /* wait for all other partitions to disengage from us */ 1045 /* wait for all other partitions to disengage from us */
1142 1046
@@ -1152,8 +1056,8 @@ xpc_die_disengage(void)
1152 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1056 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1153 if (engaged & (1UL << partid)) { 1057 if (engaged & (1UL << partid)) {
1154 dev_info(xpc_part, "disengage from " 1058 dev_info(xpc_part, "disengage from "
1155 "remote partition %d timed " 1059 "remote partition %d timed "
1156 "out\n", partid); 1060 "out\n", partid);
1157 } 1061 }
1158 } 1062 }
1159 break; 1063 break;
@@ -1161,17 +1065,16 @@ xpc_die_disengage(void)
1161 1065
1162 if (time >= printmsg_time) { 1066 if (time >= printmsg_time) {
1163 dev_info(xpc_part, "waiting for remote partitions to " 1067 dev_info(xpc_part, "waiting for remote partitions to "
1164 "disengage, timeout in %ld seconds\n", 1068 "disengage, timeout in %ld seconds\n",
1165 (disengage_request_timeout - time) / 1069 (disengage_request_timeout - time) /
1166 sn_rtc_cycles_per_second); 1070 sn_rtc_cycles_per_second);
1167 printmsg_time = time + 1071 printmsg_time = time +
1168 (XPC_DISENGAGE_PRINTMSG_INTERVAL * 1072 (XPC_DISENGAGE_PRINTMSG_INTERVAL *
1169 sn_rtc_cycles_per_second); 1073 sn_rtc_cycles_per_second);
1170 } 1074 }
1171 } 1075 }
1172} 1076}
1173 1077
1174
1175/* 1078/*
1176 * This function is called when the system is being restarted or halted due 1079 * This function is called when the system is being restarted or halted due
1177 * to some sort of system failure. If this is the case we need to notify the 1080 * to some sort of system failure. If this is the case we need to notify the
@@ -1191,9 +1094,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1191 1094
1192 case DIE_KDEBUG_ENTER: 1095 case DIE_KDEBUG_ENTER:
1193 /* Should lack of heartbeat be ignored by other partitions? */ 1096 /* Should lack of heartbeat be ignored by other partitions? */
1194 if (!xpc_kdebug_ignore) { 1097 if (!xpc_kdebug_ignore)
1195 break; 1098 break;
1196 } 1099
1197 /* fall through */ 1100 /* fall through */
1198 case DIE_MCA_MONARCH_ENTER: 1101 case DIE_MCA_MONARCH_ENTER:
1199 case DIE_INIT_MONARCH_ENTER: 1102 case DIE_INIT_MONARCH_ENTER:
@@ -1203,9 +1106,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1203 1106
1204 case DIE_KDEBUG_LEAVE: 1107 case DIE_KDEBUG_LEAVE:
1205 /* Is lack of heartbeat being ignored by other partitions? */ 1108 /* Is lack of heartbeat being ignored by other partitions? */
1206 if (!xpc_kdebug_ignore) { 1109 if (!xpc_kdebug_ignore)
1207 break; 1110 break;
1208 } 1111
1209 /* fall through */ 1112 /* fall through */
1210 case DIE_MCA_MONARCH_LEAVE: 1113 case DIE_MCA_MONARCH_LEAVE:
1211 case DIE_INIT_MONARCH_LEAVE: 1114 case DIE_INIT_MONARCH_LEAVE:
@@ -1217,26 +1120,23 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1217 return NOTIFY_DONE; 1120 return NOTIFY_DONE;
1218} 1121}
1219 1122
1220
1221int __init 1123int __init
1222xpc_init(void) 1124xpc_init(void)
1223{ 1125{
1224 int ret; 1126 int ret;
1225 partid_t partid; 1127 partid_t partid;
1226 struct xpc_partition *part; 1128 struct xpc_partition *part;
1227 pid_t pid; 1129 struct task_struct *kthread;
1228 size_t buf_size; 1130 size_t buf_size;
1229 1131
1230 1132 if (!ia64_platform_is("sn2"))
1231 if (!ia64_platform_is("sn2")) {
1232 return -ENODEV; 1133 return -ENODEV;
1233 }
1234
1235 1134
1236 buf_size = max(XPC_RP_VARS_SIZE, 1135 buf_size = max(XPC_RP_VARS_SIZE,
1237 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); 1136 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
1238 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, 1137 xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
1239 GFP_KERNEL, &xpc_remote_copy_buffer_base); 1138 GFP_KERNEL,
1139 &xpc_remote_copy_buffer_base);
1240 if (xpc_remote_copy_buffer == NULL) 1140 if (xpc_remote_copy_buffer == NULL)
1241 return -ENOMEM; 1141 return -ENOMEM;
1242 1142
@@ -1256,7 +1156,7 @@ xpc_init(void)
1256 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 1156 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1257 part = &xpc_partitions[partid]; 1157 part = &xpc_partitions[partid];
1258 1158
1259 DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); 1159 DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
1260 1160
1261 part->act_IRQ_rcvd = 0; 1161 part->act_IRQ_rcvd = 0;
1262 spin_lock_init(&part->act_lock); 1162 spin_lock_init(&part->act_lock);
@@ -1265,8 +1165,8 @@ xpc_init(void)
1265 1165
1266 init_timer(&part->disengage_request_timer); 1166 init_timer(&part->disengage_request_timer);
1267 part->disengage_request_timer.function = 1167 part->disengage_request_timer.function =
1268 xpc_timeout_partition_disengage_request; 1168 xpc_timeout_partition_disengage_request;
1269 part->disengage_request_timer.data = (unsigned long) part; 1169 part->disengage_request_timer.data = (unsigned long)part;
1270 1170
1271 part->setup_state = XPC_P_UNSET; 1171 part->setup_state = XPC_P_UNSET;
1272 init_waitqueue_head(&part->teardown_wq); 1172 init_waitqueue_head(&part->teardown_wq);
@@ -1292,16 +1192,15 @@ xpc_init(void)
1292 * but rather immediately process the interrupt. 1192 * but rather immediately process the interrupt.
1293 */ 1193 */
1294 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, 1194 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
1295 "xpc hb", NULL); 1195 "xpc hb", NULL);
1296 if (ret != 0) { 1196 if (ret != 0) {
1297 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " 1197 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
1298 "errno=%d\n", -ret); 1198 "errno=%d\n", -ret);
1299 1199
1300 xpc_restrict_IPI_ops(); 1200 xpc_restrict_IPI_ops();
1301 1201
1302 if (xpc_sysctl) { 1202 if (xpc_sysctl)
1303 unregister_sysctl_table(xpc_sysctl); 1203 unregister_sysctl_table(xpc_sysctl);
1304 }
1305 1204
1306 kfree(xpc_remote_copy_buffer_base); 1205 kfree(xpc_remote_copy_buffer_base);
1307 return -EBUSY; 1206 return -EBUSY;
@@ -1319,26 +1218,22 @@ xpc_init(void)
1319 free_irq(SGI_XPC_ACTIVATE, NULL); 1218 free_irq(SGI_XPC_ACTIVATE, NULL);
1320 xpc_restrict_IPI_ops(); 1219 xpc_restrict_IPI_ops();
1321 1220
1322 if (xpc_sysctl) { 1221 if (xpc_sysctl)
1323 unregister_sysctl_table(xpc_sysctl); 1222 unregister_sysctl_table(xpc_sysctl);
1324 }
1325 1223
1326 kfree(xpc_remote_copy_buffer_base); 1224 kfree(xpc_remote_copy_buffer_base);
1327 return -EBUSY; 1225 return -EBUSY;
1328 } 1226 }
1329 1227
1330
1331 /* add ourselves to the reboot_notifier_list */ 1228 /* add ourselves to the reboot_notifier_list */
1332 ret = register_reboot_notifier(&xpc_reboot_notifier); 1229 ret = register_reboot_notifier(&xpc_reboot_notifier);
1333 if (ret != 0) { 1230 if (ret != 0)
1334 dev_warn(xpc_part, "can't register reboot notifier\n"); 1231 dev_warn(xpc_part, "can't register reboot notifier\n");
1335 }
1336 1232
1337 /* add ourselves to the die_notifier list */ 1233 /* add ourselves to the die_notifier list */
1338 ret = register_die_notifier(&xpc_die_notifier); 1234 ret = register_die_notifier(&xpc_die_notifier);
1339 if (ret != 0) { 1235 if (ret != 0)
1340 dev_warn(xpc_part, "can't register die notifier\n"); 1236 dev_warn(xpc_part, "can't register die notifier\n");
1341 }
1342 1237
1343 init_timer(&xpc_hb_timer); 1238 init_timer(&xpc_hb_timer);
1344 xpc_hb_timer.function = xpc_hb_beater; 1239 xpc_hb_timer.function = xpc_hb_beater;
@@ -1347,39 +1242,38 @@ xpc_init(void)
1347 * The real work-horse behind xpc. This processes incoming 1242 * The real work-horse behind xpc. This processes incoming
1348 * interrupts and monitors remote heartbeats. 1243 * interrupts and monitors remote heartbeats.
1349 */ 1244 */
1350 pid = kernel_thread(xpc_hb_checker, NULL, 0); 1245 kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
1351 if (pid < 0) { 1246 if (IS_ERR(kthread)) {
1352 dev_err(xpc_part, "failed while forking hb check thread\n"); 1247 dev_err(xpc_part, "failed while forking hb check thread\n");
1353 1248
1354 /* indicate to others that our reserved page is uninitialized */ 1249 /* indicate to others that our reserved page is uninitialized */
1355 xpc_rsvd_page->vars_pa = 0; 1250 xpc_rsvd_page->vars_pa = 0;
1356 1251
1357 /* take ourselves off of the reboot_notifier_list */ 1252 /* take ourselves off of the reboot_notifier_list */
1358 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 1253 (void)unregister_reboot_notifier(&xpc_reboot_notifier);
1359 1254
1360 /* take ourselves off of the die_notifier list */ 1255 /* take ourselves off of the die_notifier list */
1361 (void) unregister_die_notifier(&xpc_die_notifier); 1256 (void)unregister_die_notifier(&xpc_die_notifier);
1362 1257
1363 del_timer_sync(&xpc_hb_timer); 1258 del_timer_sync(&xpc_hb_timer);
1364 free_irq(SGI_XPC_ACTIVATE, NULL); 1259 free_irq(SGI_XPC_ACTIVATE, NULL);
1365 xpc_restrict_IPI_ops(); 1260 xpc_restrict_IPI_ops();
1366 1261
1367 if (xpc_sysctl) { 1262 if (xpc_sysctl)
1368 unregister_sysctl_table(xpc_sysctl); 1263 unregister_sysctl_table(xpc_sysctl);
1369 }
1370 1264
1371 kfree(xpc_remote_copy_buffer_base); 1265 kfree(xpc_remote_copy_buffer_base);
1372 return -EBUSY; 1266 return -EBUSY;
1373 } 1267 }
1374 1268
1375
1376 /* 1269 /*
1377 * Startup a thread that will attempt to discover other partitions to 1270 * Startup a thread that will attempt to discover other partitions to
1378 * activate based on info provided by SAL. This new thread is short 1271 * activate based on info provided by SAL. This new thread is short
1379 * lived and will exit once discovery is complete. 1272 * lived and will exit once discovery is complete.
1380 */ 1273 */
1381 pid = kernel_thread(xpc_initiate_discovery, NULL, 0); 1274 kthread = kthread_run(xpc_initiate_discovery, NULL,
1382 if (pid < 0) { 1275 XPC_DISCOVERY_THREAD_NAME);
1276 if (IS_ERR(kthread)) {
1383 dev_err(xpc_part, "failed while forking discovery thread\n"); 1277 dev_err(xpc_part, "failed while forking discovery thread\n");
1384 1278
1385 /* mark this new thread as a non-starter */ 1279 /* mark this new thread as a non-starter */
@@ -1389,7 +1283,6 @@ xpc_init(void)
1389 return -EBUSY; 1283 return -EBUSY;
1390 } 1284 }
1391 1285
1392
1393 /* set the interface to point at XPC's functions */ 1286 /* set the interface to point at XPC's functions */
1394 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, 1287 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
1395 xpc_initiate_allocate, xpc_initiate_send, 1288 xpc_initiate_allocate, xpc_initiate_send,
@@ -1398,16 +1291,16 @@ xpc_init(void)
1398 1291
1399 return 0; 1292 return 0;
1400} 1293}
1401module_init(xpc_init);
1402 1294
1295module_init(xpc_init);
1403 1296
1404void __exit 1297void __exit
1405xpc_exit(void) 1298xpc_exit(void)
1406{ 1299{
1407 xpc_do_exit(xpcUnloading); 1300 xpc_do_exit(xpcUnloading);
1408} 1301}
1409module_exit(xpc_exit);
1410 1302
1303module_exit(xpc_exit);
1411 1304
1412MODULE_AUTHOR("Silicon Graphics, Inc."); 1305MODULE_AUTHOR("Silicon Graphics, Inc.");
1413MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); 1306MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
@@ -1415,17 +1308,16 @@ MODULE_LICENSE("GPL");
1415 1308
1416module_param(xpc_hb_interval, int, 0); 1309module_param(xpc_hb_interval, int, 0);
1417MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " 1310MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
1418 "heartbeat increments."); 1311 "heartbeat increments.");
1419 1312
1420module_param(xpc_hb_check_interval, int, 0); 1313module_param(xpc_hb_check_interval, int, 0);
1421MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1314MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1422 "heartbeat checks."); 1315 "heartbeat checks.");
1423 1316
1424module_param(xpc_disengage_request_timelimit, int, 0); 1317module_param(xpc_disengage_request_timelimit, int, 0);
1425MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " 1318MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
1426 "for disengage request to complete."); 1319 "for disengage request to complete.");
1427 1320
1428module_param(xpc_kdebug_ignore, int, 0); 1321module_param(xpc_kdebug_ignore, int, 0);
1429MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " 1322MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
1430 "other partitions when dropping into kdebug."); 1323 "other partitions when dropping into kdebug.");
1431
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 9e97c2684832..27e200ec5826 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Communication (XPC) partition support. 10 * Cross Partition Communication (XPC) partition support.
12 * 11 *
@@ -16,7 +15,6 @@
16 * 15 *
17 */ 16 */
18 17
19
20#include <linux/kernel.h> 18#include <linux/kernel.h>
21#include <linux/sysctl.h> 19#include <linux/sysctl.h>
22#include <linux/cache.h> 20#include <linux/cache.h>
@@ -28,13 +26,11 @@
28#include <asm/sn/sn_sal.h> 26#include <asm/sn/sn_sal.h>
29#include <asm/sn/nodepda.h> 27#include <asm/sn/nodepda.h>
30#include <asm/sn/addrs.h> 28#include <asm/sn/addrs.h>
31#include <asm/sn/xpc.h> 29#include "xpc.h"
32
33 30
34/* XPC is exiting flag */ 31/* XPC is exiting flag */
35int xpc_exiting; 32int xpc_exiting;
36 33
37
38/* SH_IPI_ACCESS shub register value on startup */ 34/* SH_IPI_ACCESS shub register value on startup */
39static u64 xpc_sh1_IPI_access; 35static u64 xpc_sh1_IPI_access;
40static u64 xpc_sh2_IPI_access0; 36static u64 xpc_sh2_IPI_access0;
@@ -42,11 +38,9 @@ static u64 xpc_sh2_IPI_access1;
42static u64 xpc_sh2_IPI_access2; 38static u64 xpc_sh2_IPI_access2;
43static u64 xpc_sh2_IPI_access3; 39static u64 xpc_sh2_IPI_access3;
44 40
45
46/* original protection values for each node */ 41/* original protection values for each node */
47u64 xpc_prot_vec[MAX_NUMNODES]; 42u64 xpc_prot_vec[MAX_NUMNODES];
48 43
49
50/* this partition's reserved page pointers */ 44/* this partition's reserved page pointers */
51struct xpc_rsvd_page *xpc_rsvd_page; 45struct xpc_rsvd_page *xpc_rsvd_page;
52static u64 *xpc_part_nasids; 46static u64 *xpc_part_nasids;
@@ -57,7 +51,6 @@ struct xpc_vars_part *xpc_vars_part;
57static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ 51static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
58static int xp_nasid_mask_words; /* actual size in words of nasid mask */ 52static int xp_nasid_mask_words; /* actual size in words of nasid mask */
59 53
60
61/* 54/*
62 * For performance reasons, each entry of xpc_partitions[] is cacheline 55 * For performance reasons, each entry of xpc_partitions[] is cacheline
63 * aligned. And xpc_partitions[] is padded with an additional entry at the 56 * aligned. And xpc_partitions[] is padded with an additional entry at the
@@ -66,7 +59,6 @@ static int xp_nasid_mask_words; /* actual size in words of nasid mask */
66 */ 59 */
67struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; 60struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
68 61
69
70/* 62/*
71 * Generic buffer used to store a local copy of portions of a remote 63 * Generic buffer used to store a local copy of portions of a remote
72 * partition's reserved page (either its header and part_nasids mask, 64 * partition's reserved page (either its header and part_nasids mask,
@@ -75,7 +67,6 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
75char *xpc_remote_copy_buffer; 67char *xpc_remote_copy_buffer;
76void *xpc_remote_copy_buffer_base; 68void *xpc_remote_copy_buffer_base;
77 69
78
79/* 70/*
80 * Guarantee that the kmalloc'd memory is cacheline aligned. 71 * Guarantee that the kmalloc'd memory is cacheline aligned.
81 */ 72 */
@@ -84,22 +75,21 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
84{ 75{
85 /* see if kmalloc will give us cachline aligned memory by default */ 76 /* see if kmalloc will give us cachline aligned memory by default */
86 *base = kmalloc(size, flags); 77 *base = kmalloc(size, flags);
87 if (*base == NULL) { 78 if (*base == NULL)
88 return NULL; 79 return NULL;
89 } 80
90 if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { 81 if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
91 return *base; 82 return *base;
92 } 83
93 kfree(*base); 84 kfree(*base);
94 85
95 /* nope, we'll have to do it ourselves */ 86 /* nope, we'll have to do it ourselves */
96 *base = kmalloc(size + L1_CACHE_BYTES, flags); 87 *base = kmalloc(size + L1_CACHE_BYTES, flags);
97 if (*base == NULL) { 88 if (*base == NULL)
98 return NULL; 89 return NULL;
99 }
100 return (void *) L1_CACHE_ALIGN((u64) *base);
101}
102 90
91 return (void *)L1_CACHE_ALIGN((u64)*base);
92}
103 93
104/* 94/*
105 * Given a nasid, get the physical address of the partition's reserved page 95 * Given a nasid, get the physical address of the partition's reserved page
@@ -117,25 +107,24 @@ xpc_get_rsvd_page_pa(int nasid)
117 u64 buf_len = 0; 107 u64 buf_len = 0;
118 void *buf_base = NULL; 108 void *buf_base = NULL;
119 109
120
121 while (1) { 110 while (1) {
122 111
123 status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, 112 status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
124 &len); 113 &len);
125 114
126 dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" 115 dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
127 "0x%016lx, address=0x%016lx, len=0x%016lx\n", 116 "0x%016lx, address=0x%016lx, len=0x%016lx\n",
128 status, cookie, rp_pa, len); 117 status, cookie, rp_pa, len);
129 118
130 if (status != SALRET_MORE_PASSES) { 119 if (status != SALRET_MORE_PASSES)
131 break; 120 break;
132 }
133 121
134 if (L1_CACHE_ALIGN(len) > buf_len) { 122 if (L1_CACHE_ALIGN(len) > buf_len) {
135 kfree(buf_base); 123 kfree(buf_base);
136 buf_len = L1_CACHE_ALIGN(len); 124 buf_len = L1_CACHE_ALIGN(len);
137 buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len, 125 buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
138 GFP_KERNEL, &buf_base); 126 GFP_KERNEL,
127 &buf_base);
139 if (buf_base == NULL) { 128 if (buf_base == NULL) {
140 dev_err(xpc_part, "unable to kmalloc " 129 dev_err(xpc_part, "unable to kmalloc "
141 "len=0x%016lx\n", buf_len); 130 "len=0x%016lx\n", buf_len);
@@ -145,7 +134,7 @@ xpc_get_rsvd_page_pa(int nasid)
145 } 134 }
146 135
147 bte_res = xp_bte_copy(rp_pa, buf, buf_len, 136 bte_res = xp_bte_copy(rp_pa, buf, buf_len,
148 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 137 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
149 if (bte_res != BTE_SUCCESS) { 138 if (bte_res != BTE_SUCCESS) {
150 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); 139 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
151 status = SALRET_ERROR; 140 status = SALRET_ERROR;
@@ -155,14 +144,13 @@ xpc_get_rsvd_page_pa(int nasid)
155 144
156 kfree(buf_base); 145 kfree(buf_base);
157 146
158 if (status != SALRET_OK) { 147 if (status != SALRET_OK)
159 rp_pa = 0; 148 rp_pa = 0;
160 } 149
161 dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); 150 dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
162 return rp_pa; 151 return rp_pa;
163} 152}
164 153
165
166/* 154/*
167 * Fill the partition reserved page with the information needed by 155 * Fill the partition reserved page with the information needed by
168 * other partitions to discover we are alive and establish initial 156 * other partitions to discover we are alive and establish initial
@@ -176,7 +164,6 @@ xpc_rsvd_page_init(void)
176 u64 rp_pa, nasid_array = 0; 164 u64 rp_pa, nasid_array = 0;
177 int i, ret; 165 int i, ret;
178 166
179
180 /* get the local reserved page's address */ 167 /* get the local reserved page's address */
181 168
182 preempt_disable(); 169 preempt_disable();
@@ -186,7 +173,7 @@ xpc_rsvd_page_init(void)
186 dev_err(xpc_part, "SAL failed to locate the reserved page\n"); 173 dev_err(xpc_part, "SAL failed to locate the reserved page\n");
187 return NULL; 174 return NULL;
188 } 175 }
189 rp = (struct xpc_rsvd_page *) __va(rp_pa); 176 rp = (struct xpc_rsvd_page *)__va(rp_pa);
190 177
191 if (rp->partid != sn_partition_id) { 178 if (rp->partid != sn_partition_id) {
192 dev_err(xpc_part, "the reserved page's partid of %d should be " 179 dev_err(xpc_part, "the reserved page's partid of %d should be "
@@ -222,8 +209,9 @@ xpc_rsvd_page_init(void)
222 * on subsequent loads of XPC. This AMO page is never freed, and its 209 * on subsequent loads of XPC. This AMO page is never freed, and its
223 * memory protections are never restricted. 210 * memory protections are never restricted.
224 */ 211 */
225 if ((amos_page = xpc_vars->amos_page) == NULL) { 212 amos_page = xpc_vars->amos_page;
226 amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0)); 213 if (amos_page == NULL) {
214 amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
227 if (amos_page == NULL) { 215 if (amos_page == NULL) {
228 dev_err(xpc_part, "can't allocate page of AMOs\n"); 216 dev_err(xpc_part, "can't allocate page of AMOs\n");
229 return NULL; 217 return NULL;
@@ -234,30 +222,31 @@ xpc_rsvd_page_init(void)
234 * when xpc_allow_IPI_ops() is called via xpc_hb_init(). 222 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
235 */ 223 */
236 if (!enable_shub_wars_1_1()) { 224 if (!enable_shub_wars_1_1()) {
237 ret = sn_change_memprotect(ia64_tpa((u64) amos_page), 225 ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
238 PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, 226 PAGE_SIZE,
239 &nasid_array); 227 SN_MEMPROT_ACCESS_CLASS_1,
228 &nasid_array);
240 if (ret != 0) { 229 if (ret != 0) {
241 dev_err(xpc_part, "can't change memory " 230 dev_err(xpc_part, "can't change memory "
242 "protections\n"); 231 "protections\n");
243 uncached_free_page(__IA64_UNCACHED_OFFSET | 232 uncached_free_page(__IA64_UNCACHED_OFFSET |
244 TO_PHYS((u64) amos_page)); 233 TO_PHYS((u64)amos_page));
245 return NULL; 234 return NULL;
246 } 235 }
247 } 236 }
248 } else if (!IS_AMO_ADDRESS((u64) amos_page)) { 237 } else if (!IS_AMO_ADDRESS((u64)amos_page)) {
249 /* 238 /*
250 * EFI's XPBOOT can also set amos_page in the reserved page, 239 * EFI's XPBOOT can also set amos_page in the reserved page,
251 * but it happens to leave it as an uncached physical address 240 * but it happens to leave it as an uncached physical address
252 * and we need it to be an uncached virtual, so we'll have to 241 * and we need it to be an uncached virtual, so we'll have to
253 * convert it. 242 * convert it.
254 */ 243 */
255 if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { 244 if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
256 dev_err(xpc_part, "previously used amos_page address " 245 dev_err(xpc_part, "previously used amos_page address "
257 "is bad = 0x%p\n", (void *) amos_page); 246 "is bad = 0x%p\n", (void *)amos_page);
258 return NULL; 247 return NULL;
259 } 248 }
260 amos_page = (AMO_t *) TO_AMO((u64) amos_page); 249 amos_page = (AMO_t *)TO_AMO((u64)amos_page);
261 } 250 }
262 251
263 /* clear xpc_vars */ 252 /* clear xpc_vars */
@@ -267,22 +256,20 @@ xpc_rsvd_page_init(void)
267 xpc_vars->act_nasid = cpuid_to_nasid(0); 256 xpc_vars->act_nasid = cpuid_to_nasid(0);
268 xpc_vars->act_phys_cpuid = cpu_physical_id(0); 257 xpc_vars->act_phys_cpuid = cpu_physical_id(0);
269 xpc_vars->vars_part_pa = __pa(xpc_vars_part); 258 xpc_vars->vars_part_pa = __pa(xpc_vars_part);
270 xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page); 259 xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
271 xpc_vars->amos_page = amos_page; /* save for next load of XPC */ 260 xpc_vars->amos_page = amos_page; /* save for next load of XPC */
272
273 261
274 /* clear xpc_vars_part */ 262 /* clear xpc_vars_part */
275 memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) * 263 memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
276 XP_MAX_PARTITIONS); 264 XP_MAX_PARTITIONS);
277 265
278 /* initialize the activate IRQ related AMO variables */ 266 /* initialize the activate IRQ related AMO variables */
279 for (i = 0; i < xp_nasid_mask_words; i++) { 267 for (i = 0; i < xp_nasid_mask_words; i++)
280 (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); 268 (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
281 }
282 269
283 /* initialize the engaged remote partitions related AMO variables */ 270 /* initialize the engaged remote partitions related AMO variables */
284 (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); 271 (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
285 (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); 272 (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
286 273
287 /* timestamp of when reserved page was setup by XPC */ 274 /* timestamp of when reserved page was setup by XPC */
288 rp->stamp = CURRENT_TIME; 275 rp->stamp = CURRENT_TIME;
@@ -296,7 +283,6 @@ xpc_rsvd_page_init(void)
296 return rp; 283 return rp;
297} 284}
298 285
299
300/* 286/*
301 * Change protections to allow IPI operations (and AMO operations on 287 * Change protections to allow IPI operations (and AMO operations on
302 * Shub 1.1 systems). 288 * Shub 1.1 systems).
@@ -307,39 +293,38 @@ xpc_allow_IPI_ops(void)
307 int node; 293 int node;
308 int nasid; 294 int nasid;
309 295
310 296 /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
311 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
312 297
313 if (is_shub2()) { 298 if (is_shub2()) {
314 xpc_sh2_IPI_access0 = 299 xpc_sh2_IPI_access0 =
315 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); 300 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
316 xpc_sh2_IPI_access1 = 301 xpc_sh2_IPI_access1 =
317 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); 302 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
318 xpc_sh2_IPI_access2 = 303 xpc_sh2_IPI_access2 =
319 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); 304 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
320 xpc_sh2_IPI_access3 = 305 xpc_sh2_IPI_access3 =
321 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); 306 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
322 307
323 for_each_online_node(node) { 308 for_each_online_node(node) {
324 nasid = cnodeid_to_nasid(node); 309 nasid = cnodeid_to_nasid(node);
325 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 310 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
326 -1UL); 311 -1UL);
327 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 312 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
328 -1UL); 313 -1UL);
329 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 314 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
330 -1UL); 315 -1UL);
331 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 316 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
332 -1UL); 317 -1UL);
333 } 318 }
334 319
335 } else { 320 } else {
336 xpc_sh1_IPI_access = 321 xpc_sh1_IPI_access =
337 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); 322 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
338 323
339 for_each_online_node(node) { 324 for_each_online_node(node) {
340 nasid = cnodeid_to_nasid(node); 325 nasid = cnodeid_to_nasid(node);
341 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 326 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
342 -1UL); 327 -1UL);
343 328
344 /* 329 /*
345 * Since the BIST collides with memory operations on 330 * Since the BIST collides with memory operations on
@@ -347,21 +332,23 @@ xpc_allow_IPI_ops(void)
347 */ 332 */
348 if (enable_shub_wars_1_1()) { 333 if (enable_shub_wars_1_1()) {
349 /* open up everything */ 334 /* open up everything */
350 xpc_prot_vec[node] = (u64) HUB_L((u64 *) 335 xpc_prot_vec[node] = (u64)HUB_L((u64 *)
351 GLOBAL_MMR_ADDR(nasid, 336 GLOBAL_MMR_ADDR
352 SH1_MD_DQLP_MMR_DIR_PRIVEC0)); 337 (nasid,
353 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 338 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
354 SH1_MD_DQLP_MMR_DIR_PRIVEC0), 339 HUB_S((u64 *)
355 -1UL); 340 GLOBAL_MMR_ADDR(nasid,
356 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 341 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
357 SH1_MD_DQRP_MMR_DIR_PRIVEC0), 342 -1UL);
358 -1UL); 343 HUB_S((u64 *)
344 GLOBAL_MMR_ADDR(nasid,
345 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
346 -1UL);
359 } 347 }
360 } 348 }
361 } 349 }
362} 350}
363 351
364
365/* 352/*
366 * Restrict protections to disallow IPI operations (and AMO operations on 353 * Restrict protections to disallow IPI operations (and AMO operations on
367 * Shub 1.1 systems). 354 * Shub 1.1 systems).
@@ -372,43 +359,41 @@ xpc_restrict_IPI_ops(void)
372 int node; 359 int node;
373 int nasid; 360 int nasid;
374 361
375 362 /* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */
376 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
377 363
378 if (is_shub2()) { 364 if (is_shub2()) {
379 365
380 for_each_online_node(node) { 366 for_each_online_node(node) {
381 nasid = cnodeid_to_nasid(node); 367 nasid = cnodeid_to_nasid(node);
382 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), 368 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
383 xpc_sh2_IPI_access0); 369 xpc_sh2_IPI_access0);
384 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), 370 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
385 xpc_sh2_IPI_access1); 371 xpc_sh2_IPI_access1);
386 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), 372 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
387 xpc_sh2_IPI_access2); 373 xpc_sh2_IPI_access2);
388 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), 374 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
389 xpc_sh2_IPI_access3); 375 xpc_sh2_IPI_access3);
390 } 376 }
391 377
392 } else { 378 } else {
393 379
394 for_each_online_node(node) { 380 for_each_online_node(node) {
395 nasid = cnodeid_to_nasid(node); 381 nasid = cnodeid_to_nasid(node);
396 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), 382 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
397 xpc_sh1_IPI_access); 383 xpc_sh1_IPI_access);
398 384
399 if (enable_shub_wars_1_1()) { 385 if (enable_shub_wars_1_1()) {
400 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 386 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
401 SH1_MD_DQLP_MMR_DIR_PRIVEC0), 387 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
402 xpc_prot_vec[node]); 388 xpc_prot_vec[node]);
403 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, 389 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
404 SH1_MD_DQRP_MMR_DIR_PRIVEC0), 390 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
405 xpc_prot_vec[node]); 391 xpc_prot_vec[node]);
406 } 392 }
407 } 393 }
408 } 394 }
409} 395}
410 396
411
412/* 397/*
413 * At periodic intervals, scan through all active partitions and ensure 398 * At periodic intervals, scan through all active partitions and ensure
414 * their heartbeat is still active. If not, the partition is deactivated. 399 * their heartbeat is still active. If not, the partition is deactivated.
@@ -421,34 +406,31 @@ xpc_check_remote_hb(void)
421 partid_t partid; 406 partid_t partid;
422 bte_result_t bres; 407 bte_result_t bres;
423 408
424 409 remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
425 remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
426 410
427 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 411 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
428 412
429 if (xpc_exiting) { 413 if (xpc_exiting)
430 break; 414 break;
431 }
432 415
433 if (partid == sn_partition_id) { 416 if (partid == sn_partition_id)
434 continue; 417 continue;
435 }
436 418
437 part = &xpc_partitions[partid]; 419 part = &xpc_partitions[partid];
438 420
439 if (part->act_state == XPC_P_INACTIVE || 421 if (part->act_state == XPC_P_INACTIVE ||
440 part->act_state == XPC_P_DEACTIVATING) { 422 part->act_state == XPC_P_DEACTIVATING) {
441 continue; 423 continue;
442 } 424 }
443 425
444 /* pull the remote_hb cache line */ 426 /* pull the remote_hb cache line */
445 bres = xp_bte_copy(part->remote_vars_pa, 427 bres = xp_bte_copy(part->remote_vars_pa,
446 (u64) remote_vars, 428 (u64)remote_vars,
447 XPC_RP_VARS_SIZE, 429 XPC_RP_VARS_SIZE,
448 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 430 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
449 if (bres != BTE_SUCCESS) { 431 if (bres != BTE_SUCCESS) {
450 XPC_DEACTIVATE_PARTITION(part, 432 XPC_DEACTIVATE_PARTITION(part,
451 xpc_map_bte_errors(bres)); 433 xpc_map_bte_errors(bres));
452 continue; 434 continue;
453 } 435 }
454 436
@@ -459,8 +441,8 @@ xpc_check_remote_hb(void)
459 remote_vars->heartbeating_to_mask); 441 remote_vars->heartbeating_to_mask);
460 442
461 if (((remote_vars->heartbeat == part->last_heartbeat) && 443 if (((remote_vars->heartbeat == part->last_heartbeat) &&
462 (remote_vars->heartbeat_offline == 0)) || 444 (remote_vars->heartbeat_offline == 0)) ||
463 !xpc_hb_allowed(sn_partition_id, remote_vars)) { 445 !xpc_hb_allowed(sn_partition_id, remote_vars)) {
464 446
465 XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); 447 XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
466 continue; 448 continue;
@@ -470,7 +452,6 @@ xpc_check_remote_hb(void)
470 } 452 }
471} 453}
472 454
473
474/* 455/*
475 * Get a copy of a portion of the remote partition's rsvd page. 456 * Get a copy of a portion of the remote partition's rsvd page.
476 * 457 *
@@ -480,59 +461,48 @@ xpc_check_remote_hb(void)
480 */ 461 */
481static enum xpc_retval 462static enum xpc_retval
482xpc_get_remote_rp(int nasid, u64 *discovered_nasids, 463xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
483 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) 464 struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
484{ 465{
485 int bres, i; 466 int bres, i;
486 467
487
488 /* get the reserved page's physical address */ 468 /* get the reserved page's physical address */
489 469
490 *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); 470 *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
491 if (*remote_rp_pa == 0) { 471 if (*remote_rp_pa == 0)
492 return xpcNoRsvdPageAddr; 472 return xpcNoRsvdPageAddr;
493 }
494
495 473
496 /* pull over the reserved page header and part_nasids mask */ 474 /* pull over the reserved page header and part_nasids mask */
497 bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp, 475 bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
498 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, 476 XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
499 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 477 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
500 if (bres != BTE_SUCCESS) { 478 if (bres != BTE_SUCCESS)
501 return xpc_map_bte_errors(bres); 479 return xpc_map_bte_errors(bres);
502 }
503
504 480
505 if (discovered_nasids != NULL) { 481 if (discovered_nasids != NULL) {
506 u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); 482 u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
507 483
508 484 for (i = 0; i < xp_nasid_mask_words; i++)
509 for (i = 0; i < xp_nasid_mask_words; i++) {
510 discovered_nasids[i] |= remote_part_nasids[i]; 485 discovered_nasids[i] |= remote_part_nasids[i];
511 }
512 } 486 }
513 487
514
515 /* check that the partid is for another partition */ 488 /* check that the partid is for another partition */
516 489
517 if (remote_rp->partid < 1 || 490 if (remote_rp->partid < 1 ||
518 remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { 491 remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
519 return xpcInvalidPartid; 492 return xpcInvalidPartid;
520 } 493 }
521 494
522 if (remote_rp->partid == sn_partition_id) { 495 if (remote_rp->partid == sn_partition_id)
523 return xpcLocalPartid; 496 return xpcLocalPartid;
524 }
525
526 497
527 if (XPC_VERSION_MAJOR(remote_rp->version) != 498 if (XPC_VERSION_MAJOR(remote_rp->version) !=
528 XPC_VERSION_MAJOR(XPC_RP_VERSION)) { 499 XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
529 return xpcBadVersion; 500 return xpcBadVersion;
530 } 501 }
531 502
532 return xpcSuccess; 503 return xpcSuccess;
533} 504}
534 505
535
536/* 506/*
537 * Get a copy of the remote partition's XPC variables from the reserved page. 507 * Get a copy of the remote partition's XPC variables from the reserved page.
538 * 508 *
@@ -544,34 +514,30 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
544{ 514{
545 int bres; 515 int bres;
546 516
547 517 if (remote_vars_pa == 0)
548 if (remote_vars_pa == 0) {
549 return xpcVarsNotSet; 518 return xpcVarsNotSet;
550 }
551 519
552 /* pull over the cross partition variables */ 520 /* pull over the cross partition variables */
553 bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE, 521 bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
554 (BTE_NOTIFY | BTE_WACQUIRE), NULL); 522 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
555 if (bres != BTE_SUCCESS) { 523 if (bres != BTE_SUCCESS)
556 return xpc_map_bte_errors(bres); 524 return xpc_map_bte_errors(bres);
557 }
558 525
559 if (XPC_VERSION_MAJOR(remote_vars->version) != 526 if (XPC_VERSION_MAJOR(remote_vars->version) !=
560 XPC_VERSION_MAJOR(XPC_V_VERSION)) { 527 XPC_VERSION_MAJOR(XPC_V_VERSION)) {
561 return xpcBadVersion; 528 return xpcBadVersion;
562 } 529 }
563 530
564 return xpcSuccess; 531 return xpcSuccess;
565} 532}
566 533
567
568/* 534/*
569 * Update the remote partition's info. 535 * Update the remote partition's info.
570 */ 536 */
571static void 537static void
572xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version, 538xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
573 struct timespec *remote_rp_stamp, u64 remote_rp_pa, 539 struct timespec *remote_rp_stamp, u64 remote_rp_pa,
574 u64 remote_vars_pa, struct xpc_vars *remote_vars) 540 u64 remote_vars_pa, struct xpc_vars *remote_vars)
575{ 541{
576 part->remote_rp_version = remote_rp_version; 542 part->remote_rp_version = remote_rp_version;
577 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", 543 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
@@ -613,7 +579,6 @@ xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
613 part->remote_vars_version); 579 part->remote_vars_version);
614} 580}
615 581
616
617/* 582/*
618 * Prior code has determined the nasid which generated an IPI. Inspect 583 * Prior code has determined the nasid which generated an IPI. Inspect
619 * that nasid to determine if its partition needs to be activated or 584 * that nasid to determine if its partition needs to be activated or
@@ -643,54 +608,51 @@ xpc_identify_act_IRQ_req(int nasid)
643 struct xpc_partition *part; 608 struct xpc_partition *part;
644 enum xpc_retval ret; 609 enum xpc_retval ret;
645 610
646
647 /* pull over the reserved page structure */ 611 /* pull over the reserved page structure */
648 612
649 remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; 613 remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
650 614
651 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); 615 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
652 if (ret != xpcSuccess) { 616 if (ret != xpcSuccess) {
653 dev_warn(xpc_part, "unable to get reserved page from nasid %d, " 617 dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
654 "which sent interrupt, reason=%d\n", nasid, ret); 618 "which sent interrupt, reason=%d\n", nasid, ret);
655 return; 619 return;
656 } 620 }
657 621
658 remote_vars_pa = remote_rp->vars_pa; 622 remote_vars_pa = remote_rp->vars_pa;
659 remote_rp_version = remote_rp->version; 623 remote_rp_version = remote_rp->version;
660 if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { 624 if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
661 remote_rp_stamp = remote_rp->stamp; 625 remote_rp_stamp = remote_rp->stamp;
662 } 626
663 partid = remote_rp->partid; 627 partid = remote_rp->partid;
664 part = &xpc_partitions[partid]; 628 part = &xpc_partitions[partid];
665 629
666
667 /* pull over the cross partition variables */ 630 /* pull over the cross partition variables */
668 631
669 remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; 632 remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
670 633
671 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 634 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
672 if (ret != xpcSuccess) { 635 if (ret != xpcSuccess) {
673 636
674 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " 637 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
675 "which sent interrupt, reason=%d\n", nasid, ret); 638 "which sent interrupt, reason=%d\n", nasid, ret);
676 639
677 XPC_DEACTIVATE_PARTITION(part, ret); 640 XPC_DEACTIVATE_PARTITION(part, ret);
678 return; 641 return;
679 } 642 }
680 643
681
682 part->act_IRQ_rcvd++; 644 part->act_IRQ_rcvd++;
683 645
684 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " 646 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
685 "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, 647 "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
686 remote_vars->heartbeat, remote_vars->heartbeating_to_mask); 648 remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
687 649
688 if (xpc_partition_disengaged(part) && 650 if (xpc_partition_disengaged(part) &&
689 part->act_state == XPC_P_INACTIVE) { 651 part->act_state == XPC_P_INACTIVE) {
690 652
691 xpc_update_partition_info(part, remote_rp_version, 653 xpc_update_partition_info(part, remote_rp_version,
692 &remote_rp_stamp, remote_rp_pa, 654 &remote_rp_stamp, remote_rp_pa,
693 remote_vars_pa, remote_vars); 655 remote_vars_pa, remote_vars);
694 656
695 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 657 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
696 if (xpc_partition_disengage_requested(1UL << partid)) { 658 if (xpc_partition_disengage_requested(1UL << partid)) {
@@ -714,16 +676,15 @@ xpc_identify_act_IRQ_req(int nasid)
714 676
715 if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) { 677 if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
716 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 678 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
717 remote_vars_version)); 679 remote_vars_version));
718 680
719 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) { 681 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
720 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> 682 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
721 version)); 683 version));
722 /* see if the other side rebooted */ 684 /* see if the other side rebooted */
723 if (part->remote_amos_page_pa == 685 if (part->remote_amos_page_pa ==
724 remote_vars->amos_page_pa && 686 remote_vars->amos_page_pa &&
725 xpc_hb_allowed(sn_partition_id, 687 xpc_hb_allowed(sn_partition_id, remote_vars)) {
726 remote_vars)) {
727 /* doesn't look that way, so ignore the IPI */ 688 /* doesn't look that way, so ignore the IPI */
728 return; 689 return;
729 } 690 }
@@ -735,8 +696,8 @@ xpc_identify_act_IRQ_req(int nasid)
735 */ 696 */
736 697
737 xpc_update_partition_info(part, remote_rp_version, 698 xpc_update_partition_info(part, remote_rp_version,
738 &remote_rp_stamp, remote_rp_pa, 699 &remote_rp_stamp, remote_rp_pa,
739 remote_vars_pa, remote_vars); 700 remote_vars_pa, remote_vars);
740 part->reactivate_nasid = nasid; 701 part->reactivate_nasid = nasid;
741 XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 702 XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
742 return; 703 return;
@@ -756,15 +717,15 @@ xpc_identify_act_IRQ_req(int nasid)
756 xpc_clear_partition_disengage_request(1UL << partid); 717 xpc_clear_partition_disengage_request(1UL << partid);
757 718
758 xpc_update_partition_info(part, remote_rp_version, 719 xpc_update_partition_info(part, remote_rp_version,
759 &remote_rp_stamp, remote_rp_pa, 720 &remote_rp_stamp, remote_rp_pa,
760 remote_vars_pa, remote_vars); 721 remote_vars_pa, remote_vars);
761 reactivate = 1; 722 reactivate = 1;
762 723
763 } else { 724 } else {
764 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version)); 725 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
765 726
766 stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp, 727 stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
767 &remote_rp_stamp); 728 &remote_rp_stamp);
768 if (stamp_diff != 0) { 729 if (stamp_diff != 0) {
769 DBUG_ON(stamp_diff >= 0); 730 DBUG_ON(stamp_diff >= 0);
770 731
@@ -775,17 +736,18 @@ xpc_identify_act_IRQ_req(int nasid)
775 736
776 DBUG_ON(xpc_partition_engaged(1UL << partid)); 737 DBUG_ON(xpc_partition_engaged(1UL << partid));
777 DBUG_ON(xpc_partition_disengage_requested(1UL << 738 DBUG_ON(xpc_partition_disengage_requested(1UL <<
778 partid)); 739 partid));
779 740
780 xpc_update_partition_info(part, remote_rp_version, 741 xpc_update_partition_info(part, remote_rp_version,
781 &remote_rp_stamp, remote_rp_pa, 742 &remote_rp_stamp,
782 remote_vars_pa, remote_vars); 743 remote_rp_pa, remote_vars_pa,
744 remote_vars);
783 reactivate = 1; 745 reactivate = 1;
784 } 746 }
785 } 747 }
786 748
787 if (part->disengage_request_timeout > 0 && 749 if (part->disengage_request_timeout > 0 &&
788 !xpc_partition_disengaged(part)) { 750 !xpc_partition_disengaged(part)) {
789 /* still waiting on other side to disengage from us */ 751 /* still waiting on other side to disengage from us */
790 return; 752 return;
791 } 753 }
@@ -795,12 +757,11 @@ xpc_identify_act_IRQ_req(int nasid)
795 XPC_DEACTIVATE_PARTITION(part, xpcReactivating); 757 XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
796 758
797 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && 759 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
798 xpc_partition_disengage_requested(1UL << partid)) { 760 xpc_partition_disengage_requested(1UL << partid)) {
799 XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); 761 XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
800 } 762 }
801} 763}
802 764
803
804/* 765/*
805 * Loop through the activation AMO variables and process any bits 766 * Loop through the activation AMO variables and process any bits
806 * which are set. Each bit indicates a nasid sending a partition 767 * which are set. Each bit indicates a nasid sending a partition
@@ -813,20 +774,17 @@ xpc_identify_act_IRQ_sender(void)
813{ 774{
814 int word, bit; 775 int word, bit;
815 u64 nasid_mask; 776 u64 nasid_mask;
816 u64 nasid; /* remote nasid */ 777 u64 nasid; /* remote nasid */
817 int n_IRQs_detected = 0; 778 int n_IRQs_detected = 0;
818 AMO_t *act_amos; 779 AMO_t *act_amos;
819 780
820
821 act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; 781 act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
822 782
823
824 /* scan through act AMO variable looking for non-zero entries */ 783 /* scan through act AMO variable looking for non-zero entries */
825 for (word = 0; word < xp_nasid_mask_words; word++) { 784 for (word = 0; word < xp_nasid_mask_words; word++) {
826 785
827 if (xpc_exiting) { 786 if (xpc_exiting)
828 break; 787 break;
829 }
830 788
831 nasid_mask = xpc_IPI_receive(&act_amos[word]); 789 nasid_mask = xpc_IPI_receive(&act_amos[word]);
832 if (nasid_mask == 0) { 790 if (nasid_mask == 0) {
@@ -837,7 +795,6 @@ xpc_identify_act_IRQ_sender(void)
837 dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, 795 dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
838 nasid_mask); 796 nasid_mask);
839 797
840
841 /* 798 /*
842 * If this nasid has been added to the machine since 799 * If this nasid has been added to the machine since
843 * our partition was reset, this will retain the 800 * our partition was reset, this will retain the
@@ -846,7 +803,6 @@ xpc_identify_act_IRQ_sender(void)
846 */ 803 */
847 xpc_mach_nasids[word] |= nasid_mask; 804 xpc_mach_nasids[word] |= nasid_mask;
848 805
849
850 /* locate the nasid(s) which sent interrupts */ 806 /* locate the nasid(s) which sent interrupts */
851 807
852 for (bit = 0; bit < (8 * sizeof(u64)); bit++) { 808 for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
@@ -862,7 +818,6 @@ xpc_identify_act_IRQ_sender(void)
862 return n_IRQs_detected; 818 return n_IRQs_detected;
863} 819}
864 820
865
866/* 821/*
867 * See if the other side has responded to a partition disengage request 822 * See if the other side has responded to a partition disengage request
868 * from us. 823 * from us.
@@ -873,11 +828,11 @@ xpc_partition_disengaged(struct xpc_partition *part)
873 partid_t partid = XPC_PARTID(part); 828 partid_t partid = XPC_PARTID(part);
874 int disengaged; 829 int disengaged;
875 830
876
877 disengaged = (xpc_partition_engaged(1UL << partid) == 0); 831 disengaged = (xpc_partition_engaged(1UL << partid) == 0);
878 if (part->disengage_request_timeout) { 832 if (part->disengage_request_timeout) {
879 if (!disengaged) { 833 if (!disengaged) {
880 if (time_before(jiffies, part->disengage_request_timeout)) { 834 if (time_before(jiffies,
835 part->disengage_request_timeout)) {
881 /* timelimit hasn't been reached yet */ 836 /* timelimit hasn't been reached yet */
882 return 0; 837 return 0;
883 } 838 }
@@ -888,7 +843,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
888 */ 843 */
889 844
890 dev_info(xpc_part, "disengage from remote partition %d " 845 dev_info(xpc_part, "disengage from remote partition %d "
891 "timed out\n", partid); 846 "timed out\n", partid);
892 xpc_disengage_request_timedout = 1; 847 xpc_disengage_request_timedout = 1;
893 xpc_clear_partition_engaged(1UL << partid); 848 xpc_clear_partition_engaged(1UL << partid);
894 disengaged = 1; 849 disengaged = 1;
@@ -898,23 +853,20 @@ xpc_partition_disengaged(struct xpc_partition *part)
898 /* cancel the timer function, provided it's not us */ 853 /* cancel the timer function, provided it's not us */
899 if (!in_interrupt()) { 854 if (!in_interrupt()) {
900 del_singleshot_timer_sync(&part-> 855 del_singleshot_timer_sync(&part->
901 disengage_request_timer); 856 disengage_request_timer);
902 } 857 }
903 858
904 DBUG_ON(part->act_state != XPC_P_DEACTIVATING && 859 DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
905 part->act_state != XPC_P_INACTIVE); 860 part->act_state != XPC_P_INACTIVE);
906 if (part->act_state != XPC_P_INACTIVE) { 861 if (part->act_state != XPC_P_INACTIVE)
907 xpc_wakeup_channel_mgr(part); 862 xpc_wakeup_channel_mgr(part);
908 }
909 863
910 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 864 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
911 xpc_cancel_partition_disengage_request(part); 865 xpc_cancel_partition_disengage_request(part);
912 }
913 } 866 }
914 return disengaged; 867 return disengaged;
915} 868}
916 869
917
918/* 870/*
919 * Mark specified partition as active. 871 * Mark specified partition as active.
920 */ 872 */
@@ -924,7 +876,6 @@ xpc_mark_partition_active(struct xpc_partition *part)
924 unsigned long irq_flags; 876 unsigned long irq_flags;
925 enum xpc_retval ret; 877 enum xpc_retval ret;
926 878
927
928 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); 879 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
929 880
930 spin_lock_irqsave(&part->act_lock, irq_flags); 881 spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -940,17 +891,15 @@ xpc_mark_partition_active(struct xpc_partition *part)
940 return ret; 891 return ret;
941} 892}
942 893
943
944/* 894/*
945 * Notify XPC that the partition is down. 895 * Notify XPC that the partition is down.
946 */ 896 */
947void 897void
948xpc_deactivate_partition(const int line, struct xpc_partition *part, 898xpc_deactivate_partition(const int line, struct xpc_partition *part,
949 enum xpc_retval reason) 899 enum xpc_retval reason)
950{ 900{
951 unsigned long irq_flags; 901 unsigned long irq_flags;
952 902
953
954 spin_lock_irqsave(&part->act_lock, irq_flags); 903 spin_lock_irqsave(&part->act_lock, irq_flags);
955 904
956 if (part->act_state == XPC_P_INACTIVE) { 905 if (part->act_state == XPC_P_INACTIVE) {
@@ -964,7 +913,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
964 } 913 }
965 if (part->act_state == XPC_P_DEACTIVATING) { 914 if (part->act_state == XPC_P_DEACTIVATING) {
966 if ((part->reason == xpcUnloading && reason != xpcUnloading) || 915 if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
967 reason == xpcReactivating) { 916 reason == xpcReactivating) {
968 XPC_SET_REASON(part, reason, line); 917 XPC_SET_REASON(part, reason, line);
969 } 918 }
970 spin_unlock_irqrestore(&part->act_lock, irq_flags); 919 spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -982,9 +931,9 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
982 931
983 /* set a timelimit on the disengage request */ 932 /* set a timelimit on the disengage request */
984 part->disengage_request_timeout = jiffies + 933 part->disengage_request_timeout = jiffies +
985 (xpc_disengage_request_timelimit * HZ); 934 (xpc_disengage_request_timelimit * HZ);
986 part->disengage_request_timer.expires = 935 part->disengage_request_timer.expires =
987 part->disengage_request_timeout; 936 part->disengage_request_timeout;
988 add_timer(&part->disengage_request_timer); 937 add_timer(&part->disengage_request_timer);
989 } 938 }
990 939
@@ -994,7 +943,6 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
994 xpc_partition_going_down(part, reason); 943 xpc_partition_going_down(part, reason);
995} 944}
996 945
997
998/* 946/*
999 * Mark specified partition as inactive. 947 * Mark specified partition as inactive.
1000 */ 948 */
@@ -1003,7 +951,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
1003{ 951{
1004 unsigned long irq_flags; 952 unsigned long irq_flags;
1005 953
1006
1007 dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", 954 dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
1008 XPC_PARTID(part)); 955 XPC_PARTID(part));
1009 956
@@ -1013,7 +960,6 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
1013 part->remote_rp_pa = 0; 960 part->remote_rp_pa = 0;
1014} 961}
1015 962
1016
1017/* 963/*
1018 * SAL has provided a partition and machine mask. The partition mask 964 * SAL has provided a partition and machine mask. The partition mask
1019 * contains a bit for each even nasid in our partition. The machine 965 * contains a bit for each even nasid in our partition. The machine
@@ -1041,24 +987,22 @@ xpc_discovery(void)
1041 u64 *discovered_nasids; 987 u64 *discovered_nasids;
1042 enum xpc_retval ret; 988 enum xpc_retval ret;
1043 989
1044
1045 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + 990 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
1046 xp_nasid_mask_bytes, 991 xp_nasid_mask_bytes,
1047 GFP_KERNEL, &remote_rp_base); 992 GFP_KERNEL, &remote_rp_base);
1048 if (remote_rp == NULL) { 993 if (remote_rp == NULL)
1049 return; 994 return;
1050 }
1051 remote_vars = (struct xpc_vars *) remote_rp;
1052 995
996 remote_vars = (struct xpc_vars *)remote_rp;
1053 997
1054 discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words, 998 discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
1055 GFP_KERNEL); 999 GFP_KERNEL);
1056 if (discovered_nasids == NULL) { 1000 if (discovered_nasids == NULL) {
1057 kfree(remote_rp_base); 1001 kfree(remote_rp_base);
1058 return; 1002 return;
1059 } 1003 }
1060 1004
1061 rp = (struct xpc_rsvd_page *) xpc_rsvd_page; 1005 rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
1062 1006
1063 /* 1007 /*
1064 * The term 'region' in this context refers to the minimum number of 1008 * The term 'region' in this context refers to the minimum number of
@@ -1081,23 +1025,19 @@ xpc_discovery(void)
1081 1025
1082 for (region = 0; region < max_regions; region++) { 1026 for (region = 0; region < max_regions; region++) {
1083 1027
1084 if ((volatile int) xpc_exiting) { 1028 if (xpc_exiting)
1085 break; 1029 break;
1086 }
1087 1030
1088 dev_dbg(xpc_part, "searching region %d\n", region); 1031 dev_dbg(xpc_part, "searching region %d\n", region);
1089 1032
1090 for (nasid = (region * region_size * 2); 1033 for (nasid = (region * region_size * 2);
1091 nasid < ((region + 1) * region_size * 2); 1034 nasid < ((region + 1) * region_size * 2); nasid += 2) {
1092 nasid += 2) {
1093 1035
1094 if ((volatile int) xpc_exiting) { 1036 if (xpc_exiting)
1095 break; 1037 break;
1096 }
1097 1038
1098 dev_dbg(xpc_part, "checking nasid %d\n", nasid); 1039 dev_dbg(xpc_part, "checking nasid %d\n", nasid);
1099 1040
1100
1101 if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { 1041 if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
1102 dev_dbg(xpc_part, "PROM indicates Nasid %d is " 1042 dev_dbg(xpc_part, "PROM indicates Nasid %d is "
1103 "part of the local partition; skipping " 1043 "part of the local partition; skipping "
@@ -1119,19 +1059,18 @@ xpc_discovery(void)
1119 continue; 1059 continue;
1120 } 1060 }
1121 1061
1122
1123 /* pull over the reserved page structure */ 1062 /* pull over the reserved page structure */
1124 1063
1125 ret = xpc_get_remote_rp(nasid, discovered_nasids, 1064 ret = xpc_get_remote_rp(nasid, discovered_nasids,
1126 remote_rp, &remote_rp_pa); 1065 remote_rp, &remote_rp_pa);
1127 if (ret != xpcSuccess) { 1066 if (ret != xpcSuccess) {
1128 dev_dbg(xpc_part, "unable to get reserved page " 1067 dev_dbg(xpc_part, "unable to get reserved page "
1129 "from nasid %d, reason=%d\n", nasid, 1068 "from nasid %d, reason=%d\n", nasid,
1130 ret); 1069 ret);
1131 1070
1132 if (ret == xpcLocalPartid) { 1071 if (ret == xpcLocalPartid)
1133 break; 1072 break;
1134 } 1073
1135 continue; 1074 continue;
1136 } 1075 }
1137 1076
@@ -1140,7 +1079,6 @@ xpc_discovery(void)
1140 partid = remote_rp->partid; 1079 partid = remote_rp->partid;
1141 part = &xpc_partitions[partid]; 1080 part = &xpc_partitions[partid];
1142 1081
1143
1144 /* pull over the cross partition variables */ 1082 /* pull over the cross partition variables */
1145 1083
1146 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); 1084 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
@@ -1171,15 +1109,15 @@ xpc_discovery(void)
1171 * get the same page for remote_act_amos_pa after 1109 * get the same page for remote_act_amos_pa after
1172 * module reloads and system reboots. 1110 * module reloads and system reboots.
1173 */ 1111 */
1174 if (sn_register_xp_addr_region( 1112 if (sn_register_xp_addr_region
1175 remote_vars->amos_page_pa, 1113 (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
1176 PAGE_SIZE, 1) < 0) { 1114 dev_dbg(xpc_part,
1177 dev_dbg(xpc_part, "partition %d failed to " 1115 "partition %d failed to "
1178 "register xp_addr region 0x%016lx\n", 1116 "register xp_addr region 0x%016lx\n",
1179 partid, remote_vars->amos_page_pa); 1117 partid, remote_vars->amos_page_pa);
1180 1118
1181 XPC_SET_REASON(part, xpcPhysAddrRegFailed, 1119 XPC_SET_REASON(part, xpcPhysAddrRegFailed,
1182 __LINE__); 1120 __LINE__);
1183 break; 1121 break;
1184 } 1122 }
1185 1123
@@ -1195,9 +1133,9 @@ xpc_discovery(void)
1195 remote_vars->act_phys_cpuid); 1133 remote_vars->act_phys_cpuid);
1196 1134
1197 if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars-> 1135 if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
1198 version)) { 1136 version)) {
1199 part->remote_amos_page_pa = 1137 part->remote_amos_page_pa =
1200 remote_vars->amos_page_pa; 1138 remote_vars->amos_page_pa;
1201 xpc_mark_partition_disengaged(part); 1139 xpc_mark_partition_disengaged(part);
1202 xpc_cancel_partition_disengage_request(part); 1140 xpc_cancel_partition_disengage_request(part);
1203 } 1141 }
@@ -1209,7 +1147,6 @@ xpc_discovery(void)
1209 kfree(remote_rp_base); 1147 kfree(remote_rp_base);
1210} 1148}
1211 1149
1212
1213/* 1150/*
1214 * Given a partid, get the nasids owned by that partition from the 1151 * Given a partid, get the nasids owned by that partition from the
1215 * remote partition's reserved page. 1152 * remote partition's reserved page.
@@ -1221,19 +1158,17 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
1221 u64 part_nasid_pa; 1158 u64 part_nasid_pa;
1222 int bte_res; 1159 int bte_res;
1223 1160
1224
1225 part = &xpc_partitions[partid]; 1161 part = &xpc_partitions[partid];
1226 if (part->remote_rp_pa == 0) { 1162 if (part->remote_rp_pa == 0)
1227 return xpcPartitionDown; 1163 return xpcPartitionDown;
1228 }
1229 1164
1230 memset(nasid_mask, 0, XP_NASID_MASK_BYTES); 1165 memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
1231 1166
1232 part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); 1167 part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
1233 1168
1234 bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask, 1169 bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask,
1235 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); 1170 xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE),
1171 NULL);
1236 1172
1237 return xpc_map_bte_errors(bte_res); 1173 return xpc_map_bte_errors(bte_res);
1238} 1174}
1239
diff --git a/arch/ia64/sn/kernel/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index a5df672d8392..a9543c65814d 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -3,10 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1999-2008 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9
10/* 9/*
11 * Cross Partition Network Interface (XPNET) support 10 * Cross Partition Network Interface (XPNET) support
12 * 11 *
@@ -21,8 +20,8 @@
21 * 20 *
22 */ 21 */
23 22
24
25#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/types.h>
26#include <linux/kernel.h> 25#include <linux/kernel.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/ioport.h> 27#include <linux/ioport.h>
@@ -36,10 +35,8 @@
36#include <asm/sn/bte.h> 35#include <asm/sn/bte.h>
37#include <asm/sn/io.h> 36#include <asm/sn/io.h>
38#include <asm/sn/sn_sal.h> 37#include <asm/sn/sn_sal.h>
39#include <asm/types.h>
40#include <asm/atomic.h> 38#include <asm/atomic.h>
41#include <asm/sn/xp.h> 39#include "xp.h"
42
43 40
44/* 41/*
45 * The message payload transferred by XPC. 42 * The message payload transferred by XPC.
@@ -79,7 +76,6 @@ struct xpnet_message {
79#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) 76#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
80#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) 77#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
81 78
82
83#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) 79#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
84#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) 80#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
85 81
@@ -91,9 +87,9 @@ struct xpnet_message {
91#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) 87#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
92#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) 88#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
93 89
94#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ 90#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
95#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ 91#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
96#define XPNET_MAGIC 0x88786984 /* "XNET" */ 92#define XPNET_MAGIC 0x88786984 /* "XNET" */
97 93
98#define XPNET_VALID_MSG(_m) \ 94#define XPNET_VALID_MSG(_m) \
99 ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ 95 ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
@@ -101,7 +97,6 @@ struct xpnet_message {
101 97
102#define XPNET_DEVICE_NAME "xp0" 98#define XPNET_DEVICE_NAME "xp0"
103 99
104
105/* 100/*
106 * When messages are queued with xpc_send_notify, a kmalloc'd buffer 101 * When messages are queued with xpc_send_notify, a kmalloc'd buffer
107 * of the following type is passed as a notification cookie. When the 102 * of the following type is passed as a notification cookie. When the
@@ -145,7 +140,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
145/* 32KB has been determined to be the ideal */ 140/* 32KB has been determined to be the ideal */
146#define XPNET_DEF_MTU (0x8000UL) 141#define XPNET_DEF_MTU (0x8000UL)
147 142
148
149/* 143/*
150 * The partition id is encapsulated in the MAC address. The following 144 * The partition id is encapsulated in the MAC address. The following
151 * define locates the octet the partid is in. 145 * define locates the octet the partid is in.
@@ -153,7 +147,6 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
153#define XPNET_PARTID_OCTET 1 147#define XPNET_PARTID_OCTET 1
154#define XPNET_LICENSE_OCTET 2 148#define XPNET_LICENSE_OCTET 2
155 149
156
157/* 150/*
158 * Define the XPNET debug device structure that is to be used with dev_dbg(), 151 * Define the XPNET debug device structure that is to be used with dev_dbg(),
159 * dev_err(), dev_warn(), and dev_info(). 152 * dev_err(), dev_warn(), and dev_info().
@@ -163,7 +156,7 @@ struct device_driver xpnet_dbg_name = {
163}; 156};
164 157
165struct device xpnet_dbg_subname = { 158struct device xpnet_dbg_subname = {
166 .bus_id = {0}, /* set to "" */ 159 .bus_id = {0}, /* set to "" */
167 .driver = &xpnet_dbg_name 160 .driver = &xpnet_dbg_name
168}; 161};
169 162
@@ -178,14 +171,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
178 struct sk_buff *skb; 171 struct sk_buff *skb;
179 bte_result_t bret; 172 bte_result_t bret;
180 struct xpnet_dev_private *priv = 173 struct xpnet_dev_private *priv =
181 (struct xpnet_dev_private *) xpnet_device->priv; 174 (struct xpnet_dev_private *)xpnet_device->priv;
182
183 175
184 if (!XPNET_VALID_MSG(msg)) { 176 if (!XPNET_VALID_MSG(msg)) {
185 /* 177 /*
186 * Packet with a different XPC version. Ignore. 178 * Packet with a different XPC version. Ignore.
187 */ 179 */
188 xpc_received(partid, channel, (void *) msg); 180 xpc_received(partid, channel, (void *)msg);
189 181
190 priv->stats.rx_errors++; 182 priv->stats.rx_errors++;
191 183
@@ -194,14 +186,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
194 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, 186 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
195 msg->leadin_ignore, msg->tailout_ignore); 187 msg->leadin_ignore, msg->tailout_ignore);
196 188
197
198 /* reserve an extra cache line */ 189 /* reserve an extra cache line */
199 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); 190 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
200 if (!skb) { 191 if (!skb) {
201 dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", 192 dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
202 msg->size + L1_CACHE_BYTES); 193 msg->size + L1_CACHE_BYTES);
203 194
204 xpc_received(partid, channel, (void *) msg); 195 xpc_received(partid, channel, (void *)msg);
205 196
206 priv->stats.rx_errors++; 197 priv->stats.rx_errors++;
207 198
@@ -227,12 +218,13 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
227 * Move the data over from the other side. 218 * Move the data over from the other side.
228 */ 219 */
229 if ((XPNET_VERSION_MINOR(msg->version) == 1) && 220 if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
230 (msg->embedded_bytes != 0)) { 221 (msg->embedded_bytes != 0)) {
231 dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " 222 dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
232 "%lu)\n", skb->data, &msg->data, 223 "%lu)\n", skb->data, &msg->data,
233 (size_t) msg->embedded_bytes); 224 (size_t)msg->embedded_bytes);
234 225
235 skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes); 226 skb_copy_to_linear_data(skb, &msg->data,
227 (size_t)msg->embedded_bytes);
236 } else { 228 } else {
237 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" 229 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
238 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, 230 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
@@ -244,16 +236,18 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
244 msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); 236 msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
245 237
246 if (bret != BTE_SUCCESS) { 238 if (bret != BTE_SUCCESS) {
247 // >>> Need better way of cleaning skb. Currently skb 239 /*
248 // >>> appears in_use and we can't just call 240 * >>> Need better way of cleaning skb. Currently skb
249 // >>> dev_kfree_skb. 241 * >>> appears in_use and we can't just call
242 * >>> dev_kfree_skb.
243 */
250 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " 244 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
251 "error=0x%x\n", (void *)msg->buf_pa, 245 "error=0x%x\n", (void *)msg->buf_pa,
252 (void *)__pa((u64)skb->data & 246 (void *)__pa((u64)skb->data &
253 ~(L1_CACHE_BYTES - 1)), 247 ~(L1_CACHE_BYTES - 1)),
254 msg->size, bret); 248 msg->size, bret);
255 249
256 xpc_received(partid, channel, (void *) msg); 250 xpc_received(partid, channel, (void *)msg);
257 251
258 priv->stats.rx_errors++; 252 priv->stats.rx_errors++;
259 253
@@ -262,7 +256,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
262 } 256 }
263 257
264 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 258 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
265 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 259 "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
266 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), 260 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
267 skb->len); 261 skb->len);
268 262
@@ -275,16 +269,14 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
275 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), 269 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
276 skb_end_pointer(skb), skb->len); 270 skb_end_pointer(skb), skb->len);
277 271
278
279 xpnet_device->last_rx = jiffies; 272 xpnet_device->last_rx = jiffies;
280 priv->stats.rx_packets++; 273 priv->stats.rx_packets++;
281 priv->stats.rx_bytes += skb->len + ETH_HLEN; 274 priv->stats.rx_bytes += skb->len + ETH_HLEN;
282 275
283 netif_rx_ni(skb); 276 netif_rx_ni(skb);
284 xpc_received(partid, channel, (void *) msg); 277 xpc_received(partid, channel, (void *)msg);
285} 278}
286 279
287
288/* 280/*
289 * This is the handler which XPC calls during any sort of change in 281 * This is the handler which XPC calls during any sort of change in
290 * state or message reception on a connection. 282 * state or message reception on a connection.
@@ -295,20 +287,19 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
295{ 287{
296 long bp; 288 long bp;
297 289
298
299 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); 290 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
300 DBUG_ON(channel != XPC_NET_CHANNEL); 291 DBUG_ON(channel != XPC_NET_CHANNEL);
301 292
302 switch(reason) { 293 switch (reason) {
303 case xpcMsgReceived: /* message received */ 294 case xpcMsgReceived: /* message received */
304 DBUG_ON(data == NULL); 295 DBUG_ON(data == NULL);
305 296
306 xpnet_receive(partid, channel, (struct xpnet_message *) data); 297 xpnet_receive(partid, channel, (struct xpnet_message *)data);
307 break; 298 break;
308 299
309 case xpcConnected: /* connection completed to a partition */ 300 case xpcConnected: /* connection completed to a partition */
310 spin_lock_bh(&xpnet_broadcast_lock); 301 spin_lock_bh(&xpnet_broadcast_lock);
311 xpnet_broadcast_partitions |= 1UL << (partid -1 ); 302 xpnet_broadcast_partitions |= 1UL << (partid - 1);
312 bp = xpnet_broadcast_partitions; 303 bp = xpnet_broadcast_partitions;
313 spin_unlock_bh(&xpnet_broadcast_lock); 304 spin_unlock_bh(&xpnet_broadcast_lock);
314 305
@@ -321,13 +312,12 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
321 312
322 default: 313 default:
323 spin_lock_bh(&xpnet_broadcast_lock); 314 spin_lock_bh(&xpnet_broadcast_lock);
324 xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); 315 xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
325 bp = xpnet_broadcast_partitions; 316 bp = xpnet_broadcast_partitions;
326 spin_unlock_bh(&xpnet_broadcast_lock); 317 spin_unlock_bh(&xpnet_broadcast_lock);
327 318
328 if (bp == 0) { 319 if (bp == 0)
329 netif_carrier_off(xpnet_device); 320 netif_carrier_off(xpnet_device);
330 }
331 321
332 dev_dbg(xpnet, "%s disconnected from partition %d; " 322 dev_dbg(xpnet, "%s disconnected from partition %d; "
333 "xpnet_broadcast_partitions=0x%lx\n", 323 "xpnet_broadcast_partitions=0x%lx\n",
@@ -337,13 +327,11 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
337 } 327 }
338} 328}
339 329
340
341static int 330static int
342xpnet_dev_open(struct net_device *dev) 331xpnet_dev_open(struct net_device *dev)
343{ 332{
344 enum xpc_retval ret; 333 enum xpc_retval ret;
345 334
346
347 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " 335 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
348 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, 336 "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
349 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, 337 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
@@ -364,7 +352,6 @@ xpnet_dev_open(struct net_device *dev)
364 return 0; 352 return 0;
365} 353}
366 354
367
368static int 355static int
369xpnet_dev_stop(struct net_device *dev) 356xpnet_dev_stop(struct net_device *dev)
370{ 357{
@@ -375,7 +362,6 @@ xpnet_dev_stop(struct net_device *dev)
375 return 0; 362 return 0;
376} 363}
377 364
378
379static int 365static int
380xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) 366xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
381{ 367{
@@ -392,7 +378,6 @@ xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
392 return 0; 378 return 0;
393} 379}
394 380
395
396/* 381/*
397 * Required for the net_device structure. 382 * Required for the net_device structure.
398 */ 383 */
@@ -402,7 +387,6 @@ xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
402 return 0; 387 return 0;
403} 388}
404 389
405
406/* 390/*
407 * Return statistics to the caller. 391 * Return statistics to the caller.
408 */ 392 */
@@ -411,13 +395,11 @@ xpnet_dev_get_stats(struct net_device *dev)
411{ 395{
412 struct xpnet_dev_private *priv; 396 struct xpnet_dev_private *priv;
413 397
414 398 priv = (struct xpnet_dev_private *)dev->priv;
415 priv = (struct xpnet_dev_private *) dev->priv;
416 399
417 return &priv->stats; 400 return &priv->stats;
418} 401}
419 402
420
421/* 403/*
422 * Notification that the other end has received the message and 404 * Notification that the other end has received the message and
423 * DMA'd the skb information. At this point, they are done with 405 * DMA'd the skb information. At this point, they are done with
@@ -426,11 +408,9 @@ xpnet_dev_get_stats(struct net_device *dev)
426 */ 408 */
427static void 409static void
428xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, 410xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
429 void *__qm) 411 void *__qm)
430{ 412{
431 struct xpnet_pending_msg *queued_msg = 413 struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
432 (struct xpnet_pending_msg *) __qm;
433
434 414
435 DBUG_ON(queued_msg == NULL); 415 DBUG_ON(queued_msg == NULL);
436 416
@@ -439,14 +419,13 @@ xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
439 419
440 if (atomic_dec_return(&queued_msg->use_count) == 0) { 420 if (atomic_dec_return(&queued_msg->use_count) == 0) {
441 dev_dbg(xpnet, "all acks for skb->head=-x%p\n", 421 dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
442 (void *) queued_msg->skb->head); 422 (void *)queued_msg->skb->head);
443 423
444 dev_kfree_skb_any(queued_msg->skb); 424 dev_kfree_skb_any(queued_msg->skb);
445 kfree(queued_msg); 425 kfree(queued_msg);
446 } 426 }
447} 427}
448 428
449
450/* 429/*
451 * Network layer has formatted a packet (skb) and is ready to place it 430 * Network layer has formatted a packet (skb) and is ready to place it
452 * "on the wire". Prepare and send an xpnet_message to all partitions 431 * "on the wire". Prepare and send an xpnet_message to all partitions
@@ -469,16 +448,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
469 struct xpnet_dev_private *priv; 448 struct xpnet_dev_private *priv;
470 u16 embedded_bytes; 449 u16 embedded_bytes;
471 450
472 451 priv = (struct xpnet_dev_private *)dev->priv;
473 priv = (struct xpnet_dev_private *) dev->priv;
474
475 452
476 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 453 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
477 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 454 "skb->end=0x%p skb->len=%d\n", (void *)skb->head,
478 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), 455 (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
479 skb->len); 456 skb->len);
480 457
481
482 /* 458 /*
483 * The xpnet_pending_msg tracks how many outstanding 459 * The xpnet_pending_msg tracks how many outstanding
484 * xpc_send_notifies are relying on this skb. When none 460 * xpc_send_notifies are relying on this skb. When none
@@ -487,16 +463,15 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
487 queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); 463 queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
488 if (queued_msg == NULL) { 464 if (queued_msg == NULL) {
489 dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " 465 dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
490 "packet\n", sizeof(struct xpnet_pending_msg)); 466 "packet\n", sizeof(struct xpnet_pending_msg));
491 467
492 priv->stats.tx_errors++; 468 priv->stats.tx_errors++;
493 469
494 return -ENOMEM; 470 return -ENOMEM;
495 } 471 }
496 472
497
498 /* get the beginning of the first cacheline and end of last */ 473 /* get the beginning of the first cacheline and end of last */
499 start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); 474 start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
500 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); 475 end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));
501 476
502 /* calculate how many bytes to embed in the XPC message */ 477 /* calculate how many bytes to embed in the XPC message */
@@ -506,7 +481,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
506 embedded_bytes = skb->len; 481 embedded_bytes = skb->len;
507 } 482 }
508 483
509
510 /* 484 /*
511 * Since the send occurs asynchronously, we set the count to one 485 * Since the send occurs asynchronously, we set the count to one
512 * and begin sending. Any sends that happen to complete before 486 * and begin sending. Any sends that happen to complete before
@@ -517,14 +491,13 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
517 atomic_set(&queued_msg->use_count, 1); 491 atomic_set(&queued_msg->use_count, 1);
518 queued_msg->skb = skb; 492 queued_msg->skb = skb;
519 493
520
521 second_mac_octet = skb->data[XPNET_PARTID_OCTET]; 494 second_mac_octet = skb->data[XPNET_PARTID_OCTET];
522 if (second_mac_octet == 0xff) { 495 if (second_mac_octet == 0xff) {
523 /* we are being asked to broadcast to all partitions */ 496 /* we are being asked to broadcast to all partitions */
524 dp = xpnet_broadcast_partitions; 497 dp = xpnet_broadcast_partitions;
525 } else if (second_mac_octet != 0) { 498 } else if (second_mac_octet != 0) {
526 dp = xpnet_broadcast_partitions & 499 dp = xpnet_broadcast_partitions &
527 (1UL << (second_mac_octet - 1)); 500 (1UL << (second_mac_octet - 1));
528 } else { 501 } else {
529 /* 0 is an invalid partid. Ignore */ 502 /* 0 is an invalid partid. Ignore */
530 dp = 0; 503 dp = 0;
@@ -543,7 +516,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; 516 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
544 dest_partid++) { 517 dest_partid++) {
545 518
546
547 if (!(dp & (1UL << (dest_partid - 1)))) { 519 if (!(dp & (1UL << (dest_partid - 1)))) {
548 /* not destined for this partition */ 520 /* not destined for this partition */
549 continue; 521 continue;
@@ -552,20 +524,18 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
552 /* remove this partition from the destinations mask */ 524 /* remove this partition from the destinations mask */
553 dp &= ~(1UL << (dest_partid - 1)); 525 dp &= ~(1UL << (dest_partid - 1));
554 526
555
556 /* found a partition to send to */ 527 /* found a partition to send to */
557 528
558 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, 529 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
559 XPC_NOWAIT, (void **)&msg); 530 XPC_NOWAIT, (void **)&msg);
560 if (unlikely(ret != xpcSuccess)) { 531 if (unlikely(ret != xpcSuccess))
561 continue; 532 continue;
562 }
563 533
564 msg->embedded_bytes = embedded_bytes; 534 msg->embedded_bytes = embedded_bytes;
565 if (unlikely(embedded_bytes != 0)) { 535 if (unlikely(embedded_bytes != 0)) {
566 msg->version = XPNET_VERSION_EMBED; 536 msg->version = XPNET_VERSION_EMBED;
567 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", 537 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
568 &msg->data, skb->data, (size_t) embedded_bytes); 538 &msg->data, skb->data, (size_t)embedded_bytes);
569 skb_copy_from_linear_data(skb, &msg->data, 539 skb_copy_from_linear_data(skb, &msg->data,
570 (size_t)embedded_bytes); 540 (size_t)embedded_bytes);
571 } else { 541 } else {
@@ -573,7 +543,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
573 } 543 }
574 msg->magic = XPNET_MAGIC; 544 msg->magic = XPNET_MAGIC;
575 msg->size = end_addr - start_addr; 545 msg->size = end_addr - start_addr;
576 msg->leadin_ignore = (u64) skb->data - start_addr; 546 msg->leadin_ignore = (u64)skb->data - start_addr;
577 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); 547 msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
578 msg->buf_pa = __pa(start_addr); 548 msg->buf_pa = __pa(start_addr);
579 549
@@ -583,7 +553,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
583 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, 553 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
584 msg->leadin_ignore, msg->tailout_ignore); 554 msg->leadin_ignore, msg->tailout_ignore);
585 555
586
587 atomic_inc(&queued_msg->use_count); 556 atomic_inc(&queued_msg->use_count);
588 557
589 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, 558 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
@@ -592,14 +561,12 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
592 atomic_dec(&queued_msg->use_count); 561 atomic_dec(&queued_msg->use_count);
593 continue; 562 continue;
594 } 563 }
595
596 } 564 }
597 565
598 if (atomic_dec_return(&queued_msg->use_count) == 0) { 566 if (atomic_dec_return(&queued_msg->use_count) == 0) {
599 dev_dbg(xpnet, "no partitions to receive packet destined for " 567 dev_dbg(xpnet, "no partitions to receive packet destined for "
600 "%d\n", dest_partid); 568 "%d\n", dest_partid);
601 569
602
603 dev_kfree_skb(skb); 570 dev_kfree_skb(skb);
604 kfree(queued_msg); 571 kfree(queued_msg);
605 } 572 }
@@ -610,23 +577,20 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
610 return 0; 577 return 0;
611} 578}
612 579
613
614/* 580/*
615 * Deal with transmit timeouts coming from the network layer. 581 * Deal with transmit timeouts coming from the network layer.
616 */ 582 */
617static void 583static void
618xpnet_dev_tx_timeout (struct net_device *dev) 584xpnet_dev_tx_timeout(struct net_device *dev)
619{ 585{
620 struct xpnet_dev_private *priv; 586 struct xpnet_dev_private *priv;
621 587
622 588 priv = (struct xpnet_dev_private *)dev->priv;
623 priv = (struct xpnet_dev_private *) dev->priv;
624 589
625 priv->stats.tx_errors++; 590 priv->stats.tx_errors++;
626 return; 591 return;
627} 592}
628 593
629
630static int __init 594static int __init
631xpnet_init(void) 595xpnet_init(void)
632{ 596{
@@ -634,10 +598,8 @@ xpnet_init(void)
634 u32 license_num; 598 u32 license_num;
635 int result = -ENOMEM; 599 int result = -ENOMEM;
636 600
637 601 if (!ia64_platform_is("sn2"))
638 if (!ia64_platform_is("sn2")) {
639 return -ENODEV; 602 return -ENODEV;
640 }
641 603
642 dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); 604 dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
643 605
@@ -647,9 +609,8 @@ xpnet_init(void)
647 */ 609 */
648 xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), 610 xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
649 XPNET_DEVICE_NAME, ether_setup); 611 XPNET_DEVICE_NAME, ether_setup);
650 if (xpnet_device == NULL) { 612 if (xpnet_device == NULL)
651 return -ENOMEM; 613 return -ENOMEM;
652 }
653 614
654 netif_carrier_off(xpnet_device); 615 netif_carrier_off(xpnet_device);
655 616
@@ -672,7 +633,7 @@ xpnet_init(void)
672 license_num = sn_partition_serial_number_val(); 633 license_num = sn_partition_serial_number_val();
673 for (i = 3; i >= 0; i--) { 634 for (i = 3; i >= 0; i--) {
674 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = 635 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
675 license_num & 0xff; 636 license_num & 0xff;
676 license_num = license_num >> 8; 637 license_num = license_num >> 8;
677 } 638 }
678 639
@@ -690,29 +651,27 @@ xpnet_init(void)
690 xpnet_device->features = NETIF_F_NO_CSUM; 651 xpnet_device->features = NETIF_F_NO_CSUM;
691 652
692 result = register_netdev(xpnet_device); 653 result = register_netdev(xpnet_device);
693 if (result != 0) { 654 if (result != 0)
694 free_netdev(xpnet_device); 655 free_netdev(xpnet_device);
695 }
696 656
697 return result; 657 return result;
698} 658}
699module_init(xpnet_init);
700 659
660module_init(xpnet_init);
701 661
702static void __exit 662static void __exit
703xpnet_exit(void) 663xpnet_exit(void)
704{ 664{
705 dev_info(xpnet, "unregistering network device %s\n", 665 dev_info(xpnet, "unregistering network device %s\n",
706 xpnet_device[0].name); 666 xpnet_device[0].name);
707 667
708 unregister_netdev(xpnet_device); 668 unregister_netdev(xpnet_device);
709 669
710 free_netdev(xpnet_device); 670 free_netdev(xpnet_device);
711} 671}
712module_exit(xpnet_exit);
713 672
673module_exit(xpnet_exit);
714 674
715MODULE_AUTHOR("Silicon Graphics, Inc."); 675MODULE_AUTHOR("Silicon Graphics, Inc.");
716MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); 676MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
717MODULE_LICENSE("GPL"); 677MODULE_LICENSE("GPL");
718
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index e04bf9926441..0b94833e23f7 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -1083,15 +1083,12 @@ static void start_timer(struct scc_priv *priv, int t, int r15)
1083 if (t == 0) { 1083 if (t == 0) {
1084 tm_isr(priv); 1084 tm_isr(priv);
1085 } else if (t > 0) { 1085 } else if (t > 0) {
1086 save_flags(flags);
1087 cli();
1088 outb(t & 0xFF, priv->tmr_cnt); 1086 outb(t & 0xFF, priv->tmr_cnt);
1089 outb((t >> 8) & 0xFF, priv->tmr_cnt); 1087 outb((t >> 8) & 0xFF, priv->tmr_cnt);
1090 if (priv->type != TYPE_TWIN) { 1088 if (priv->type != TYPE_TWIN) {
1091 write_scc(priv, R15, r15 | CTSIE); 1089 write_scc(priv, R15, r15 | CTSIE);
1092 priv->rr0 |= CTS; 1090 priv->rr0 |= CTS;
1093 } 1091 }
1094 restore_flags(flags);
1095 } 1092 }
1096} 1093}
1097 1094
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index f844b738d34e..c4e631d14bfe 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -49,7 +49,9 @@ config IWL4965_HT
49 49
50config IWL4965_LEDS 50config IWL4965_LEDS
51 bool "Enable LEDS features in iwl4965 driver" 51 bool "Enable LEDS features in iwl4965 driver"
52 depends on IWL4965 && MAC80211_LEDS && LEDS_CLASS 52 depends on IWL4965
53 select MAC80211_LEDS
54 select LEDS_CLASS
53 select IWLWIFI_LEDS 55 select IWLWIFI_LEDS
54 ---help--- 56 ---help---
55 This option enables LEDS for the iwlwifi drivers 57 This option enables LEDS for the iwlwifi drivers
@@ -134,7 +136,9 @@ config IWL3945_SPECTRUM_MEASUREMENT
134 136
135config IWL3945_LEDS 137config IWL3945_LEDS
136 bool "Enable LEDS features in iwl3945 driver" 138 bool "Enable LEDS features in iwl3945 driver"
137 depends on IWL3945 && MAC80211_LEDS && LEDS_CLASS 139 depends on IWL3945
140 select MAC80211_LEDS
141 select LEDS_CLASS
138 ---help--- 142 ---help---
139 This option enables LEDS for the iwl3945 driver. 143 This option enables LEDS for the iwl3945 driver.
140 144
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 4f3e88b12e3a..ec6187b75c3b 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_IWLCORE) := iwlcore.o 1obj-$(CONFIG_IWLCORE) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o
3iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 3iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
4iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o 4iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index a1e3938cba9b..ab1029e79884 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -60,7 +60,8 @@ config RT2400PCI_RFKILL
60 60
61config RT2400PCI_LEDS 61config RT2400PCI_LEDS
62 bool "RT2400 leds support" 62 bool "RT2400 leds support"
63 depends on RT2400PCI && LEDS_CLASS 63 depends on RT2400PCI
64 select LEDS_CLASS
64 select RT2X00_LIB_LEDS 65 select RT2X00_LIB_LEDS
65 ---help--- 66 ---help---
66 This adds support for led triggers provided my mac80211. 67 This adds support for led triggers provided my mac80211.
@@ -86,7 +87,8 @@ config RT2500PCI_RFKILL
86 87
87config RT2500PCI_LEDS 88config RT2500PCI_LEDS
88 bool "RT2500 leds support" 89 bool "RT2500 leds support"
89 depends on RT2500PCI && LEDS_CLASS 90 depends on RT2500PCI
91 select LEDS_CLASS
90 select RT2X00_LIB_LEDS 92 select RT2X00_LIB_LEDS
91 ---help--- 93 ---help---
92 This adds support for led triggers provided my mac80211. 94 This adds support for led triggers provided my mac80211.
@@ -114,7 +116,8 @@ config RT61PCI_RFKILL
114 116
115config RT61PCI_LEDS 117config RT61PCI_LEDS
116 bool "RT61 leds support" 118 bool "RT61 leds support"
117 depends on RT61PCI && LEDS_CLASS 119 depends on RT61PCI
120 select LEDS_CLASS
118 select RT2X00_LIB_LEDS 121 select RT2X00_LIB_LEDS
119 ---help--- 122 ---help---
120 This adds support for led triggers provided my mac80211. 123 This adds support for led triggers provided my mac80211.
@@ -130,7 +133,8 @@ config RT2500USB
130 133
131config RT2500USB_LEDS 134config RT2500USB_LEDS
132 bool "RT2500 leds support" 135 bool "RT2500 leds support"
133 depends on RT2500USB && LEDS_CLASS 136 depends on RT2500USB
137 select LEDS_CLASS
134 select RT2X00_LIB_LEDS 138 select RT2X00_LIB_LEDS
135 ---help--- 139 ---help---
136 This adds support for led triggers provided my mac80211. 140 This adds support for led triggers provided my mac80211.
@@ -148,7 +152,8 @@ config RT73USB
148 152
149config RT73USB_LEDS 153config RT73USB_LEDS
150 bool "RT73 leds support" 154 bool "RT73 leds support"
151 depends on RT73USB && LEDS_CLASS 155 depends on RT73USB
156 select LEDS_CLASS
152 select RT2X00_LIB_LEDS 157 select RT2X00_LIB_LEDS
153 ---help--- 158 ---help---
154 This adds support for led triggers provided my mac80211. 159 This adds support for led triggers provided my mac80211.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f9b7bdd27829..8ddb918f5f57 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -416,13 +416,13 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
416 * Reserve some resources for CardBus. We reserve 416 * Reserve some resources for CardBus. We reserve
417 * a fixed amount of bus space for CardBus bridges. 417 * a fixed amount of bus space for CardBus bridges.
418 */ 418 */
419 b_res[0].start = pci_cardbus_io_size; 419 b_res[0].start = 0;
420 b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1; 420 b_res[0].end = pci_cardbus_io_size - 1;
421 b_res[0].flags |= IORESOURCE_IO; 421 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
422 422
423 b_res[1].start = pci_cardbus_io_size; 423 b_res[1].start = 0;
424 b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1; 424 b_res[1].end = pci_cardbus_io_size - 1;
425 b_res[1].flags |= IORESOURCE_IO; 425 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
426 426
427 /* 427 /*
428 * Check whether prefetchable memory is supported 428 * Check whether prefetchable memory is supported
@@ -441,17 +441,17 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
441 * twice the size. 441 * twice the size.
442 */ 442 */
443 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { 443 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
444 b_res[2].start = pci_cardbus_mem_size; 444 b_res[2].start = 0;
445 b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1; 445 b_res[2].end = pci_cardbus_mem_size - 1;
446 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; 446 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
447 447
448 b_res[3].start = pci_cardbus_mem_size; 448 b_res[3].start = 0;
449 b_res[3].end = b_res[3].start + pci_cardbus_mem_size - 1; 449 b_res[3].end = pci_cardbus_mem_size - 1;
450 b_res[3].flags |= IORESOURCE_MEM; 450 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
451 } else { 451 } else {
452 b_res[3].start = pci_cardbus_mem_size * 2; 452 b_res[3].start = 0;
453 b_res[3].end = b_res[3].start + pci_cardbus_mem_size * 2 - 1; 453 b_res[3].end = pci_cardbus_mem_size * 2 - 1;
454 b_res[3].flags |= IORESOURCE_MEM; 454 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
455 } 455 }
456} 456}
457 457
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ed8c06904807..8d8852651fd2 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -200,7 +200,6 @@ config PCMCIA_AU1X00
200config PCMCIA_SA1100 200config PCMCIA_SA1100
201 tristate "SA1100 support" 201 tristate "SA1100 support"
202 depends on ARM && ARCH_SA1100 && PCMCIA 202 depends on ARM && ARCH_SA1100 && PCMCIA
203 depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE
204 help 203 help
205 Say Y here to include support for SA11x0-based PCMCIA or CF 204 Say Y here to include support for SA11x0-based PCMCIA or CF
206 sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/ 205 sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/
@@ -221,6 +220,7 @@ config PCMCIA_SA1111
221config PCMCIA_PXA2XX 220config PCMCIA_PXA2XX
222 tristate "PXA2xx support" 221 tristate "PXA2xx support"
223 depends on ARM && ARCH_PXA && PCMCIA 222 depends on ARM && ARCH_PXA && PCMCIA
223 depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE
224 help 224 help
225 Say Y here to include support for the PXA2xx PCMCIA controller 225 Say Y here to include support for the PXA2xx PCMCIA controller
226 226
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 2dcd1960aca8..98cbc9f18eed 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -84,10 +84,12 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
84 while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) && 84 while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) &&
85 i < PNP_MAX_IRQ) 85 i < PNP_MAX_IRQ)
86 i++; 86 i++;
87 if (i >= PNP_MAX_IRQ && !warned) { 87 if (i >= PNP_MAX_IRQ) {
88 printk(KERN_WARNING "pnpacpi: exceeded the max number of IRQ " 88 if (!warned) {
89 "resources: %d \n", PNP_MAX_IRQ); 89 printk(KERN_WARNING "pnpacpi: exceeded the max number"
90 warned = 1; 90 " of IRQ resources: %d\n", PNP_MAX_IRQ);
91 warned = 1;
92 }
91 return; 93 return;
92 } 94 }
93 /* 95 /*
diff --git a/fs/Kconfig b/fs/Kconfig
index 028ae38ecc52..8b18a8758677 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -689,6 +689,7 @@ config ZISOFS
689 689
690config UDF_FS 690config UDF_FS
691 tristate "UDF file system support" 691 tristate "UDF file system support"
692 select CRC_ITU_T
692 help 693 help
693 This is the new file system used on some CD-ROMs and DVDs. Say Y if 694 This is the new file system used on some CD-ROMs and DVDs. Say Y if
694 you intend to mount DVD discs or CDRW's written in packet mode, or 695 you intend to mount DVD discs or CDRW's written in packet mode, or
diff --git a/fs/dcache.c b/fs/dcache.c
index 43455776711e..3ee588d5f585 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1746,12 +1746,21 @@ shouldnt_be_hashed:
1746 goto shouldnt_be_hashed; 1746 goto shouldnt_be_hashed;
1747} 1747}
1748 1748
1749static int prepend(char **buffer, int *buflen, const char *str,
1750 int namelen)
1751{
1752 *buflen -= namelen;
1753 if (*buflen < 0)
1754 return -ENAMETOOLONG;
1755 *buffer -= namelen;
1756 memcpy(*buffer, str, namelen);
1757 return 0;
1758}
1759
1749/** 1760/**
1750 * d_path - return the path of a dentry 1761 * d_path - return the path of a dentry
1751 * @dentry: dentry to report 1762 * @path: the dentry/vfsmount to report
1752 * @vfsmnt: vfsmnt to which the dentry belongs 1763 * @root: root vfsmnt/dentry (may be modified by this function)
1753 * @root: root dentry
1754 * @rootmnt: vfsmnt to which the root dentry belongs
1755 * @buffer: buffer to return value in 1764 * @buffer: buffer to return value in
1756 * @buflen: buffer length 1765 * @buflen: buffer length
1757 * 1766 *
@@ -1761,23 +1770,22 @@ shouldnt_be_hashed:
1761 * Returns the buffer or an error code if the path was too long. 1770 * Returns the buffer or an error code if the path was too long.
1762 * 1771 *
1763 * "buflen" should be positive. Caller holds the dcache_lock. 1772 * "buflen" should be positive. Caller holds the dcache_lock.
1773 *
1774 * If path is not reachable from the supplied root, then the value of
1775 * root is changed (without modifying refcounts).
1764 */ 1776 */
1765static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt, 1777char *__d_path(const struct path *path, struct path *root,
1766 struct path *root, char *buffer, int buflen) 1778 char *buffer, int buflen)
1767{ 1779{
1780 struct dentry *dentry = path->dentry;
1781 struct vfsmount *vfsmnt = path->mnt;
1768 char * end = buffer+buflen; 1782 char * end = buffer+buflen;
1769 char * retval; 1783 char * retval;
1770 int namelen; 1784
1771 1785 prepend(&end, &buflen, "\0", 1);
1772 *--end = '\0'; 1786 if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
1773 buflen--; 1787 (prepend(&end, &buflen, " (deleted)", 10) != 0))
1774 if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
1775 buflen -= 10;
1776 end -= 10;
1777 if (buflen < 0)
1778 goto Elong; 1788 goto Elong;
1779 memcpy(end, " (deleted)", 10);
1780 }
1781 1789
1782 if (buflen < 1) 1790 if (buflen < 1)
1783 goto Elong; 1791 goto Elong;
@@ -1804,13 +1812,10 @@ static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
1804 } 1812 }
1805 parent = dentry->d_parent; 1813 parent = dentry->d_parent;
1806 prefetch(parent); 1814 prefetch(parent);
1807 namelen = dentry->d_name.len; 1815 if ((prepend(&end, &buflen, dentry->d_name.name,
1808 buflen -= namelen + 1; 1816 dentry->d_name.len) != 0) ||
1809 if (buflen < 0) 1817 (prepend(&end, &buflen, "/", 1) != 0))
1810 goto Elong; 1818 goto Elong;
1811 end -= namelen;
1812 memcpy(end, dentry->d_name.name, namelen);
1813 *--end = '/';
1814 retval = end; 1819 retval = end;
1815 dentry = parent; 1820 dentry = parent;
1816 } 1821 }
@@ -1818,12 +1823,12 @@ static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
1818 return retval; 1823 return retval;
1819 1824
1820global_root: 1825global_root:
1821 namelen = dentry->d_name.len; 1826 retval += 1; /* hit the slash */
1822 buflen -= namelen; 1827 if (prepend(&retval, &buflen, dentry->d_name.name,
1823 if (buflen < 0) 1828 dentry->d_name.len) != 0)
1824 goto Elong; 1829 goto Elong;
1825 retval -= namelen-1; /* hit the slash */ 1830 root->mnt = vfsmnt;
1826 memcpy(retval, dentry->d_name.name, namelen); 1831 root->dentry = dentry;
1827 return retval; 1832 return retval;
1828Elong: 1833Elong:
1829 return ERR_PTR(-ENAMETOOLONG); 1834 return ERR_PTR(-ENAMETOOLONG);
@@ -1846,6 +1851,7 @@ char *d_path(struct path *path, char *buf, int buflen)
1846{ 1851{
1847 char *res; 1852 char *res;
1848 struct path root; 1853 struct path root;
1854 struct path tmp;
1849 1855
1850 /* 1856 /*
1851 * We have various synthetic filesystems that never get mounted. On 1857 * We have various synthetic filesystems that never get mounted. On
@@ -1859,10 +1865,11 @@ char *d_path(struct path *path, char *buf, int buflen)
1859 1865
1860 read_lock(&current->fs->lock); 1866 read_lock(&current->fs->lock);
1861 root = current->fs->root; 1867 root = current->fs->root;
1862 path_get(&current->fs->root); 1868 path_get(&root);
1863 read_unlock(&current->fs->lock); 1869 read_unlock(&current->fs->lock);
1864 spin_lock(&dcache_lock); 1870 spin_lock(&dcache_lock);
1865 res = __d_path(path->dentry, path->mnt, &root, buf, buflen); 1871 tmp = root;
1872 res = __d_path(path, &tmp, buf, buflen);
1866 spin_unlock(&dcache_lock); 1873 spin_unlock(&dcache_lock);
1867 path_put(&root); 1874 path_put(&root);
1868 return res; 1875 return res;
@@ -1890,6 +1897,48 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
1890} 1897}
1891 1898
1892/* 1899/*
1900 * Write full pathname from the root of the filesystem into the buffer.
1901 */
1902char *dentry_path(struct dentry *dentry, char *buf, int buflen)
1903{
1904 char *end = buf + buflen;
1905 char *retval;
1906
1907 spin_lock(&dcache_lock);
1908 prepend(&end, &buflen, "\0", 1);
1909 if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
1910 (prepend(&end, &buflen, "//deleted", 9) != 0))
1911 goto Elong;
1912 if (buflen < 1)
1913 goto Elong;
1914 /* Get '/' right */
1915 retval = end-1;
1916 *retval = '/';
1917
1918 for (;;) {
1919 struct dentry *parent;
1920 if (IS_ROOT(dentry))
1921 break;
1922
1923 parent = dentry->d_parent;
1924 prefetch(parent);
1925
1926 if ((prepend(&end, &buflen, dentry->d_name.name,
1927 dentry->d_name.len) != 0) ||
1928 (prepend(&end, &buflen, "/", 1) != 0))
1929 goto Elong;
1930
1931 retval = end;
1932 dentry = parent;
1933 }
1934 spin_unlock(&dcache_lock);
1935 return retval;
1936Elong:
1937 spin_unlock(&dcache_lock);
1938 return ERR_PTR(-ENAMETOOLONG);
1939}
1940
1941/*
1893 * NOTE! The user-level library version returns a 1942 * NOTE! The user-level library version returns a
1894 * character pointer. The kernel system call just 1943 * character pointer. The kernel system call just
1895 * returns the length of the buffer filled (which 1944 * returns the length of the buffer filled (which
@@ -1918,9 +1967,9 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
1918 1967
1919 read_lock(&current->fs->lock); 1968 read_lock(&current->fs->lock);
1920 pwd = current->fs->pwd; 1969 pwd = current->fs->pwd;
1921 path_get(&current->fs->pwd); 1970 path_get(&pwd);
1922 root = current->fs->root; 1971 root = current->fs->root;
1923 path_get(&current->fs->root); 1972 path_get(&root);
1924 read_unlock(&current->fs->lock); 1973 read_unlock(&current->fs->lock);
1925 1974
1926 error = -ENOENT; 1975 error = -ENOENT;
@@ -1928,9 +1977,10 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
1928 spin_lock(&dcache_lock); 1977 spin_lock(&dcache_lock);
1929 if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) { 1978 if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) {
1930 unsigned long len; 1979 unsigned long len;
1980 struct path tmp = root;
1931 char * cwd; 1981 char * cwd;
1932 1982
1933 cwd = __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE); 1983 cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
1934 spin_unlock(&dcache_lock); 1984 spin_unlock(&dcache_lock);
1935 1985
1936 error = PTR_ERR(cwd); 1986 error = PTR_ERR(cwd);
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index d248e60951ba..ca1c9124c8ce 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -10,6 +10,7 @@ dlm-y := ast.o \
10 midcomms.o \ 10 midcomms.o \
11 netlink.o \ 11 netlink.o \
12 lowcomms.o \ 12 lowcomms.o \
13 plock.o \
13 rcom.o \ 14 rcom.o \
14 recover.o \ 15 recover.o \
15 recoverd.o \ 16 recoverd.o \
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index c3ad1dff3b25..eac23bd288b2 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -114,7 +114,7 @@ struct cluster_attribute {
114}; 114};
115 115
116static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field, 116static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
117 unsigned int *info_field, int check_zero, 117 int *info_field, int check_zero,
118 const char *buf, size_t len) 118 const char *buf, size_t len)
119{ 119{
120 unsigned int x; 120 unsigned int x;
@@ -284,6 +284,7 @@ struct node {
284 struct list_head list; /* space->members */ 284 struct list_head list; /* space->members */
285 int nodeid; 285 int nodeid;
286 int weight; 286 int weight;
287 int new;
287}; 288};
288 289
289static struct configfs_group_operations clusters_ops = { 290static struct configfs_group_operations clusters_ops = {
@@ -565,6 +566,7 @@ static struct config_item *make_node(struct config_group *g, const char *name)
565 config_item_init_type_name(&nd->item, name, &node_type); 566 config_item_init_type_name(&nd->item, name, &node_type);
566 nd->nodeid = -1; 567 nd->nodeid = -1;
567 nd->weight = 1; /* default weight of 1 if none is set */ 568 nd->weight = 1; /* default weight of 1 if none is set */
569 nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */
568 570
569 mutex_lock(&sp->members_lock); 571 mutex_lock(&sp->members_lock);
570 list_add(&nd->list, &sp->members); 572 list_add(&nd->list, &sp->members);
@@ -805,12 +807,13 @@ static void put_comm(struct comm *cm)
805} 807}
806 808
807/* caller must free mem */ 809/* caller must free mem */
808int dlm_nodeid_list(char *lsname, int **ids_out) 810int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
811 int **new_out, int *new_count_out)
809{ 812{
810 struct space *sp; 813 struct space *sp;
811 struct node *nd; 814 struct node *nd;
812 int i = 0, rv = 0; 815 int i = 0, rv = 0, ids_count = 0, new_count = 0;
813 int *ids; 816 int *ids, *new;
814 817
815 sp = get_space(lsname); 818 sp = get_space(lsname);
816 if (!sp) 819 if (!sp)
@@ -818,23 +821,50 @@ int dlm_nodeid_list(char *lsname, int **ids_out)
818 821
819 mutex_lock(&sp->members_lock); 822 mutex_lock(&sp->members_lock);
820 if (!sp->members_count) { 823 if (!sp->members_count) {
821 rv = 0; 824 rv = -EINVAL;
825 printk(KERN_ERR "dlm: zero members_count\n");
822 goto out; 826 goto out;
823 } 827 }
824 828
825 ids = kcalloc(sp->members_count, sizeof(int), GFP_KERNEL); 829 ids_count = sp->members_count;
830
831 ids = kcalloc(ids_count, sizeof(int), GFP_KERNEL);
826 if (!ids) { 832 if (!ids) {
827 rv = -ENOMEM; 833 rv = -ENOMEM;
828 goto out; 834 goto out;
829 } 835 }
830 836
831 rv = sp->members_count; 837 list_for_each_entry(nd, &sp->members, list) {
832 list_for_each_entry(nd, &sp->members, list)
833 ids[i++] = nd->nodeid; 838 ids[i++] = nd->nodeid;
839 if (nd->new)
840 new_count++;
841 }
842
843 if (ids_count != i)
844 printk(KERN_ERR "dlm: bad nodeid count %d %d\n", ids_count, i);
845
846 if (!new_count)
847 goto out_ids;
848
849 new = kcalloc(new_count, sizeof(int), GFP_KERNEL);
850 if (!new) {
851 kfree(ids);
852 rv = -ENOMEM;
853 goto out;
854 }
834 855
835 if (rv != i) 856 i = 0;
836 printk("bad nodeid count %d %d\n", rv, i); 857 list_for_each_entry(nd, &sp->members, list) {
858 if (nd->new) {
859 new[i++] = nd->nodeid;
860 nd->new = 0;
861 }
862 }
863 *new_count_out = new_count;
864 *new_out = new;
837 865
866 out_ids:
867 *ids_count_out = ids_count;
838 *ids_out = ids; 868 *ids_out = ids;
839 out: 869 out:
840 mutex_unlock(&sp->members_lock); 870 mutex_unlock(&sp->members_lock);
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index a3170fe22090..4f1d6fce58c5 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -35,7 +35,8 @@ extern struct dlm_config_info dlm_config;
35int dlm_config_init(void); 35int dlm_config_init(void);
36void dlm_config_exit(void); 36void dlm_config_exit(void);
37int dlm_node_weight(char *lsname, int nodeid); 37int dlm_node_weight(char *lsname, int nodeid);
38int dlm_nodeid_list(char *lsname, int **ids_out); 38int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
39 int **new_out, int *new_count_out);
39int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr); 40int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
40int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid); 41int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
41int dlm_our_nodeid(void); 42int dlm_our_nodeid(void);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 7a8824f475f2..5a7ac33b629c 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -42,8 +42,6 @@
42#include <linux/dlm.h> 42#include <linux/dlm.h>
43#include "config.h" 43#include "config.h"
44 44
45#define DLM_LOCKSPACE_LEN 64
46
47/* Size of the temp buffer midcomms allocates on the stack. 45/* Size of the temp buffer midcomms allocates on the stack.
48 We try to make this large enough so most messages fit. 46 We try to make this large enough so most messages fit.
49 FIXME: should sctp make this unnecessary? */ 47 FIXME: should sctp make this unnecessary? */
@@ -132,8 +130,10 @@ struct dlm_member {
132 130
133struct dlm_recover { 131struct dlm_recover {
134 struct list_head list; 132 struct list_head list;
135 int *nodeids; 133 int *nodeids; /* nodeids of all members */
136 int node_count; 134 int node_count;
135 int *new; /* nodeids of new members */
136 int new_count;
137 uint64_t seq; 137 uint64_t seq;
138}; 138};
139 139
@@ -579,6 +579,8 @@ static inline int dlm_no_directory(struct dlm_ls *ls)
579int dlm_netlink_init(void); 579int dlm_netlink_init(void);
580void dlm_netlink_exit(void); 580void dlm_netlink_exit(void);
581void dlm_timeout_warn(struct dlm_lkb *lkb); 581void dlm_timeout_warn(struct dlm_lkb *lkb);
582int dlm_plock_init(void);
583void dlm_plock_exit(void);
582 584
583#ifdef CONFIG_DLM_DEBUG 585#ifdef CONFIG_DLM_DEBUG
584int dlm_register_debugfs(void); 586int dlm_register_debugfs(void);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 8f250ac8b928..2d3d1027ce2b 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -165,7 +165,7 @@ void dlm_print_lkb(struct dlm_lkb *lkb)
165 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type); 165 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
166} 166}
167 167
168void dlm_print_rsb(struct dlm_rsb *r) 168static void dlm_print_rsb(struct dlm_rsb *r)
169{ 169{
170 printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n", 170 printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
171 r->res_nodeid, r->res_flags, r->res_first_lkid, 171 r->res_nodeid, r->res_flags, r->res_first_lkid,
@@ -1956,8 +1956,7 @@ static void confirm_master(struct dlm_rsb *r, int error)
1956 list_del_init(&lkb->lkb_rsb_lookup); 1956 list_del_init(&lkb->lkb_rsb_lookup);
1957 r->res_first_lkid = lkb->lkb_id; 1957 r->res_first_lkid = lkb->lkb_id;
1958 _request_lock(r, lkb); 1958 _request_lock(r, lkb);
1959 } else 1959 }
1960 r->res_nodeid = -1;
1961 break; 1960 break;
1962 1961
1963 default: 1962 default:
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 05d9c82e646b..88e93c80cc22 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -13,7 +13,6 @@
13#ifndef __LOCK_DOT_H__ 13#ifndef __LOCK_DOT_H__
14#define __LOCK_DOT_H__ 14#define __LOCK_DOT_H__
15 15
16void dlm_print_rsb(struct dlm_rsb *r);
17void dlm_dump_rsb(struct dlm_rsb *r); 16void dlm_dump_rsb(struct dlm_rsb *r);
18void dlm_print_lkb(struct dlm_lkb *lkb); 17void dlm_print_lkb(struct dlm_lkb *lkb);
19void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms); 18void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms);
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 58487fb95a4c..b80e0aa3cfa5 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -46,10 +46,16 @@ static int __init init_dlm(void)
46 if (error) 46 if (error)
47 goto out_user; 47 goto out_user;
48 48
49 error = dlm_plock_init();
50 if (error)
51 goto out_netlink;
52
49 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); 53 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
50 54
51 return 0; 55 return 0;
52 56
57 out_netlink:
58 dlm_netlink_exit();
53 out_user: 59 out_user:
54 dlm_user_exit(); 60 dlm_user_exit();
55 out_debug: 61 out_debug:
@@ -66,6 +72,7 @@ static int __init init_dlm(void)
66 72
67static void __exit exit_dlm(void) 73static void __exit exit_dlm(void)
68{ 74{
75 dlm_plock_exit();
69 dlm_netlink_exit(); 76 dlm_netlink_exit();
70 dlm_user_exit(); 77 dlm_user_exit();
71 dlm_config_exit(); 78 dlm_config_exit();
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index fa17f5a27883..26133f05ae3a 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -210,6 +210,23 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
210 } 210 }
211 } 211 }
212 212
213 /* Add an entry to ls_nodes_gone for members that were removed and
214 then added again, so that previous state for these nodes will be
215 cleared during recovery. */
216
217 for (i = 0; i < rv->new_count; i++) {
218 if (!dlm_is_member(ls, rv->new[i]))
219 continue;
220 log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
221
222 memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
223 if (!memb)
224 return -ENOMEM;
225 memb->nodeid = rv->new[i];
226 list_add_tail(&memb->list, &ls->ls_nodes_gone);
227 neg++;
228 }
229
213 /* add new members to ls_nodes */ 230 /* add new members to ls_nodes */
214 231
215 for (i = 0; i < rv->node_count; i++) { 232 for (i = 0; i < rv->node_count; i++) {
@@ -314,15 +331,16 @@ int dlm_ls_stop(struct dlm_ls *ls)
314int dlm_ls_start(struct dlm_ls *ls) 331int dlm_ls_start(struct dlm_ls *ls)
315{ 332{
316 struct dlm_recover *rv = NULL, *rv_old; 333 struct dlm_recover *rv = NULL, *rv_old;
317 int *ids = NULL; 334 int *ids = NULL, *new = NULL;
318 int error, count; 335 int error, ids_count = 0, new_count = 0;
319 336
320 rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL); 337 rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
321 if (!rv) 338 if (!rv)
322 return -ENOMEM; 339 return -ENOMEM;
323 340
324 error = count = dlm_nodeid_list(ls->ls_name, &ids); 341 error = dlm_nodeid_list(ls->ls_name, &ids, &ids_count,
325 if (error <= 0) 342 &new, &new_count);
343 if (error < 0)
326 goto fail; 344 goto fail;
327 345
328 spin_lock(&ls->ls_recover_lock); 346 spin_lock(&ls->ls_recover_lock);
@@ -337,14 +355,19 @@ int dlm_ls_start(struct dlm_ls *ls)
337 } 355 }
338 356
339 rv->nodeids = ids; 357 rv->nodeids = ids;
340 rv->node_count = count; 358 rv->node_count = ids_count;
359 rv->new = new;
360 rv->new_count = new_count;
341 rv->seq = ++ls->ls_recover_seq; 361 rv->seq = ++ls->ls_recover_seq;
342 rv_old = ls->ls_recover_args; 362 rv_old = ls->ls_recover_args;
343 ls->ls_recover_args = rv; 363 ls->ls_recover_args = rv;
344 spin_unlock(&ls->ls_recover_lock); 364 spin_unlock(&ls->ls_recover_lock);
345 365
346 if (rv_old) { 366 if (rv_old) {
367 log_error(ls, "unused recovery %llx %d",
368 (unsigned long long)rv_old->seq, rv_old->node_count);
347 kfree(rv_old->nodeids); 369 kfree(rv_old->nodeids);
370 kfree(rv_old->new);
348 kfree(rv_old); 371 kfree(rv_old);
349 } 372 }
350 373
@@ -354,6 +377,7 @@ int dlm_ls_start(struct dlm_ls *ls)
354 fail: 377 fail:
355 kfree(rv); 378 kfree(rv);
356 kfree(ids); 379 kfree(ids);
380 kfree(new);
357 return error; 381 return error;
358} 382}
359 383
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/dlm/plock.c
index 2ebd374b3143..d6d6e370f89c 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -1,17 +1,19 @@
1/* 1/*
2 * Copyright (C) 2005 Red Hat, Inc. All rights reserved. 2 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
3 * 3 *
4 * This copyrighted material is made available to anyone wishing to use, 4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions 5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License version 2. 6 * of the GNU General Public License version 2.
7 */ 7 */
8 8
9#include <linux/fs.h>
9#include <linux/miscdevice.h> 10#include <linux/miscdevice.h>
10#include <linux/lock_dlm_plock.h>
11#include <linux/poll.h> 11#include <linux/poll.h>
12#include <linux/dlm.h>
13#include <linux/dlm_plock.h>
12 14
13#include "lock_dlm.h" 15#include "dlm_internal.h"
14 16#include "lockspace.h"
15 17
16static spinlock_t ops_lock; 18static spinlock_t ops_lock;
17static struct list_head send_list; 19static struct list_head send_list;
@@ -22,7 +24,7 @@ static wait_queue_head_t recv_wq;
22struct plock_op { 24struct plock_op {
23 struct list_head list; 25 struct list_head list;
24 int done; 26 int done;
25 struct gdlm_plock_info info; 27 struct dlm_plock_info info;
26}; 28};
27 29
28struct plock_xop { 30struct plock_xop {
@@ -34,22 +36,22 @@ struct plock_xop {
34}; 36};
35 37
36 38
37static inline void set_version(struct gdlm_plock_info *info) 39static inline void set_version(struct dlm_plock_info *info)
38{ 40{
39 info->version[0] = GDLM_PLOCK_VERSION_MAJOR; 41 info->version[0] = DLM_PLOCK_VERSION_MAJOR;
40 info->version[1] = GDLM_PLOCK_VERSION_MINOR; 42 info->version[1] = DLM_PLOCK_VERSION_MINOR;
41 info->version[2] = GDLM_PLOCK_VERSION_PATCH; 43 info->version[2] = DLM_PLOCK_VERSION_PATCH;
42} 44}
43 45
44static int check_version(struct gdlm_plock_info *info) 46static int check_version(struct dlm_plock_info *info)
45{ 47{
46 if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) || 48 if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
47 (GDLM_PLOCK_VERSION_MINOR < info->version[1])) { 49 (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
48 log_error("plock device version mismatch: " 50 log_print("plock device version mismatch: "
49 "kernel (%u.%u.%u), user (%u.%u.%u)", 51 "kernel (%u.%u.%u), user (%u.%u.%u)",
50 GDLM_PLOCK_VERSION_MAJOR, 52 DLM_PLOCK_VERSION_MAJOR,
51 GDLM_PLOCK_VERSION_MINOR, 53 DLM_PLOCK_VERSION_MINOR,
52 GDLM_PLOCK_VERSION_PATCH, 54 DLM_PLOCK_VERSION_PATCH,
53 info->version[0], 55 info->version[0],
54 info->version[1], 56 info->version[1],
55 info->version[2]); 57 info->version[2]);
@@ -68,25 +70,31 @@ static void send_op(struct plock_op *op)
68 wake_up(&send_wq); 70 wake_up(&send_wq);
69} 71}
70 72
71int gdlm_plock(void *lockspace, struct lm_lockname *name, 73int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
72 struct file *file, int cmd, struct file_lock *fl) 74 int cmd, struct file_lock *fl)
73{ 75{
74 struct gdlm_ls *ls = lockspace; 76 struct dlm_ls *ls;
75 struct plock_op *op; 77 struct plock_op *op;
76 struct plock_xop *xop; 78 struct plock_xop *xop;
77 int rv; 79 int rv;
78 80
81 ls = dlm_find_lockspace_local(lockspace);
82 if (!ls)
83 return -EINVAL;
84
79 xop = kzalloc(sizeof(*xop), GFP_KERNEL); 85 xop = kzalloc(sizeof(*xop), GFP_KERNEL);
80 if (!xop) 86 if (!xop) {
81 return -ENOMEM; 87 rv = -ENOMEM;
88 goto out;
89 }
82 90
83 op = &xop->xop; 91 op = &xop->xop;
84 op->info.optype = GDLM_PLOCK_OP_LOCK; 92 op->info.optype = DLM_PLOCK_OP_LOCK;
85 op->info.pid = fl->fl_pid; 93 op->info.pid = fl->fl_pid;
86 op->info.ex = (fl->fl_type == F_WRLCK); 94 op->info.ex = (fl->fl_type == F_WRLCK);
87 op->info.wait = IS_SETLKW(cmd); 95 op->info.wait = IS_SETLKW(cmd);
88 op->info.fsid = ls->id; 96 op->info.fsid = ls->ls_global_id;
89 op->info.number = name->ln_number; 97 op->info.number = number;
90 op->info.start = fl->fl_start; 98 op->info.start = fl->fl_start;
91 op->info.end = fl->fl_end; 99 op->info.end = fl->fl_end;
92 if (fl->fl_lmops && fl->fl_lmops->fl_grant) { 100 if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
@@ -107,12 +115,15 @@ int gdlm_plock(void *lockspace, struct lm_lockname *name,
107 115
108 if (xop->callback == NULL) 116 if (xop->callback == NULL)
109 wait_event(recv_wq, (op->done != 0)); 117 wait_event(recv_wq, (op->done != 0));
110 else 118 else {
111 return -EINPROGRESS; 119 rv = -EINPROGRESS;
120 goto out;
121 }
112 122
113 spin_lock(&ops_lock); 123 spin_lock(&ops_lock);
114 if (!list_empty(&op->list)) { 124 if (!list_empty(&op->list)) {
115 printk(KERN_INFO "plock op on list\n"); 125 log_error(ls, "dlm_posix_lock: op on list %llx",
126 (unsigned long long)number);
116 list_del(&op->list); 127 list_del(&op->list);
117 } 128 }
118 spin_unlock(&ops_lock); 129 spin_unlock(&ops_lock);
@@ -121,17 +132,19 @@ int gdlm_plock(void *lockspace, struct lm_lockname *name,
121 132
122 if (!rv) { 133 if (!rv) {
123 if (posix_lock_file_wait(file, fl) < 0) 134 if (posix_lock_file_wait(file, fl) < 0)
124 log_error("gdlm_plock: vfs lock error %x,%llx", 135 log_error(ls, "dlm_posix_lock: vfs lock error %llx",
125 name->ln_type, 136 (unsigned long long)number);
126 (unsigned long long)name->ln_number);
127 } 137 }
128 138
129 kfree(xop); 139 kfree(xop);
140out:
141 dlm_put_lockspace(ls);
130 return rv; 142 return rv;
131} 143}
144EXPORT_SYMBOL_GPL(dlm_posix_lock);
132 145
133/* Returns failure iff a successful lock operation should be canceled */ 146
134static int gdlm_plock_callback(struct plock_op *op) 147static int dlm_plock_callback(struct plock_op *op)
135{ 148{
136 struct file *file; 149 struct file *file;
137 struct file_lock *fl; 150 struct file_lock *fl;
@@ -142,7 +155,8 @@ static int gdlm_plock_callback(struct plock_op *op)
142 155
143 spin_lock(&ops_lock); 156 spin_lock(&ops_lock);
144 if (!list_empty(&op->list)) { 157 if (!list_empty(&op->list)) {
145 printk(KERN_INFO "plock op on list\n"); 158 log_print("dlm_plock_callback: op on list %llx",
159 (unsigned long long)op->info.number);
146 list_del(&op->list); 160 list_del(&op->list);
147 } 161 }
148 spin_unlock(&ops_lock); 162 spin_unlock(&ops_lock);
@@ -165,19 +179,19 @@ static int gdlm_plock_callback(struct plock_op *op)
165 * This can only happen in the case of kmalloc() failure. 179 * This can only happen in the case of kmalloc() failure.
166 * The filesystem's own lock is the authoritative lock, 180 * The filesystem's own lock is the authoritative lock,
167 * so a failure to get the lock locally is not a disaster. 181 * so a failure to get the lock locally is not a disaster.
168 * As long as GFS cannot reliably cancel locks (especially 182 * As long as the fs cannot reliably cancel locks (especially
169 * in a low-memory situation), we're better off ignoring 183 * in a low-memory situation), we're better off ignoring
170 * this failure than trying to recover. 184 * this failure than trying to recover.
171 */ 185 */
172 log_error("gdlm_plock: vfs lock error file %p fl %p", 186 log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
173 file, fl); 187 (unsigned long long)op->info.number, file, fl);
174 } 188 }
175 189
176 rv = notify(flc, NULL, 0); 190 rv = notify(flc, NULL, 0);
177 if (rv) { 191 if (rv) {
178 /* XXX: We need to cancel the fs lock here: */ 192 /* XXX: We need to cancel the fs lock here: */
179 printk("gfs2 lock granted after lock request failed;" 193 log_print("dlm_plock_callback: lock granted after lock request "
180 " dangling lock!\n"); 194 "failed; dangling lock!\n");
181 goto out; 195 goto out;
182 } 196 }
183 197
@@ -186,25 +200,31 @@ out:
186 return rv; 200 return rv;
187} 201}
188 202
189int gdlm_punlock(void *lockspace, struct lm_lockname *name, 203int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
190 struct file *file, struct file_lock *fl) 204 struct file_lock *fl)
191{ 205{
192 struct gdlm_ls *ls = lockspace; 206 struct dlm_ls *ls;
193 struct plock_op *op; 207 struct plock_op *op;
194 int rv; 208 int rv;
195 209
210 ls = dlm_find_lockspace_local(lockspace);
211 if (!ls)
212 return -EINVAL;
213
196 op = kzalloc(sizeof(*op), GFP_KERNEL); 214 op = kzalloc(sizeof(*op), GFP_KERNEL);
197 if (!op) 215 if (!op) {
198 return -ENOMEM; 216 rv = -ENOMEM;
217 goto out;
218 }
199 219
200 if (posix_lock_file_wait(file, fl) < 0) 220 if (posix_lock_file_wait(file, fl) < 0)
201 log_error("gdlm_punlock: vfs unlock error %x,%llx", 221 log_error(ls, "dlm_posix_unlock: vfs unlock error %llx",
202 name->ln_type, (unsigned long long)name->ln_number); 222 (unsigned long long)number);
203 223
204 op->info.optype = GDLM_PLOCK_OP_UNLOCK; 224 op->info.optype = DLM_PLOCK_OP_UNLOCK;
205 op->info.pid = fl->fl_pid; 225 op->info.pid = fl->fl_pid;
206 op->info.fsid = ls->id; 226 op->info.fsid = ls->ls_global_id;
207 op->info.number = name->ln_number; 227 op->info.number = number;
208 op->info.start = fl->fl_start; 228 op->info.start = fl->fl_start;
209 op->info.end = fl->fl_end; 229 op->info.end = fl->fl_end;
210 if (fl->fl_lmops && fl->fl_lmops->fl_grant) 230 if (fl->fl_lmops && fl->fl_lmops->fl_grant)
@@ -217,7 +237,8 @@ int gdlm_punlock(void *lockspace, struct lm_lockname *name,
217 237
218 spin_lock(&ops_lock); 238 spin_lock(&ops_lock);
219 if (!list_empty(&op->list)) { 239 if (!list_empty(&op->list)) {
220 printk(KERN_INFO "punlock op on list\n"); 240 log_error(ls, "dlm_posix_unlock: op on list %llx",
241 (unsigned long long)number);
221 list_del(&op->list); 242 list_del(&op->list);
222 } 243 }
223 spin_unlock(&ops_lock); 244 spin_unlock(&ops_lock);
@@ -228,25 +249,34 @@ int gdlm_punlock(void *lockspace, struct lm_lockname *name,
228 rv = 0; 249 rv = 0;
229 250
230 kfree(op); 251 kfree(op);
252out:
253 dlm_put_lockspace(ls);
231 return rv; 254 return rv;
232} 255}
256EXPORT_SYMBOL_GPL(dlm_posix_unlock);
233 257
234int gdlm_plock_get(void *lockspace, struct lm_lockname *name, 258int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
235 struct file *file, struct file_lock *fl) 259 struct file_lock *fl)
236{ 260{
237 struct gdlm_ls *ls = lockspace; 261 struct dlm_ls *ls;
238 struct plock_op *op; 262 struct plock_op *op;
239 int rv; 263 int rv;
240 264
265 ls = dlm_find_lockspace_local(lockspace);
266 if (!ls)
267 return -EINVAL;
268
241 op = kzalloc(sizeof(*op), GFP_KERNEL); 269 op = kzalloc(sizeof(*op), GFP_KERNEL);
242 if (!op) 270 if (!op) {
243 return -ENOMEM; 271 rv = -ENOMEM;
272 goto out;
273 }
244 274
245 op->info.optype = GDLM_PLOCK_OP_GET; 275 op->info.optype = DLM_PLOCK_OP_GET;
246 op->info.pid = fl->fl_pid; 276 op->info.pid = fl->fl_pid;
247 op->info.ex = (fl->fl_type == F_WRLCK); 277 op->info.ex = (fl->fl_type == F_WRLCK);
248 op->info.fsid = ls->id; 278 op->info.fsid = ls->ls_global_id;
249 op->info.number = name->ln_number; 279 op->info.number = number;
250 op->info.start = fl->fl_start; 280 op->info.start = fl->fl_start;
251 op->info.end = fl->fl_end; 281 op->info.end = fl->fl_end;
252 if (fl->fl_lmops && fl->fl_lmops->fl_grant) 282 if (fl->fl_lmops && fl->fl_lmops->fl_grant)
@@ -259,7 +289,8 @@ int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
259 289
260 spin_lock(&ops_lock); 290 spin_lock(&ops_lock);
261 if (!list_empty(&op->list)) { 291 if (!list_empty(&op->list)) {
262 printk(KERN_INFO "plock_get op on list\n"); 292 log_error(ls, "dlm_posix_get: op on list %llx",
293 (unsigned long long)number);
263 list_del(&op->list); 294 list_del(&op->list);
264 } 295 }
265 spin_unlock(&ops_lock); 296 spin_unlock(&ops_lock);
@@ -281,14 +312,17 @@ int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
281 } 312 }
282 313
283 kfree(op); 314 kfree(op);
315out:
316 dlm_put_lockspace(ls);
284 return rv; 317 return rv;
285} 318}
319EXPORT_SYMBOL_GPL(dlm_posix_get);
286 320
287/* a read copies out one plock request from the send list */ 321/* a read copies out one plock request from the send list */
288static ssize_t dev_read(struct file *file, char __user *u, size_t count, 322static ssize_t dev_read(struct file *file, char __user *u, size_t count,
289 loff_t *ppos) 323 loff_t *ppos)
290{ 324{
291 struct gdlm_plock_info info; 325 struct dlm_plock_info info;
292 struct plock_op *op = NULL; 326 struct plock_op *op = NULL;
293 327
294 if (count < sizeof(info)) 328 if (count < sizeof(info))
@@ -315,7 +349,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
315static ssize_t dev_write(struct file *file, const char __user *u, size_t count, 349static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
316 loff_t *ppos) 350 loff_t *ppos)
317{ 351{
318 struct gdlm_plock_info info; 352 struct dlm_plock_info info;
319 struct plock_op *op; 353 struct plock_op *op;
320 int found = 0; 354 int found = 0;
321 355
@@ -345,12 +379,12 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
345 struct plock_xop *xop; 379 struct plock_xop *xop;
346 xop = (struct plock_xop *)op; 380 xop = (struct plock_xop *)op;
347 if (xop->callback) 381 if (xop->callback)
348 count = gdlm_plock_callback(op); 382 count = dlm_plock_callback(op);
349 else 383 else
350 wake_up(&recv_wq); 384 wake_up(&recv_wq);
351 } else 385 } else
352 printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid, 386 log_print("dev_write no op %x %llx", info.fsid,
353 (unsigned long long)info.number); 387 (unsigned long long)info.number);
354 return count; 388 return count;
355} 389}
356 390
@@ -377,11 +411,11 @@ static const struct file_operations dev_fops = {
377 411
378static struct miscdevice plock_dev_misc = { 412static struct miscdevice plock_dev_misc = {
379 .minor = MISC_DYNAMIC_MINOR, 413 .minor = MISC_DYNAMIC_MINOR,
380 .name = GDLM_PLOCK_MISC_NAME, 414 .name = DLM_PLOCK_MISC_NAME,
381 .fops = &dev_fops 415 .fops = &dev_fops
382}; 416};
383 417
384int gdlm_plock_init(void) 418int dlm_plock_init(void)
385{ 419{
386 int rv; 420 int rv;
387 421
@@ -393,14 +427,13 @@ int gdlm_plock_init(void)
393 427
394 rv = misc_register(&plock_dev_misc); 428 rv = misc_register(&plock_dev_misc);
395 if (rv) 429 if (rv)
396 printk(KERN_INFO "gdlm_plock_init: misc_register failed %d", 430 log_print("dlm_plock_init: misc_register failed %d", rv);
397 rv);
398 return rv; 431 return rv;
399} 432}
400 433
401void gdlm_plock_exit(void) 434void dlm_plock_exit(void)
402{ 435{
403 if (misc_deregister(&plock_dev_misc) < 0) 436 if (misc_deregister(&plock_dev_misc) < 0)
404 printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed"); 437 log_print("dlm_plock_exit: misc_deregister failed");
405} 438}
406 439
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 997f9531d594..fd677c8c3d3b 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -257,6 +257,7 @@ static void do_ls_recovery(struct dlm_ls *ls)
257 if (rv) { 257 if (rv) {
258 ls_recover(ls, rv); 258 ls_recover(ls, rv);
259 kfree(rv->nodeids); 259 kfree(rv->nodeids);
260 kfree(rv->new);
260 kfree(rv); 261 kfree(rv);
261 } 262 }
262} 263}
diff --git a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile
index 89b93b6b45cf..2609bb6cd013 100644
--- a/fs/gfs2/locking/dlm/Makefile
+++ b/fs/gfs2/locking/dlm/Makefile
@@ -1,3 +1,3 @@
1obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o 1obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
2lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o plock.o 2lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o
3 3
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 58fcf8c5bf39..a243cf69c54e 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -25,6 +25,7 @@
25#include <net/sock.h> 25#include <net/sock.h>
26 26
27#include <linux/dlm.h> 27#include <linux/dlm.h>
28#include <linux/dlm_plock.h>
28#include <linux/lm_interface.h> 29#include <linux/lm_interface.h>
29 30
30/* 31/*
@@ -173,17 +174,6 @@ void gdlm_cancel(void *);
173int gdlm_hold_lvb(void *, char **); 174int gdlm_hold_lvb(void *, char **);
174void gdlm_unhold_lvb(void *, char *); 175void gdlm_unhold_lvb(void *, char *);
175 176
176/* plock.c */
177
178int gdlm_plock_init(void);
179void gdlm_plock_exit(void);
180int gdlm_plock(void *, struct lm_lockname *, struct file *, int,
181 struct file_lock *);
182int gdlm_plock_get(void *, struct lm_lockname *, struct file *,
183 struct file_lock *);
184int gdlm_punlock(void *, struct lm_lockname *, struct file *,
185 struct file_lock *);
186
187/* mount.c */ 177/* mount.c */
188 178
189extern const struct lm_lockops gdlm_ops; 179extern const struct lm_lockops gdlm_ops;
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index 36a225850bd8..b9a03a7ff801 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -28,13 +28,6 @@ static int __init init_lock_dlm(void)
28 return error; 28 return error;
29 } 29 }
30 30
31 error = gdlm_plock_init();
32 if (error) {
33 gdlm_sysfs_exit();
34 gfs2_unregister_lockproto(&gdlm_ops);
35 return error;
36 }
37
38 printk(KERN_INFO 31 printk(KERN_INFO
39 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__); 32 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
40 return 0; 33 return 0;
@@ -42,7 +35,6 @@ static int __init init_lock_dlm(void)
42 35
43static void __exit exit_lock_dlm(void) 36static void __exit exit_lock_dlm(void)
44{ 37{
45 gdlm_plock_exit();
46 gdlm_sysfs_exit(); 38 gdlm_sysfs_exit();
47 gfs2_unregister_lockproto(&gdlm_ops); 39 gfs2_unregister_lockproto(&gdlm_ops);
48} 40}
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index f2efff424224..470bdf650b50 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -236,6 +236,27 @@ static void gdlm_withdraw(void *lockspace)
236 gdlm_kobject_release(ls); 236 gdlm_kobject_release(ls);
237} 237}
238 238
239static int gdlm_plock(void *lockspace, struct lm_lockname *name,
240 struct file *file, int cmd, struct file_lock *fl)
241{
242 struct gdlm_ls *ls = lockspace;
243 return dlm_posix_lock(ls->dlm_lockspace, name->ln_number, file, cmd, fl);
244}
245
246static int gdlm_punlock(void *lockspace, struct lm_lockname *name,
247 struct file *file, struct file_lock *fl)
248{
249 struct gdlm_ls *ls = lockspace;
250 return dlm_posix_unlock(ls->dlm_lockspace, name->ln_number, file, fl);
251}
252
253static int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
254 struct file *file, struct file_lock *fl)
255{
256 struct gdlm_ls *ls = lockspace;
257 return dlm_posix_get(ls->dlm_lockspace, name->ln_number, file, fl);
258}
259
239const struct lm_lockops gdlm_ops = { 260const struct lm_lockops gdlm_ops = {
240 .lm_proto_name = "lock_dlm", 261 .lm_proto_name = "lock_dlm",
241 .lm_mount = gdlm_mount, 262 .lm_mount = gdlm_mount,
diff --git a/fs/internal.h b/fs/internal.h
index 392e8ccd6fc4..80aa9a023372 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -43,3 +43,14 @@ extern void __init chrdev_init(void);
43 * namespace.c 43 * namespace.c
44 */ 44 */
45extern int copy_mount_options(const void __user *, unsigned long *); 45extern int copy_mount_options(const void __user *, unsigned long *);
46
47extern void free_vfsmnt(struct vfsmount *);
48extern struct vfsmount *alloc_vfsmnt(const char *);
49extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
50extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
51 struct vfsmount *);
52extern void release_mounts(struct list_head *);
53extern void umount_tree(struct vfsmount *, int, struct list_head *);
54extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
55
56extern void __init mnt_init(void);
diff --git a/fs/namespace.c b/fs/namespace.c
index 678f7ce060f2..0505fb61aa74 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -27,6 +27,7 @@
27#include <linux/mount.h> 27#include <linux/mount.h>
28#include <linux/ramfs.h> 28#include <linux/ramfs.h>
29#include <linux/log2.h> 29#include <linux/log2.h>
30#include <linux/idr.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/unistd.h> 32#include <asm/unistd.h>
32#include "pnode.h" 33#include "pnode.h"
@@ -39,6 +40,8 @@
39__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); 40__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
40 41
41static int event; 42static int event;
43static DEFINE_IDA(mnt_id_ida);
44static DEFINE_IDA(mnt_group_ida);
42 45
43static struct list_head *mount_hashtable __read_mostly; 46static struct list_head *mount_hashtable __read_mostly;
44static struct kmem_cache *mnt_cache __read_mostly; 47static struct kmem_cache *mnt_cache __read_mostly;
@@ -58,10 +61,63 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
58 61
59#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) 62#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
60 63
64/* allocation is serialized by namespace_sem */
65static int mnt_alloc_id(struct vfsmount *mnt)
66{
67 int res;
68
69retry:
70 ida_pre_get(&mnt_id_ida, GFP_KERNEL);
71 spin_lock(&vfsmount_lock);
72 res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
73 spin_unlock(&vfsmount_lock);
74 if (res == -EAGAIN)
75 goto retry;
76
77 return res;
78}
79
80static void mnt_free_id(struct vfsmount *mnt)
81{
82 spin_lock(&vfsmount_lock);
83 ida_remove(&mnt_id_ida, mnt->mnt_id);
84 spin_unlock(&vfsmount_lock);
85}
86
87/*
88 * Allocate a new peer group ID
89 *
90 * mnt_group_ida is protected by namespace_sem
91 */
92static int mnt_alloc_group_id(struct vfsmount *mnt)
93{
94 if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
95 return -ENOMEM;
96
97 return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
98}
99
100/*
101 * Release a peer group ID
102 */
103void mnt_release_group_id(struct vfsmount *mnt)
104{
105 ida_remove(&mnt_group_ida, mnt->mnt_group_id);
106 mnt->mnt_group_id = 0;
107}
108
61struct vfsmount *alloc_vfsmnt(const char *name) 109struct vfsmount *alloc_vfsmnt(const char *name)
62{ 110{
63 struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); 111 struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
64 if (mnt) { 112 if (mnt) {
113 int err;
114
115 err = mnt_alloc_id(mnt);
116 if (err) {
117 kmem_cache_free(mnt_cache, mnt);
118 return NULL;
119 }
120
65 atomic_set(&mnt->mnt_count, 1); 121 atomic_set(&mnt->mnt_count, 1);
66 INIT_LIST_HEAD(&mnt->mnt_hash); 122 INIT_LIST_HEAD(&mnt->mnt_hash);
67 INIT_LIST_HEAD(&mnt->mnt_child); 123 INIT_LIST_HEAD(&mnt->mnt_child);
@@ -353,6 +409,7 @@ EXPORT_SYMBOL(simple_set_mnt);
353void free_vfsmnt(struct vfsmount *mnt) 409void free_vfsmnt(struct vfsmount *mnt)
354{ 410{
355 kfree(mnt->mnt_devname); 411 kfree(mnt->mnt_devname);
412 mnt_free_id(mnt);
356 kmem_cache_free(mnt_cache, mnt); 413 kmem_cache_free(mnt_cache, mnt);
357} 414}
358 415
@@ -499,6 +556,17 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
499 struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname); 556 struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
500 557
501 if (mnt) { 558 if (mnt) {
559 if (flag & (CL_SLAVE | CL_PRIVATE))
560 mnt->mnt_group_id = 0; /* not a peer of original */
561 else
562 mnt->mnt_group_id = old->mnt_group_id;
563
564 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
565 int err = mnt_alloc_group_id(mnt);
566 if (err)
567 goto out_free;
568 }
569
502 mnt->mnt_flags = old->mnt_flags; 570 mnt->mnt_flags = old->mnt_flags;
503 atomic_inc(&sb->s_active); 571 atomic_inc(&sb->s_active);
504 mnt->mnt_sb = sb; 572 mnt->mnt_sb = sb;
@@ -528,6 +596,10 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
528 } 596 }
529 } 597 }
530 return mnt; 598 return mnt;
599
600 out_free:
601 free_vfsmnt(mnt);
602 return NULL;
531} 603}
532 604
533static inline void __mntput(struct vfsmount *mnt) 605static inline void __mntput(struct vfsmount *mnt)
@@ -652,20 +724,21 @@ void save_mount_options(struct super_block *sb, char *options)
652} 724}
653EXPORT_SYMBOL(save_mount_options); 725EXPORT_SYMBOL(save_mount_options);
654 726
727#ifdef CONFIG_PROC_FS
655/* iterator */ 728/* iterator */
656static void *m_start(struct seq_file *m, loff_t *pos) 729static void *m_start(struct seq_file *m, loff_t *pos)
657{ 730{
658 struct mnt_namespace *n = m->private; 731 struct proc_mounts *p = m->private;
659 732
660 down_read(&namespace_sem); 733 down_read(&namespace_sem);
661 return seq_list_start(&n->list, *pos); 734 return seq_list_start(&p->ns->list, *pos);
662} 735}
663 736
664static void *m_next(struct seq_file *m, void *v, loff_t *pos) 737static void *m_next(struct seq_file *m, void *v, loff_t *pos)
665{ 738{
666 struct mnt_namespace *n = m->private; 739 struct proc_mounts *p = m->private;
667 740
668 return seq_list_next(v, &n->list, pos); 741 return seq_list_next(v, &p->ns->list, pos);
669} 742}
670 743
671static void m_stop(struct seq_file *m, void *v) 744static void m_stop(struct seq_file *m, void *v)
@@ -673,20 +746,30 @@ static void m_stop(struct seq_file *m, void *v)
673 up_read(&namespace_sem); 746 up_read(&namespace_sem);
674} 747}
675 748
676static int show_vfsmnt(struct seq_file *m, void *v) 749struct proc_fs_info {
750 int flag;
751 const char *str;
752};
753
754static void show_sb_opts(struct seq_file *m, struct super_block *sb)
677{ 755{
678 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 756 static const struct proc_fs_info fs_info[] = {
679 int err = 0;
680 static struct proc_fs_info {
681 int flag;
682 char *str;
683 } fs_info[] = {
684 { MS_SYNCHRONOUS, ",sync" }, 757 { MS_SYNCHRONOUS, ",sync" },
685 { MS_DIRSYNC, ",dirsync" }, 758 { MS_DIRSYNC, ",dirsync" },
686 { MS_MANDLOCK, ",mand" }, 759 { MS_MANDLOCK, ",mand" },
687 { 0, NULL } 760 { 0, NULL }
688 }; 761 };
689 static struct proc_fs_info mnt_info[] = { 762 const struct proc_fs_info *fs_infop;
763
764 for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
765 if (sb->s_flags & fs_infop->flag)
766 seq_puts(m, fs_infop->str);
767 }
768}
769
770static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
771{
772 static const struct proc_fs_info mnt_info[] = {
690 { MNT_NOSUID, ",nosuid" }, 773 { MNT_NOSUID, ",nosuid" },
691 { MNT_NODEV, ",nodev" }, 774 { MNT_NODEV, ",nodev" },
692 { MNT_NOEXEC, ",noexec" }, 775 { MNT_NOEXEC, ",noexec" },
@@ -695,40 +778,108 @@ static int show_vfsmnt(struct seq_file *m, void *v)
695 { MNT_RELATIME, ",relatime" }, 778 { MNT_RELATIME, ",relatime" },
696 { 0, NULL } 779 { 0, NULL }
697 }; 780 };
698 struct proc_fs_info *fs_infop; 781 const struct proc_fs_info *fs_infop;
782
783 for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
784 if (mnt->mnt_flags & fs_infop->flag)
785 seq_puts(m, fs_infop->str);
786 }
787}
788
789static void show_type(struct seq_file *m, struct super_block *sb)
790{
791 mangle(m, sb->s_type->name);
792 if (sb->s_subtype && sb->s_subtype[0]) {
793 seq_putc(m, '.');
794 mangle(m, sb->s_subtype);
795 }
796}
797
798static int show_vfsmnt(struct seq_file *m, void *v)
799{
800 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
801 int err = 0;
699 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 802 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
700 803
701 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); 804 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
702 seq_putc(m, ' '); 805 seq_putc(m, ' ');
703 seq_path(m, &mnt_path, " \t\n\\"); 806 seq_path(m, &mnt_path, " \t\n\\");
704 seq_putc(m, ' '); 807 seq_putc(m, ' ');
705 mangle(m, mnt->mnt_sb->s_type->name); 808 show_type(m, mnt->mnt_sb);
706 if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
707 seq_putc(m, '.');
708 mangle(m, mnt->mnt_sb->s_subtype);
709 }
710 seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw"); 809 seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
711 for (fs_infop = fs_info; fs_infop->flag; fs_infop++) { 810 show_sb_opts(m, mnt->mnt_sb);
712 if (mnt->mnt_sb->s_flags & fs_infop->flag) 811 show_mnt_opts(m, mnt);
713 seq_puts(m, fs_infop->str);
714 }
715 for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
716 if (mnt->mnt_flags & fs_infop->flag)
717 seq_puts(m, fs_infop->str);
718 }
719 if (mnt->mnt_sb->s_op->show_options) 812 if (mnt->mnt_sb->s_op->show_options)
720 err = mnt->mnt_sb->s_op->show_options(m, mnt); 813 err = mnt->mnt_sb->s_op->show_options(m, mnt);
721 seq_puts(m, " 0 0\n"); 814 seq_puts(m, " 0 0\n");
722 return err; 815 return err;
723} 816}
724 817
725struct seq_operations mounts_op = { 818const struct seq_operations mounts_op = {
726 .start = m_start, 819 .start = m_start,
727 .next = m_next, 820 .next = m_next,
728 .stop = m_stop, 821 .stop = m_stop,
729 .show = show_vfsmnt 822 .show = show_vfsmnt
730}; 823};
731 824
825static int show_mountinfo(struct seq_file *m, void *v)
826{
827 struct proc_mounts *p = m->private;
828 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
829 struct super_block *sb = mnt->mnt_sb;
830 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
831 struct path root = p->root;
832 int err = 0;
833
834 seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
835 MAJOR(sb->s_dev), MINOR(sb->s_dev));
836 seq_dentry(m, mnt->mnt_root, " \t\n\\");
837 seq_putc(m, ' ');
838 seq_path_root(m, &mnt_path, &root, " \t\n\\");
839 if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
840 /*
841 * Mountpoint is outside root, discard that one. Ugly,
842 * but less so than trying to do that in iterator in a
843 * race-free way (due to renames).
844 */
845 return SEQ_SKIP;
846 }
847 seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
848 show_mnt_opts(m, mnt);
849
850 /* Tagged fields ("foo:X" or "bar") */
851 if (IS_MNT_SHARED(mnt))
852 seq_printf(m, " shared:%i", mnt->mnt_group_id);
853 if (IS_MNT_SLAVE(mnt)) {
854 int master = mnt->mnt_master->mnt_group_id;
855 int dom = get_dominating_id(mnt, &p->root);
856 seq_printf(m, " master:%i", master);
857 if (dom && dom != master)
858 seq_printf(m, " propagate_from:%i", dom);
859 }
860 if (IS_MNT_UNBINDABLE(mnt))
861 seq_puts(m, " unbindable");
862
863 /* Filesystem specific data */
864 seq_puts(m, " - ");
865 show_type(m, sb);
866 seq_putc(m, ' ');
867 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
868 seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
869 show_sb_opts(m, sb);
870 if (sb->s_op->show_options)
871 err = sb->s_op->show_options(m, mnt);
872 seq_putc(m, '\n');
873 return err;
874}
875
876const struct seq_operations mountinfo_op = {
877 .start = m_start,
878 .next = m_next,
879 .stop = m_stop,
880 .show = show_mountinfo,
881};
882
732static int show_vfsstat(struct seq_file *m, void *v) 883static int show_vfsstat(struct seq_file *m, void *v)
733{ 884{
734 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 885 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
@@ -749,7 +900,7 @@ static int show_vfsstat(struct seq_file *m, void *v)
749 900
750 /* file system type */ 901 /* file system type */
751 seq_puts(m, "with fstype "); 902 seq_puts(m, "with fstype ");
752 mangle(m, mnt->mnt_sb->s_type->name); 903 show_type(m, mnt->mnt_sb);
753 904
754 /* optional statistics */ 905 /* optional statistics */
755 if (mnt->mnt_sb->s_op->show_stats) { 906 if (mnt->mnt_sb->s_op->show_stats) {
@@ -761,12 +912,13 @@ static int show_vfsstat(struct seq_file *m, void *v)
761 return err; 912 return err;
762} 913}
763 914
764struct seq_operations mountstats_op = { 915const struct seq_operations mountstats_op = {
765 .start = m_start, 916 .start = m_start,
766 .next = m_next, 917 .next = m_next,
767 .stop = m_stop, 918 .stop = m_stop,
768 .show = show_vfsstat, 919 .show = show_vfsstat,
769}; 920};
921#endif /* CONFIG_PROC_FS */
770 922
771/** 923/**
772 * may_umount_tree - check if a mount tree is busy 924 * may_umount_tree - check if a mount tree is busy
@@ -1091,23 +1243,50 @@ Enomem:
1091struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry) 1243struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
1092{ 1244{
1093 struct vfsmount *tree; 1245 struct vfsmount *tree;
1094 down_read(&namespace_sem); 1246 down_write(&namespace_sem);
1095 tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE); 1247 tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
1096 up_read(&namespace_sem); 1248 up_write(&namespace_sem);
1097 return tree; 1249 return tree;
1098} 1250}
1099 1251
1100void drop_collected_mounts(struct vfsmount *mnt) 1252void drop_collected_mounts(struct vfsmount *mnt)
1101{ 1253{
1102 LIST_HEAD(umount_list); 1254 LIST_HEAD(umount_list);
1103 down_read(&namespace_sem); 1255 down_write(&namespace_sem);
1104 spin_lock(&vfsmount_lock); 1256 spin_lock(&vfsmount_lock);
1105 umount_tree(mnt, 0, &umount_list); 1257 umount_tree(mnt, 0, &umount_list);
1106 spin_unlock(&vfsmount_lock); 1258 spin_unlock(&vfsmount_lock);
1107 up_read(&namespace_sem); 1259 up_write(&namespace_sem);
1108 release_mounts(&umount_list); 1260 release_mounts(&umount_list);
1109} 1261}
1110 1262
1263static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
1264{
1265 struct vfsmount *p;
1266
1267 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
1268 if (p->mnt_group_id && !IS_MNT_SHARED(p))
1269 mnt_release_group_id(p);
1270 }
1271}
1272
1273static int invent_group_ids(struct vfsmount *mnt, bool recurse)
1274{
1275 struct vfsmount *p;
1276
1277 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
1278 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
1279 int err = mnt_alloc_group_id(p);
1280 if (err) {
1281 cleanup_group_ids(mnt, p);
1282 return err;
1283 }
1284 }
1285 }
1286
1287 return 0;
1288}
1289
1111/* 1290/*
1112 * @source_mnt : mount tree to be attached 1291 * @source_mnt : mount tree to be attached
1113 * @nd : place the mount tree @source_mnt is attached 1292 * @nd : place the mount tree @source_mnt is attached
@@ -1178,9 +1357,16 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
1178 struct vfsmount *dest_mnt = path->mnt; 1357 struct vfsmount *dest_mnt = path->mnt;
1179 struct dentry *dest_dentry = path->dentry; 1358 struct dentry *dest_dentry = path->dentry;
1180 struct vfsmount *child, *p; 1359 struct vfsmount *child, *p;
1360 int err;
1181 1361
1182 if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list)) 1362 if (IS_MNT_SHARED(dest_mnt)) {
1183 return -EINVAL; 1363 err = invent_group_ids(source_mnt, true);
1364 if (err)
1365 goto out;
1366 }
1367 err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
1368 if (err)
1369 goto out_cleanup_ids;
1184 1370
1185 if (IS_MNT_SHARED(dest_mnt)) { 1371 if (IS_MNT_SHARED(dest_mnt)) {
1186 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 1372 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1203,34 +1389,40 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
1203 } 1389 }
1204 spin_unlock(&vfsmount_lock); 1390 spin_unlock(&vfsmount_lock);
1205 return 0; 1391 return 0;
1392
1393 out_cleanup_ids:
1394 if (IS_MNT_SHARED(dest_mnt))
1395 cleanup_group_ids(source_mnt, NULL);
1396 out:
1397 return err;
1206} 1398}
1207 1399
1208static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) 1400static int graft_tree(struct vfsmount *mnt, struct path *path)
1209{ 1401{
1210 int err; 1402 int err;
1211 if (mnt->mnt_sb->s_flags & MS_NOUSER) 1403 if (mnt->mnt_sb->s_flags & MS_NOUSER)
1212 return -EINVAL; 1404 return -EINVAL;
1213 1405
1214 if (S_ISDIR(nd->path.dentry->d_inode->i_mode) != 1406 if (S_ISDIR(path->dentry->d_inode->i_mode) !=
1215 S_ISDIR(mnt->mnt_root->d_inode->i_mode)) 1407 S_ISDIR(mnt->mnt_root->d_inode->i_mode))
1216 return -ENOTDIR; 1408 return -ENOTDIR;
1217 1409
1218 err = -ENOENT; 1410 err = -ENOENT;
1219 mutex_lock(&nd->path.dentry->d_inode->i_mutex); 1411 mutex_lock(&path->dentry->d_inode->i_mutex);
1220 if (IS_DEADDIR(nd->path.dentry->d_inode)) 1412 if (IS_DEADDIR(path->dentry->d_inode))
1221 goto out_unlock; 1413 goto out_unlock;
1222 1414
1223 err = security_sb_check_sb(mnt, nd); 1415 err = security_sb_check_sb(mnt, path);
1224 if (err) 1416 if (err)
1225 goto out_unlock; 1417 goto out_unlock;
1226 1418
1227 err = -ENOENT; 1419 err = -ENOENT;
1228 if (IS_ROOT(nd->path.dentry) || !d_unhashed(nd->path.dentry)) 1420 if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
1229 err = attach_recursive_mnt(mnt, &nd->path, NULL); 1421 err = attach_recursive_mnt(mnt, path, NULL);
1230out_unlock: 1422out_unlock:
1231 mutex_unlock(&nd->path.dentry->d_inode->i_mutex); 1423 mutex_unlock(&path->dentry->d_inode->i_mutex);
1232 if (!err) 1424 if (!err)
1233 security_sb_post_addmount(mnt, nd); 1425 security_sb_post_addmount(mnt, path);
1234 return err; 1426 return err;
1235} 1427}
1236 1428
@@ -1243,6 +1435,7 @@ static noinline int do_change_type(struct nameidata *nd, int flag)
1243 struct vfsmount *m, *mnt = nd->path.mnt; 1435 struct vfsmount *m, *mnt = nd->path.mnt;
1244 int recurse = flag & MS_REC; 1436 int recurse = flag & MS_REC;
1245 int type = flag & ~MS_REC; 1437 int type = flag & ~MS_REC;
1438 int err = 0;
1246 1439
1247 if (!capable(CAP_SYS_ADMIN)) 1440 if (!capable(CAP_SYS_ADMIN))
1248 return -EPERM; 1441 return -EPERM;
@@ -1251,12 +1444,20 @@ static noinline int do_change_type(struct nameidata *nd, int flag)
1251 return -EINVAL; 1444 return -EINVAL;
1252 1445
1253 down_write(&namespace_sem); 1446 down_write(&namespace_sem);
1447 if (type == MS_SHARED) {
1448 err = invent_group_ids(mnt, recurse);
1449 if (err)
1450 goto out_unlock;
1451 }
1452
1254 spin_lock(&vfsmount_lock); 1453 spin_lock(&vfsmount_lock);
1255 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 1454 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1256 change_mnt_propagation(m, type); 1455 change_mnt_propagation(m, type);
1257 spin_unlock(&vfsmount_lock); 1456 spin_unlock(&vfsmount_lock);
1457
1458 out_unlock:
1258 up_write(&namespace_sem); 1459 up_write(&namespace_sem);
1259 return 0; 1460 return err;
1260} 1461}
1261 1462
1262/* 1463/*
@@ -1294,7 +1495,7 @@ static noinline int do_loopback(struct nameidata *nd, char *old_name,
1294 if (!mnt) 1495 if (!mnt)
1295 goto out; 1496 goto out;
1296 1497
1297 err = graft_tree(mnt, nd); 1498 err = graft_tree(mnt, &nd->path);
1298 if (err) { 1499 if (err) {
1299 LIST_HEAD(umount_list); 1500 LIST_HEAD(umount_list);
1300 spin_lock(&vfsmount_lock); 1501 spin_lock(&vfsmount_lock);
@@ -1501,7 +1702,7 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
1501 goto unlock; 1702 goto unlock;
1502 1703
1503 newmnt->mnt_flags = mnt_flags; 1704 newmnt->mnt_flags = mnt_flags;
1504 if ((err = graft_tree(newmnt, nd))) 1705 if ((err = graft_tree(newmnt, &nd->path)))
1505 goto unlock; 1706 goto unlock;
1506 1707
1507 if (fslist) /* add to the specified expiration list */ 1708 if (fslist) /* add to the specified expiration list */
@@ -1746,7 +1947,8 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1746 if (retval) 1947 if (retval)
1747 return retval; 1948 return retval;
1748 1949
1749 retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page); 1950 retval = security_sb_mount(dev_name, &nd.path,
1951 type_page, flags, data_page);
1750 if (retval) 1952 if (retval)
1751 goto dput_out; 1953 goto dput_out;
1752 1954
@@ -1986,15 +2188,13 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
1986 const char __user * put_old) 2188 const char __user * put_old)
1987{ 2189{
1988 struct vfsmount *tmp; 2190 struct vfsmount *tmp;
1989 struct nameidata new_nd, old_nd, user_nd; 2191 struct nameidata new_nd, old_nd;
1990 struct path parent_path, root_parent; 2192 struct path parent_path, root_parent, root;
1991 int error; 2193 int error;
1992 2194
1993 if (!capable(CAP_SYS_ADMIN)) 2195 if (!capable(CAP_SYS_ADMIN))
1994 return -EPERM; 2196 return -EPERM;
1995 2197
1996 lock_kernel();
1997
1998 error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, 2198 error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
1999 &new_nd); 2199 &new_nd);
2000 if (error) 2200 if (error)
@@ -2007,14 +2207,14 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
2007 if (error) 2207 if (error)
2008 goto out1; 2208 goto out1;
2009 2209
2010 error = security_sb_pivotroot(&old_nd, &new_nd); 2210 error = security_sb_pivotroot(&old_nd.path, &new_nd.path);
2011 if (error) { 2211 if (error) {
2012 path_put(&old_nd.path); 2212 path_put(&old_nd.path);
2013 goto out1; 2213 goto out1;
2014 } 2214 }
2015 2215
2016 read_lock(&current->fs->lock); 2216 read_lock(&current->fs->lock);
2017 user_nd.path = current->fs->root; 2217 root = current->fs->root;
2018 path_get(&current->fs->root); 2218 path_get(&current->fs->root);
2019 read_unlock(&current->fs->lock); 2219 read_unlock(&current->fs->lock);
2020 down_write(&namespace_sem); 2220 down_write(&namespace_sem);
@@ -2022,9 +2222,9 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
2022 error = -EINVAL; 2222 error = -EINVAL;
2023 if (IS_MNT_SHARED(old_nd.path.mnt) || 2223 if (IS_MNT_SHARED(old_nd.path.mnt) ||
2024 IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) || 2224 IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
2025 IS_MNT_SHARED(user_nd.path.mnt->mnt_parent)) 2225 IS_MNT_SHARED(root.mnt->mnt_parent))
2026 goto out2; 2226 goto out2;
2027 if (!check_mnt(user_nd.path.mnt)) 2227 if (!check_mnt(root.mnt))
2028 goto out2; 2228 goto out2;
2029 error = -ENOENT; 2229 error = -ENOENT;
2030 if (IS_DEADDIR(new_nd.path.dentry->d_inode)) 2230 if (IS_DEADDIR(new_nd.path.dentry->d_inode))
@@ -2034,13 +2234,13 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
2034 if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry)) 2234 if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
2035 goto out2; 2235 goto out2;
2036 error = -EBUSY; 2236 error = -EBUSY;
2037 if (new_nd.path.mnt == user_nd.path.mnt || 2237 if (new_nd.path.mnt == root.mnt ||
2038 old_nd.path.mnt == user_nd.path.mnt) 2238 old_nd.path.mnt == root.mnt)
2039 goto out2; /* loop, on the same file system */ 2239 goto out2; /* loop, on the same file system */
2040 error = -EINVAL; 2240 error = -EINVAL;
2041 if (user_nd.path.mnt->mnt_root != user_nd.path.dentry) 2241 if (root.mnt->mnt_root != root.dentry)
2042 goto out2; /* not a mountpoint */ 2242 goto out2; /* not a mountpoint */
2043 if (user_nd.path.mnt->mnt_parent == user_nd.path.mnt) 2243 if (root.mnt->mnt_parent == root.mnt)
2044 goto out2; /* not attached */ 2244 goto out2; /* not attached */
2045 if (new_nd.path.mnt->mnt_root != new_nd.path.dentry) 2245 if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
2046 goto out2; /* not a mountpoint */ 2246 goto out2; /* not a mountpoint */
@@ -2062,27 +2262,26 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
2062 } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry)) 2262 } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
2063 goto out3; 2263 goto out3;
2064 detach_mnt(new_nd.path.mnt, &parent_path); 2264 detach_mnt(new_nd.path.mnt, &parent_path);
2065 detach_mnt(user_nd.path.mnt, &root_parent); 2265 detach_mnt(root.mnt, &root_parent);
2066 /* mount old root on put_old */ 2266 /* mount old root on put_old */
2067 attach_mnt(user_nd.path.mnt, &old_nd.path); 2267 attach_mnt(root.mnt, &old_nd.path);
2068 /* mount new_root on / */ 2268 /* mount new_root on / */
2069 attach_mnt(new_nd.path.mnt, &root_parent); 2269 attach_mnt(new_nd.path.mnt, &root_parent);
2070 touch_mnt_namespace(current->nsproxy->mnt_ns); 2270 touch_mnt_namespace(current->nsproxy->mnt_ns);
2071 spin_unlock(&vfsmount_lock); 2271 spin_unlock(&vfsmount_lock);
2072 chroot_fs_refs(&user_nd.path, &new_nd.path); 2272 chroot_fs_refs(&root, &new_nd.path);
2073 security_sb_post_pivotroot(&user_nd, &new_nd); 2273 security_sb_post_pivotroot(&root, &new_nd.path);
2074 error = 0; 2274 error = 0;
2075 path_put(&root_parent); 2275 path_put(&root_parent);
2076 path_put(&parent_path); 2276 path_put(&parent_path);
2077out2: 2277out2:
2078 mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex); 2278 mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
2079 up_write(&namespace_sem); 2279 up_write(&namespace_sem);
2080 path_put(&user_nd.path); 2280 path_put(&root);
2081 path_put(&old_nd.path); 2281 path_put(&old_nd.path);
2082out1: 2282out1:
2083 path_put(&new_nd.path); 2283 path_put(&new_nd.path);
2084out0: 2284out0:
2085 unlock_kernel();
2086 return error; 2285 return error;
2087out3: 2286out3:
2088 spin_unlock(&vfsmount_lock); 2287 spin_unlock(&vfsmount_lock);
diff --git a/fs/pipe.c b/fs/pipe.c
index 8be381bbcb54..f73492b6817e 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -988,7 +988,10 @@ struct file *create_write_pipe(void)
988 return f; 988 return f;
989 989
990 err_dentry: 990 err_dentry:
991 free_pipe_info(inode);
991 dput(dentry); 992 dput(dentry);
993 return ERR_PTR(err);
994
992 err_inode: 995 err_inode:
993 free_pipe_info(inode); 996 free_pipe_info(inode);
994 iput(inode); 997 iput(inode);
diff --git a/fs/pnode.c b/fs/pnode.c
index 1d8f5447f3f7..8d5f392ec3d3 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -9,6 +9,7 @@
9#include <linux/mnt_namespace.h> 9#include <linux/mnt_namespace.h>
10#include <linux/mount.h> 10#include <linux/mount.h>
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include "internal.h"
12#include "pnode.h" 13#include "pnode.h"
13 14
14/* return the next shared peer mount of @p */ 15/* return the next shared peer mount of @p */
@@ -27,6 +28,57 @@ static inline struct vfsmount *next_slave(struct vfsmount *p)
27 return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave); 28 return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave);
28} 29}
29 30
31/*
32 * Return true if path is reachable from root
33 *
34 * namespace_sem is held, and mnt is attached
35 */
36static bool is_path_reachable(struct vfsmount *mnt, struct dentry *dentry,
37 const struct path *root)
38{
39 while (mnt != root->mnt && mnt->mnt_parent != mnt) {
40 dentry = mnt->mnt_mountpoint;
41 mnt = mnt->mnt_parent;
42 }
43 return mnt == root->mnt && is_subdir(dentry, root->dentry);
44}
45
46static struct vfsmount *get_peer_under_root(struct vfsmount *mnt,
47 struct mnt_namespace *ns,
48 const struct path *root)
49{
50 struct vfsmount *m = mnt;
51
52 do {
53 /* Check the namespace first for optimization */
54 if (m->mnt_ns == ns && is_path_reachable(m, m->mnt_root, root))
55 return m;
56
57 m = next_peer(m);
58 } while (m != mnt);
59
60 return NULL;
61}
62
63/*
64 * Get ID of closest dominating peer group having a representative
65 * under the given root.
66 *
67 * Caller must hold namespace_sem
68 */
69int get_dominating_id(struct vfsmount *mnt, const struct path *root)
70{
71 struct vfsmount *m;
72
73 for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
74 struct vfsmount *d = get_peer_under_root(m, mnt->mnt_ns, root);
75 if (d)
76 return d->mnt_group_id;
77 }
78
79 return 0;
80}
81
30static int do_make_slave(struct vfsmount *mnt) 82static int do_make_slave(struct vfsmount *mnt)
31{ 83{
32 struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master; 84 struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master;
@@ -45,7 +97,11 @@ static int do_make_slave(struct vfsmount *mnt)
45 if (peer_mnt == mnt) 97 if (peer_mnt == mnt)
46 peer_mnt = NULL; 98 peer_mnt = NULL;
47 } 99 }
100 if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
101 mnt_release_group_id(mnt);
102
48 list_del_init(&mnt->mnt_share); 103 list_del_init(&mnt->mnt_share);
104 mnt->mnt_group_id = 0;
49 105
50 if (peer_mnt) 106 if (peer_mnt)
51 master = peer_mnt; 107 master = peer_mnt;
@@ -67,7 +123,6 @@ static int do_make_slave(struct vfsmount *mnt)
67 } 123 }
68 mnt->mnt_master = master; 124 mnt->mnt_master = master;
69 CLEAR_MNT_SHARED(mnt); 125 CLEAR_MNT_SHARED(mnt);
70 INIT_LIST_HEAD(&mnt->mnt_slave_list);
71 return 0; 126 return 0;
72} 127}
73 128
@@ -211,8 +266,7 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
211out: 266out:
212 spin_lock(&vfsmount_lock); 267 spin_lock(&vfsmount_lock);
213 while (!list_empty(&tmp_list)) { 268 while (!list_empty(&tmp_list)) {
214 child = list_entry(tmp_list.next, struct vfsmount, mnt_hash); 269 child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
215 list_del_init(&child->mnt_hash);
216 umount_tree(child, 0, &umount_list); 270 umount_tree(child, 0, &umount_list);
217 } 271 }
218 spin_unlock(&vfsmount_lock); 272 spin_unlock(&vfsmount_lock);
diff --git a/fs/pnode.h b/fs/pnode.h
index f249be2fee7a..958665d662af 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -35,4 +35,6 @@ int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *,
35 struct list_head *); 35 struct list_head *);
36int propagate_umount(struct list_head *); 36int propagate_umount(struct list_head *);
37int propagate_mount_busy(struct vfsmount *, int); 37int propagate_mount_busy(struct vfsmount *, int);
38void mnt_release_group_id(struct vfsmount *);
39int get_dominating_id(struct vfsmount *mnt, const struct path *root);
38#endif /* _LINUX_PNODE_H */ 40#endif /* _LINUX_PNODE_H */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 81d7d145292a..c5e412a00b17 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -502,17 +502,14 @@ static const struct inode_operations proc_def_inode_operations = {
502 .setattr = proc_setattr, 502 .setattr = proc_setattr,
503}; 503};
504 504
505extern const struct seq_operations mounts_op; 505static int mounts_open_common(struct inode *inode, struct file *file,
506struct proc_mounts { 506 const struct seq_operations *op)
507 struct seq_file m;
508 int event;
509};
510
511static int mounts_open(struct inode *inode, struct file *file)
512{ 507{
513 struct task_struct *task = get_proc_task(inode); 508 struct task_struct *task = get_proc_task(inode);
514 struct nsproxy *nsp; 509 struct nsproxy *nsp;
515 struct mnt_namespace *ns = NULL; 510 struct mnt_namespace *ns = NULL;
511 struct fs_struct *fs = NULL;
512 struct path root;
516 struct proc_mounts *p; 513 struct proc_mounts *p;
517 int ret = -EINVAL; 514 int ret = -EINVAL;
518 515
@@ -525,40 +522,61 @@ static int mounts_open(struct inode *inode, struct file *file)
525 get_mnt_ns(ns); 522 get_mnt_ns(ns);
526 } 523 }
527 rcu_read_unlock(); 524 rcu_read_unlock();
528 525 if (ns)
526 fs = get_fs_struct(task);
529 put_task_struct(task); 527 put_task_struct(task);
530 } 528 }
531 529
532 if (ns) { 530 if (!ns)
533 ret = -ENOMEM; 531 goto err;
534 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); 532 if (!fs)
535 if (p) { 533 goto err_put_ns;
536 file->private_data = &p->m; 534
537 ret = seq_open(file, &mounts_op); 535 read_lock(&fs->lock);
538 if (!ret) { 536 root = fs->root;
539 p->m.private = ns; 537 path_get(&root);
540 p->event = ns->event; 538 read_unlock(&fs->lock);
541 return 0; 539 put_fs_struct(fs);
542 } 540
543 kfree(p); 541 ret = -ENOMEM;
544 } 542 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
545 put_mnt_ns(ns); 543 if (!p)
546 } 544 goto err_put_path;
545
546 file->private_data = &p->m;
547 ret = seq_open(file, op);
548 if (ret)
549 goto err_free;
550
551 p->m.private = p;
552 p->ns = ns;
553 p->root = root;
554 p->event = ns->event;
555
556 return 0;
557
558 err_free:
559 kfree(p);
560 err_put_path:
561 path_put(&root);
562 err_put_ns:
563 put_mnt_ns(ns);
564 err:
547 return ret; 565 return ret;
548} 566}
549 567
550static int mounts_release(struct inode *inode, struct file *file) 568static int mounts_release(struct inode *inode, struct file *file)
551{ 569{
552 struct seq_file *m = file->private_data; 570 struct proc_mounts *p = file->private_data;
553 struct mnt_namespace *ns = m->private; 571 path_put(&p->root);
554 put_mnt_ns(ns); 572 put_mnt_ns(p->ns);
555 return seq_release(inode, file); 573 return seq_release(inode, file);
556} 574}
557 575
558static unsigned mounts_poll(struct file *file, poll_table *wait) 576static unsigned mounts_poll(struct file *file, poll_table *wait)
559{ 577{
560 struct proc_mounts *p = file->private_data; 578 struct proc_mounts *p = file->private_data;
561 struct mnt_namespace *ns = p->m.private; 579 struct mnt_namespace *ns = p->ns;
562 unsigned res = 0; 580 unsigned res = 0;
563 581
564 poll_wait(file, &ns->poll, wait); 582 poll_wait(file, &ns->poll, wait);
@@ -573,6 +591,11 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
573 return res; 591 return res;
574} 592}
575 593
594static int mounts_open(struct inode *inode, struct file *file)
595{
596 return mounts_open_common(inode, file, &mounts_op);
597}
598
576static const struct file_operations proc_mounts_operations = { 599static const struct file_operations proc_mounts_operations = {
577 .open = mounts_open, 600 .open = mounts_open,
578 .read = seq_read, 601 .read = seq_read,
@@ -581,38 +604,22 @@ static const struct file_operations proc_mounts_operations = {
581 .poll = mounts_poll, 604 .poll = mounts_poll,
582}; 605};
583 606
584extern const struct seq_operations mountstats_op; 607static int mountinfo_open(struct inode *inode, struct file *file)
585static int mountstats_open(struct inode *inode, struct file *file)
586{ 608{
587 int ret = seq_open(file, &mountstats_op); 609 return mounts_open_common(inode, file, &mountinfo_op);
588 610}
589 if (!ret) {
590 struct seq_file *m = file->private_data;
591 struct nsproxy *nsp;
592 struct mnt_namespace *mnt_ns = NULL;
593 struct task_struct *task = get_proc_task(inode);
594
595 if (task) {
596 rcu_read_lock();
597 nsp = task_nsproxy(task);
598 if (nsp) {
599 mnt_ns = nsp->mnt_ns;
600 if (mnt_ns)
601 get_mnt_ns(mnt_ns);
602 }
603 rcu_read_unlock();
604 611
605 put_task_struct(task); 612static const struct file_operations proc_mountinfo_operations = {
606 } 613 .open = mountinfo_open,
614 .read = seq_read,
615 .llseek = seq_lseek,
616 .release = mounts_release,
617 .poll = mounts_poll,
618};
607 619
608 if (mnt_ns) 620static int mountstats_open(struct inode *inode, struct file *file)
609 m->private = mnt_ns; 621{
610 else { 622 return mounts_open_common(inode, file, &mountstats_op);
611 seq_release(inode, file);
612 ret = -EINVAL;
613 }
614 }
615 return ret;
616} 623}
617 624
618static const struct file_operations proc_mountstats_operations = { 625static const struct file_operations proc_mountstats_operations = {
@@ -1626,7 +1633,6 @@ static int proc_readfd_common(struct file * filp, void * dirent,
1626 unsigned int fd, ino; 1633 unsigned int fd, ino;
1627 int retval; 1634 int retval;
1628 struct files_struct * files; 1635 struct files_struct * files;
1629 struct fdtable *fdt;
1630 1636
1631 retval = -ENOENT; 1637 retval = -ENOENT;
1632 if (!p) 1638 if (!p)
@@ -1649,9 +1655,8 @@ static int proc_readfd_common(struct file * filp, void * dirent,
1649 if (!files) 1655 if (!files)
1650 goto out; 1656 goto out;
1651 rcu_read_lock(); 1657 rcu_read_lock();
1652 fdt = files_fdtable(files);
1653 for (fd = filp->f_pos-2; 1658 for (fd = filp->f_pos-2;
1654 fd < fdt->max_fds; 1659 fd < files_fdtable(files)->max_fds;
1655 fd++, filp->f_pos++) { 1660 fd++, filp->f_pos++) {
1656 char name[PROC_NUMBUF]; 1661 char name[PROC_NUMBUF];
1657 int len; 1662 int len;
@@ -2311,6 +2316,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2311 LNK("root", root), 2316 LNK("root", root),
2312 LNK("exe", exe), 2317 LNK("exe", exe),
2313 REG("mounts", S_IRUGO, mounts), 2318 REG("mounts", S_IRUGO, mounts),
2319 REG("mountinfo", S_IRUGO, mountinfo),
2314 REG("mountstats", S_IRUSR, mountstats), 2320 REG("mountstats", S_IRUSR, mountstats),
2315#ifdef CONFIG_PROC_PAGE_MONITOR 2321#ifdef CONFIG_PROC_PAGE_MONITOR
2316 REG("clear_refs", S_IWUSR, clear_refs), 2322 REG("clear_refs", S_IWUSR, clear_refs),
@@ -2643,6 +2649,7 @@ static const struct pid_entry tid_base_stuff[] = {
2643 LNK("root", root), 2649 LNK("root", root),
2644 LNK("exe", exe), 2650 LNK("exe", exe),
2645 REG("mounts", S_IRUGO, mounts), 2651 REG("mounts", S_IRUGO, mounts),
2652 REG("mountinfo", S_IRUGO, mountinfo),
2646#ifdef CONFIG_PROC_PAGE_MONITOR 2653#ifdef CONFIG_PROC_PAGE_MONITOR
2647 REG("clear_refs", S_IWUSR, clear_refs), 2654 REG("clear_refs", S_IWUSR, clear_refs),
2648 REG("smaps", S_IRUGO, smaps), 2655 REG("smaps", S_IRUGO, smaps),
diff --git a/fs/read_write.c b/fs/read_write.c
index 49a98718ecdf..f0d1240a5c69 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(generic_ro_fops);
33 33
34loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) 34loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
35{ 35{
36 long long retval; 36 loff_t retval;
37 struct inode *inode = file->f_mapping->host; 37 struct inode *inode = file->f_mapping->host;
38 38
39 mutex_lock(&inode->i_mutex); 39 mutex_lock(&inode->i_mutex);
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(generic_file_llseek);
60 60
61loff_t remote_llseek(struct file *file, loff_t offset, int origin) 61loff_t remote_llseek(struct file *file, loff_t offset, int origin)
62{ 62{
63 long long retval; 63 loff_t retval;
64 64
65 lock_kernel(); 65 lock_kernel();
66 switch (origin) { 66 switch (origin) {
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(no_llseek);
91 91
92loff_t default_llseek(struct file *file, loff_t offset, int origin) 92loff_t default_llseek(struct file *file, loff_t offset, int origin)
93{ 93{
94 long long retval; 94 loff_t retval;
95 95
96 lock_kernel(); 96 lock_kernel();
97 switch (origin) { 97 switch (origin) {
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 853770274f20..3f54dbd6c49b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -25,6 +25,7 @@
25 * into the buffer. In case of error ->start() and ->next() return 25 * into the buffer. In case of error ->start() and ->next() return
26 * ERR_PTR(error). In the end of sequence they return %NULL. ->show() 26 * ERR_PTR(error). In the end of sequence they return %NULL. ->show()
27 * returns 0 in case of success and negative number in case of error. 27 * returns 0 in case of success and negative number in case of error.
28 * Returning SEQ_SKIP means "discard this element and move on".
28 */ 29 */
29int seq_open(struct file *file, const struct seq_operations *op) 30int seq_open(struct file *file, const struct seq_operations *op)
30{ 31{
@@ -114,8 +115,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
114 if (!p || IS_ERR(p)) 115 if (!p || IS_ERR(p))
115 break; 116 break;
116 err = m->op->show(m, p); 117 err = m->op->show(m, p);
117 if (err) 118 if (err < 0)
118 break; 119 break;
120 if (unlikely(err))
121 m->count = 0;
119 if (m->count < m->size) 122 if (m->count < m->size)
120 goto Fill; 123 goto Fill;
121 m->op->stop(m, p); 124 m->op->stop(m, p);
@@ -140,9 +143,10 @@ Fill:
140 break; 143 break;
141 } 144 }
142 err = m->op->show(m, p); 145 err = m->op->show(m, p);
143 if (err || m->count == m->size) { 146 if (m->count == m->size || err) {
144 m->count = offs; 147 m->count = offs;
145 break; 148 if (likely(err <= 0))
149 break;
146 } 150 }
147 pos = next; 151 pos = next;
148 } 152 }
@@ -199,8 +203,12 @@ static int traverse(struct seq_file *m, loff_t offset)
199 if (IS_ERR(p)) 203 if (IS_ERR(p))
200 break; 204 break;
201 error = m->op->show(m, p); 205 error = m->op->show(m, p);
202 if (error) 206 if (error < 0)
203 break; 207 break;
208 if (unlikely(error)) {
209 error = 0;
210 m->count = 0;
211 }
204 if (m->count == m->size) 212 if (m->count == m->size)
205 goto Eoverflow; 213 goto Eoverflow;
206 if (pos + m->count > offset) { 214 if (pos + m->count > offset) {
@@ -239,7 +247,7 @@ Eoverflow:
239loff_t seq_lseek(struct file *file, loff_t offset, int origin) 247loff_t seq_lseek(struct file *file, loff_t offset, int origin)
240{ 248{
241 struct seq_file *m = (struct seq_file *)file->private_data; 249 struct seq_file *m = (struct seq_file *)file->private_data;
242 long long retval = -EINVAL; 250 loff_t retval = -EINVAL;
243 251
244 mutex_lock(&m->lock); 252 mutex_lock(&m->lock);
245 m->version = file->f_version; 253 m->version = file->f_version;
@@ -342,28 +350,40 @@ int seq_printf(struct seq_file *m, const char *f, ...)
342} 350}
343EXPORT_SYMBOL(seq_printf); 351EXPORT_SYMBOL(seq_printf);
344 352
353static char *mangle_path(char *s, char *p, char *esc)
354{
355 while (s <= p) {
356 char c = *p++;
357 if (!c) {
358 return s;
359 } else if (!strchr(esc, c)) {
360 *s++ = c;
361 } else if (s + 4 > p) {
362 break;
363 } else {
364 *s++ = '\\';
365 *s++ = '0' + ((c & 0300) >> 6);
366 *s++ = '0' + ((c & 070) >> 3);
367 *s++ = '0' + (c & 07);
368 }
369 }
370 return NULL;
371}
372
373/*
374 * return the absolute path of 'dentry' residing in mount 'mnt'.
375 */
345int seq_path(struct seq_file *m, struct path *path, char *esc) 376int seq_path(struct seq_file *m, struct path *path, char *esc)
346{ 377{
347 if (m->count < m->size) { 378 if (m->count < m->size) {
348 char *s = m->buf + m->count; 379 char *s = m->buf + m->count;
349 char *p = d_path(path, s, m->size - m->count); 380 char *p = d_path(path, s, m->size - m->count);
350 if (!IS_ERR(p)) { 381 if (!IS_ERR(p)) {
351 while (s <= p) { 382 s = mangle_path(s, p, esc);
352 char c = *p++; 383 if (s) {
353 if (!c) { 384 p = m->buf + m->count;
354 p = m->buf + m->count; 385 m->count = s - m->buf;
355 m->count = s - m->buf; 386 return s - p;
356 return s - p;
357 } else if (!strchr(esc, c)) {
358 *s++ = c;
359 } else if (s + 4 > p) {
360 break;
361 } else {
362 *s++ = '\\';
363 *s++ = '0' + ((c & 0300) >> 6);
364 *s++ = '0' + ((c & 070) >> 3);
365 *s++ = '0' + (c & 07);
366 }
367 } 387 }
368 } 388 }
369 } 389 }
@@ -372,6 +392,57 @@ int seq_path(struct seq_file *m, struct path *path, char *esc)
372} 392}
373EXPORT_SYMBOL(seq_path); 393EXPORT_SYMBOL(seq_path);
374 394
395/*
396 * Same as seq_path, but relative to supplied root.
397 *
398 * root may be changed, see __d_path().
399 */
400int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
401 char *esc)
402{
403 int err = -ENAMETOOLONG;
404 if (m->count < m->size) {
405 char *s = m->buf + m->count;
406 char *p;
407
408 spin_lock(&dcache_lock);
409 p = __d_path(path, root, s, m->size - m->count);
410 spin_unlock(&dcache_lock);
411 err = PTR_ERR(p);
412 if (!IS_ERR(p)) {
413 s = mangle_path(s, p, esc);
414 if (s) {
415 p = m->buf + m->count;
416 m->count = s - m->buf;
417 return 0;
418 }
419 }
420 }
421 m->count = m->size;
422 return err;
423}
424
425/*
426 * returns the path of the 'dentry' from the root of its filesystem.
427 */
428int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
429{
430 if (m->count < m->size) {
431 char *s = m->buf + m->count;
432 char *p = dentry_path(dentry, s, m->size - m->count);
433 if (!IS_ERR(p)) {
434 s = mangle_path(s, p, esc);
435 if (s) {
436 p = m->buf + m->count;
437 m->count = s - m->buf;
438 return s - p;
439 }
440 }
441 }
442 m->count = m->size;
443 return -1;
444}
445
375static void *single_start(struct seq_file *p, loff_t *pos) 446static void *single_start(struct seq_file *p, loff_t *pos)
376{ 447{
377 return NULL + (*pos == 0); 448 return NULL + (*pos == 0);
diff --git a/fs/super.c b/fs/super.c
index 1f8f05ede437..4798350b2bc9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -39,6 +39,7 @@
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/file.h> 40#include <linux/file.h>
41#include <asm/uaccess.h> 41#include <asm/uaccess.h>
42#include "internal.h"
42 43
43 44
44LIST_HEAD(super_blocks); 45LIST_HEAD(super_blocks);
diff --git a/fs/udf/Makefile b/fs/udf/Makefile
index be845e7540ef..0d4503f7446d 100644
--- a/fs/udf/Makefile
+++ b/fs/udf/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_UDF_FS) += udf.o
6 6
7udf-objs := balloc.o dir.o file.o ialloc.o inode.o lowlevel.o namei.o \ 7udf-objs := balloc.o dir.o file.o ialloc.o inode.o lowlevel.o namei.o \
8 partition.o super.o truncate.o symlink.o fsync.o \ 8 partition.o super.o truncate.o symlink.o fsync.o \
9 crc.o directory.o misc.o udftime.o unicode.o 9 directory.o misc.o udftime.o unicode.o
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index f855dcbbdfb8..1b809bd494bd 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -149,8 +149,7 @@ static bool udf_add_free_space(struct udf_sb_info *sbi,
149 return false; 149 return false;
150 150
151 lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; 151 lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
152 lvid->freeSpaceTable[partition] = cpu_to_le32(le32_to_cpu( 152 le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
153 lvid->freeSpaceTable[partition]) + cnt);
154 return true; 153 return true;
155} 154}
156 155
@@ -589,10 +588,8 @@ static void udf_table_free_blocks(struct super_block *sb,
589 sptr = oepos.bh->b_data + epos.offset; 588 sptr = oepos.bh->b_data + epos.offset;
590 aed = (struct allocExtDesc *) 589 aed = (struct allocExtDesc *)
591 oepos.bh->b_data; 590 oepos.bh->b_data;
592 aed->lengthAllocDescs = 591 le32_add_cpu(&aed->lengthAllocDescs,
593 cpu_to_le32(le32_to_cpu( 592 adsize);
594 aed->lengthAllocDescs) +
595 adsize);
596 } else { 593 } else {
597 sptr = iinfo->i_ext.i_data + 594 sptr = iinfo->i_ext.i_data +
598 epos.offset; 595 epos.offset;
@@ -645,9 +642,7 @@ static void udf_table_free_blocks(struct super_block *sb,
645 mark_inode_dirty(table); 642 mark_inode_dirty(table);
646 } else { 643 } else {
647 aed = (struct allocExtDesc *)epos.bh->b_data; 644 aed = (struct allocExtDesc *)epos.bh->b_data;
648 aed->lengthAllocDescs = 645 le32_add_cpu(&aed->lengthAllocDescs, adsize);
649 cpu_to_le32(le32_to_cpu(
650 aed->lengthAllocDescs) + adsize);
651 udf_update_tag(epos.bh->b_data, epos.offset); 646 udf_update_tag(epos.bh->b_data, epos.offset);
652 mark_buffer_dirty(epos.bh); 647 mark_buffer_dirty(epos.bh);
653 } 648 }
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
deleted file mode 100644
index b1661296e786..000000000000
--- a/fs/udf/crc.c
+++ /dev/null
@@ -1,172 +0,0 @@
1/*
2 * crc.c
3 *
4 * PURPOSE
5 * Routines to generate, calculate, and test a 16-bit CRC.
6 *
7 * DESCRIPTION
8 * The CRC code was devised by Don P. Mitchell of AT&T Bell Laboratories
9 * and Ned W. Rhodes of Software Systems Group. It has been published in
10 * "Design and Validation of Computer Protocols", Prentice Hall,
11 * Englewood Cliffs, NJ, 1991, Chapter 3, ISBN 0-13-539925-4.
12 *
13 * Copyright is held by AT&T.
14 *
15 * AT&T gives permission for the free use of the CRC source code.
16 *
17 * COPYRIGHT
18 * This file is distributed under the terms of the GNU General Public
19 * License (GPL). Copies of the GPL can be obtained from:
20 * ftp://prep.ai.mit.edu/pub/gnu/GPL
21 * Each contributing author retains all rights to their own work.
22 */
23
24#include "udfdecl.h"
25
26static uint16_t crc_table[256] = {
27 0x0000U, 0x1021U, 0x2042U, 0x3063U, 0x4084U, 0x50a5U, 0x60c6U, 0x70e7U,
28 0x8108U, 0x9129U, 0xa14aU, 0xb16bU, 0xc18cU, 0xd1adU, 0xe1ceU, 0xf1efU,
29 0x1231U, 0x0210U, 0x3273U, 0x2252U, 0x52b5U, 0x4294U, 0x72f7U, 0x62d6U,
30 0x9339U, 0x8318U, 0xb37bU, 0xa35aU, 0xd3bdU, 0xc39cU, 0xf3ffU, 0xe3deU,
31 0x2462U, 0x3443U, 0x0420U, 0x1401U, 0x64e6U, 0x74c7U, 0x44a4U, 0x5485U,
32 0xa56aU, 0xb54bU, 0x8528U, 0x9509U, 0xe5eeU, 0xf5cfU, 0xc5acU, 0xd58dU,
33 0x3653U, 0x2672U, 0x1611U, 0x0630U, 0x76d7U, 0x66f6U, 0x5695U, 0x46b4U,
34 0xb75bU, 0xa77aU, 0x9719U, 0x8738U, 0xf7dfU, 0xe7feU, 0xd79dU, 0xc7bcU,
35 0x48c4U, 0x58e5U, 0x6886U, 0x78a7U, 0x0840U, 0x1861U, 0x2802U, 0x3823U,
36 0xc9ccU, 0xd9edU, 0xe98eU, 0xf9afU, 0x8948U, 0x9969U, 0xa90aU, 0xb92bU,
37 0x5af5U, 0x4ad4U, 0x7ab7U, 0x6a96U, 0x1a71U, 0x0a50U, 0x3a33U, 0x2a12U,
38 0xdbfdU, 0xcbdcU, 0xfbbfU, 0xeb9eU, 0x9b79U, 0x8b58U, 0xbb3bU, 0xab1aU,
39 0x6ca6U, 0x7c87U, 0x4ce4U, 0x5cc5U, 0x2c22U, 0x3c03U, 0x0c60U, 0x1c41U,
40 0xedaeU, 0xfd8fU, 0xcdecU, 0xddcdU, 0xad2aU, 0xbd0bU, 0x8d68U, 0x9d49U,
41 0x7e97U, 0x6eb6U, 0x5ed5U, 0x4ef4U, 0x3e13U, 0x2e32U, 0x1e51U, 0x0e70U,
42 0xff9fU, 0xefbeU, 0xdfddU, 0xcffcU, 0xbf1bU, 0xaf3aU, 0x9f59U, 0x8f78U,
43 0x9188U, 0x81a9U, 0xb1caU, 0xa1ebU, 0xd10cU, 0xc12dU, 0xf14eU, 0xe16fU,
44 0x1080U, 0x00a1U, 0x30c2U, 0x20e3U, 0x5004U, 0x4025U, 0x7046U, 0x6067U,
45 0x83b9U, 0x9398U, 0xa3fbU, 0xb3daU, 0xc33dU, 0xd31cU, 0xe37fU, 0xf35eU,
46 0x02b1U, 0x1290U, 0x22f3U, 0x32d2U, 0x4235U, 0x5214U, 0x6277U, 0x7256U,
47 0xb5eaU, 0xa5cbU, 0x95a8U, 0x8589U, 0xf56eU, 0xe54fU, 0xd52cU, 0xc50dU,
48 0x34e2U, 0x24c3U, 0x14a0U, 0x0481U, 0x7466U, 0x6447U, 0x5424U, 0x4405U,
49 0xa7dbU, 0xb7faU, 0x8799U, 0x97b8U, 0xe75fU, 0xf77eU, 0xc71dU, 0xd73cU,
50 0x26d3U, 0x36f2U, 0x0691U, 0x16b0U, 0x6657U, 0x7676U, 0x4615U, 0x5634U,
51 0xd94cU, 0xc96dU, 0xf90eU, 0xe92fU, 0x99c8U, 0x89e9U, 0xb98aU, 0xa9abU,
52 0x5844U, 0x4865U, 0x7806U, 0x6827U, 0x18c0U, 0x08e1U, 0x3882U, 0x28a3U,
53 0xcb7dU, 0xdb5cU, 0xeb3fU, 0xfb1eU, 0x8bf9U, 0x9bd8U, 0xabbbU, 0xbb9aU,
54 0x4a75U, 0x5a54U, 0x6a37U, 0x7a16U, 0x0af1U, 0x1ad0U, 0x2ab3U, 0x3a92U,
55 0xfd2eU, 0xed0fU, 0xdd6cU, 0xcd4dU, 0xbdaaU, 0xad8bU, 0x9de8U, 0x8dc9U,
56 0x7c26U, 0x6c07U, 0x5c64U, 0x4c45U, 0x3ca2U, 0x2c83U, 0x1ce0U, 0x0cc1U,
57 0xef1fU, 0xff3eU, 0xcf5dU, 0xdf7cU, 0xaf9bU, 0xbfbaU, 0x8fd9U, 0x9ff8U,
58 0x6e17U, 0x7e36U, 0x4e55U, 0x5e74U, 0x2e93U, 0x3eb2U, 0x0ed1U, 0x1ef0U
59};
60
61/*
62 * udf_crc
63 *
64 * PURPOSE
65 * Calculate a 16-bit CRC checksum using ITU-T V.41 polynomial.
66 *
67 * DESCRIPTION
68 * The OSTA-UDF(tm) 1.50 standard states that using CRCs is mandatory.
69 * The polynomial used is: x^16 + x^12 + x^15 + 1
70 *
71 * PRE-CONDITIONS
72 * data Pointer to the data block.
73 * size Size of the data block.
74 *
75 * POST-CONDITIONS
76 * <return> CRC of the data block.
77 *
78 * HISTORY
79 * July 21, 1997 - Andrew E. Mileski
80 * Adapted from OSTA-UDF(tm) 1.50 standard.
81 */
82uint16_t udf_crc(uint8_t *data, uint32_t size, uint16_t crc)
83{
84 while (size--)
85 crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8);
86
87 return crc;
88}
89
90/****************************************************************************/
91#if defined(TEST)
92
93/*
94 * PURPOSE
95 * Test udf_crc()
96 *
97 * HISTORY
98 * July 21, 1997 - Andrew E. Mileski
99 * Adapted from OSTA-UDF(tm) 1.50 standard.
100 */
101
102unsigned char bytes[] = { 0x70U, 0x6AU, 0x77U };
103
104int main(void)
105{
106 unsigned short x;
107
108 x = udf_crc(bytes, sizeof bytes);
109 printf("udf_crc: calculated = %4.4x, correct = %4.4x\n", x, 0x3299U);
110
111 return 0;
112}
113
114#endif /* defined(TEST) */
115
116/****************************************************************************/
117#if defined(GENERATE)
118
119/*
120 * PURPOSE
121 * Generate a table for fast 16-bit CRC calculations (any polynomial).
122 *
123 * DESCRIPTION
124 * The ITU-T V.41 polynomial is 010041.
125 *
126 * HISTORY
127 * July 21, 1997 - Andrew E. Mileski
128 * Adapted from OSTA-UDF(tm) 1.50 standard.
129 */
130
131#include <stdio.h>
132
133int main(int argc, char **argv)
134{
135 unsigned long crc, poly;
136 int n, i;
137
138 /* Get the polynomial */
139 sscanf(argv[1], "%lo", &poly);
140 if (poly & 0xffff0000U) {
141 fprintf(stderr, "polynomial is too large\en");
142 exit(1);
143 }
144
145 printf("/* CRC 0%o */\n", poly);
146
147 /* Create a table */
148 printf("static unsigned short crc_table[256] = {\n");
149 for (n = 0; n < 256; n++) {
150 if (n % 8 == 0)
151 printf("\t");
152 crc = n << 8;
153 for (i = 0; i < 8; i++) {
154 if (crc & 0x8000U)
155 crc = (crc << 1) ^ poly;
156 else
157 crc <<= 1;
158 crc &= 0xFFFFU;
159 }
160 if (n == 255)
161 printf("0x%04xU ", crc);
162 else
163 printf("0x%04xU, ", crc);
164 if (n % 8 == 7)
165 printf("\n");
166 }
167 printf("};\n");
168
169 return 0;
170}
171
172#endif /* defined(GENERATE) */
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 8d8643ada199..62dc270c69d1 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -39,13 +39,13 @@
39static int do_udf_readdir(struct inode *dir, struct file *filp, 39static int do_udf_readdir(struct inode *dir, struct file *filp,
40 filldir_t filldir, void *dirent) 40 filldir_t filldir, void *dirent)
41{ 41{
42 struct udf_fileident_bh fibh; 42 struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
43 struct fileIdentDesc *fi = NULL; 43 struct fileIdentDesc *fi = NULL;
44 struct fileIdentDesc cfi; 44 struct fileIdentDesc cfi;
45 int block, iblock; 45 int block, iblock;
46 loff_t nf_pos = (filp->f_pos - 1) << 2; 46 loff_t nf_pos = (filp->f_pos - 1) << 2;
47 int flen; 47 int flen;
48 char fname[UDF_NAME_LEN]; 48 char *fname = NULL;
49 char *nameptr; 49 char *nameptr;
50 uint16_t liu; 50 uint16_t liu;
51 uint8_t lfi; 51 uint8_t lfi;
@@ -54,23 +54,32 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
54 kernel_lb_addr eloc; 54 kernel_lb_addr eloc;
55 uint32_t elen; 55 uint32_t elen;
56 sector_t offset; 56 sector_t offset;
57 int i, num; 57 int i, num, ret = 0;
58 unsigned int dt_type; 58 unsigned int dt_type;
59 struct extent_position epos = { NULL, 0, {0, 0} }; 59 struct extent_position epos = { NULL, 0, {0, 0} };
60 struct udf_inode_info *iinfo; 60 struct udf_inode_info *iinfo;
61 61
62 if (nf_pos >= size) 62 if (nf_pos >= size)
63 return 0; 63 goto out;
64
65 fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
66 if (!fname) {
67 ret = -ENOMEM;
68 goto out;
69 }
64 70
65 if (nf_pos == 0) 71 if (nf_pos == 0)
66 nf_pos = udf_ext0_offset(dir); 72 nf_pos = udf_ext0_offset(dir);
67 73
68 fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1); 74 fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
69 iinfo = UDF_I(dir); 75 iinfo = UDF_I(dir);
70 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { 76 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
71 fibh.sbh = fibh.ebh = NULL; 77 if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
72 } else if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits, 78 &epos, &eloc, &elen, &offset)
73 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { 79 != (EXT_RECORDED_ALLOCATED >> 30)) {
80 ret = -ENOENT;
81 goto out;
82 }
74 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 83 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
75 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 84 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
76 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 85 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -83,8 +92,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
83 } 92 }
84 93
85 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) { 94 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
86 brelse(epos.bh); 95 ret = -EIO;
87 return -EIO; 96 goto out;
88 } 97 }
89 98
90 if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) { 99 if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
@@ -105,9 +114,6 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
105 brelse(bha[i]); 114 brelse(bha[i]);
106 } 115 }
107 } 116 }
108 } else {
109 brelse(epos.bh);
110 return -ENOENT;
111 } 117 }
112 118
113 while (nf_pos < size) { 119 while (nf_pos < size) {
@@ -115,13 +121,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
115 121
116 fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, 122 fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
117 &elen, &offset); 123 &elen, &offset);
118 if (!fi) { 124 if (!fi)
119 if (fibh.sbh != fibh.ebh) 125 goto out;
120 brelse(fibh.ebh);
121 brelse(fibh.sbh);
122 brelse(epos.bh);
123 return 0;
124 }
125 126
126 liu = le16_to_cpu(cfi.lengthOfImpUse); 127 liu = le16_to_cpu(cfi.lengthOfImpUse);
127 lfi = cfi.lengthFileIdent; 128 lfi = cfi.lengthFileIdent;
@@ -167,53 +168,23 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
167 dt_type = DT_UNKNOWN; 168 dt_type = DT_UNKNOWN;
168 } 169 }
169 170
170 if (flen) { 171 if (flen && filldir(dirent, fname, flen, filp->f_pos,
171 if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0) { 172 iblock, dt_type) < 0)
172 if (fibh.sbh != fibh.ebh) 173 goto out;
173 brelse(fibh.ebh);
174 brelse(fibh.sbh);
175 brelse(epos.bh);
176 return 0;
177 }
178 }
179 } /* end while */ 174 } /* end while */
180 175
181 filp->f_pos = (nf_pos >> 2) + 1; 176 filp->f_pos = (nf_pos >> 2) + 1;
182 177
178out:
183 if (fibh.sbh != fibh.ebh) 179 if (fibh.sbh != fibh.ebh)
184 brelse(fibh.ebh); 180 brelse(fibh.ebh);
185 brelse(fibh.sbh); 181 brelse(fibh.sbh);
186 brelse(epos.bh); 182 brelse(epos.bh);
183 kfree(fname);
187 184
188 return 0; 185 return ret;
189} 186}
190 187
191/*
192 * udf_readdir
193 *
194 * PURPOSE
195 * Read a directory entry.
196 *
197 * DESCRIPTION
198 * Optional - sys_getdents() will return -ENOTDIR if this routine is not
199 * available.
200 *
201 * Refer to sys_getdents() in fs/readdir.c
202 * sys_getdents() -> .
203 *
204 * PRE-CONDITIONS
205 * filp Pointer to directory file.
206 * buf Pointer to directory entry buffer.
207 * filldir Pointer to filldir function.
208 *
209 * POST-CONDITIONS
210 * <return> >=0 on success.
211 *
212 * HISTORY
213 * July 1, 1997 - Andrew E. Mileski
214 * Written, tested, and released.
215 */
216
217static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir) 188static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
218{ 189{
219 struct inode *dir = filp->f_path.dentry->d_inode; 190 struct inode *dir = filp->f_path.dentry->d_inode;
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index 56387711589b..a0974df82b31 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -70,19 +70,6 @@ typedef struct {
70 uint8_t microseconds; 70 uint8_t microseconds;
71} __attribute__ ((packed)) timestamp; 71} __attribute__ ((packed)) timestamp;
72 72
73typedef struct {
74 uint16_t typeAndTimezone;
75 int16_t year;
76 uint8_t month;
77 uint8_t day;
78 uint8_t hour;
79 uint8_t minute;
80 uint8_t second;
81 uint8_t centiseconds;
82 uint8_t hundredsOfMicroseconds;
83 uint8_t microseconds;
84} __attribute__ ((packed)) kernel_timestamp;
85
86/* Type and Time Zone (ECMA 167r3 1/7.3.1) */ 73/* Type and Time Zone (ECMA 167r3 1/7.3.1) */
87#define TIMESTAMP_TYPE_MASK 0xF000 74#define TIMESTAMP_TYPE_MASK 0xF000
88#define TIMESTAMP_TYPE_CUT 0x0000 75#define TIMESTAMP_TYPE_CUT 0x0000
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 97c71ae7c689..0ed6e146a0d9 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -27,7 +27,6 @@
27 27
28#include "udfdecl.h" 28#include "udfdecl.h"
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/udf_fs.h>
31#include <asm/uaccess.h> 30#include <asm/uaccess.h>
32#include <linux/kernel.h> 31#include <linux/kernel.h>
33#include <linux/string.h> /* memset */ 32#include <linux/string.h> /* memset */
@@ -144,40 +143,6 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
144 return retval; 143 return retval;
145} 144}
146 145
147/*
148 * udf_ioctl
149 *
150 * PURPOSE
151 * Issue an ioctl.
152 *
153 * DESCRIPTION
154 * Optional - sys_ioctl() will return -ENOTTY if this routine is not
155 * available, and the ioctl cannot be handled without filesystem help.
156 *
157 * sys_ioctl() handles these ioctls that apply only to regular files:
158 * FIBMAP [requires udf_block_map()], FIGETBSZ, FIONREAD
159 * These ioctls are also handled by sys_ioctl():
160 * FIOCLEX, FIONCLEX, FIONBIO, FIOASYNC
161 * All other ioctls are passed to the filesystem.
162 *
163 * Refer to sys_ioctl() in fs/ioctl.c
164 * sys_ioctl() -> .
165 *
166 * PRE-CONDITIONS
167 * inode Pointer to inode that ioctl was issued on.
168 * filp Pointer to file that ioctl was issued on.
169 * cmd The ioctl command.
170 * arg The ioctl argument [can be interpreted as a
171 * user-space pointer if desired].
172 *
173 * POST-CONDITIONS
174 * <return> Success (>=0) or an error code (<=0) that
175 * sys_ioctl() will return.
176 *
177 * HISTORY
178 * July 1, 1997 - Andrew E. Mileski
179 * Written, tested, and released.
180 */
181int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 146int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
182 unsigned long arg) 147 unsigned long arg)
183{ 148{
@@ -225,18 +190,6 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
225 return result; 190 return result;
226} 191}
227 192
228/*
229 * udf_release_file
230 *
231 * PURPOSE
232 * Called when all references to the file are closed
233 *
234 * DESCRIPTION
235 * Discard prealloced blocks
236 *
237 * HISTORY
238 *
239 */
240static int udf_release_file(struct inode *inode, struct file *filp) 193static int udf_release_file(struct inode *inode, struct file *filp)
241{ 194{
242 if (filp->f_mode & FMODE_WRITE) { 195 if (filp->f_mode & FMODE_WRITE) {
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 84360315aca2..eb9cfa23dc3d 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -21,7 +21,6 @@
21#include "udfdecl.h" 21#include "udfdecl.h"
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/quotaops.h> 23#include <linux/quotaops.h>
24#include <linux/udf_fs.h>
25#include <linux/sched.h> 24#include <linux/sched.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27 26
@@ -47,11 +46,9 @@ void udf_free_inode(struct inode *inode)
47 struct logicalVolIntegrityDescImpUse *lvidiu = 46 struct logicalVolIntegrityDescImpUse *lvidiu =
48 udf_sb_lvidiu(sbi); 47 udf_sb_lvidiu(sbi);
49 if (S_ISDIR(inode->i_mode)) 48 if (S_ISDIR(inode->i_mode))
50 lvidiu->numDirs = 49 le32_add_cpu(&lvidiu->numDirs, -1);
51 cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
52 else 50 else
53 lvidiu->numFiles = 51 le32_add_cpu(&lvidiu->numFiles, -1);
54 cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);
55 52
56 mark_buffer_dirty(sbi->s_lvid_bh); 53 mark_buffer_dirty(sbi->s_lvid_bh);
57 } 54 }
@@ -105,11 +102,9 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
105 lvhd = (struct logicalVolHeaderDesc *) 102 lvhd = (struct logicalVolHeaderDesc *)
106 (lvid->logicalVolContentsUse); 103 (lvid->logicalVolContentsUse);
107 if (S_ISDIR(mode)) 104 if (S_ISDIR(mode))
108 lvidiu->numDirs = 105 le32_add_cpu(&lvidiu->numDirs, 1);
109 cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
110 else 106 else
111 lvidiu->numFiles = 107 le32_add_cpu(&lvidiu->numFiles, 1);
112 cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
113 iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID); 108 iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
114 if (!(++uniqueID & 0x00000000FFFFFFFFUL)) 109 if (!(++uniqueID & 0x00000000FFFFFFFFUL))
115 uniqueID += 16; 110 uniqueID += 16;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 24cfa55d0fdc..6e74b117aaf0 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -37,6 +37,7 @@
37#include <linux/buffer_head.h> 37#include <linux/buffer_head.h>
38#include <linux/writeback.h> 38#include <linux/writeback.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/crc-itu-t.h>
40 41
41#include "udf_i.h" 42#include "udf_i.h"
42#include "udf_sb.h" 43#include "udf_sb.h"
@@ -66,22 +67,7 @@ static void udf_update_extents(struct inode *,
66 struct extent_position *); 67 struct extent_position *);
67static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); 68static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
68 69
69/* 70
70 * udf_delete_inode
71 *
72 * PURPOSE
73 * Clean-up before the specified inode is destroyed.
74 *
75 * DESCRIPTION
76 * This routine is called when the kernel destroys an inode structure
77 * ie. when iput() finds i_count == 0.
78 *
79 * HISTORY
80 * July 1, 1997 - Andrew E. Mileski
81 * Written, tested, and released.
82 *
83 * Called at the last iput() if i_nlink is zero.
84 */
85void udf_delete_inode(struct inode *inode) 71void udf_delete_inode(struct inode *inode)
86{ 72{
87 truncate_inode_pages(&inode->i_data, 0); 73 truncate_inode_pages(&inode->i_data, 0);
@@ -323,9 +309,6 @@ static int udf_get_block(struct inode *inode, sector_t block,
323 309
324 lock_kernel(); 310 lock_kernel();
325 311
326 if (block < 0)
327 goto abort_negative;
328
329 iinfo = UDF_I(inode); 312 iinfo = UDF_I(inode);
330 if (block == iinfo->i_next_alloc_block + 1) { 313 if (block == iinfo->i_next_alloc_block + 1) {
331 iinfo->i_next_alloc_block++; 314 iinfo->i_next_alloc_block++;
@@ -347,10 +330,6 @@ static int udf_get_block(struct inode *inode, sector_t block,
347abort: 330abort:
348 unlock_kernel(); 331 unlock_kernel();
349 return err; 332 return err;
350
351abort_negative:
352 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
353 goto abort;
354} 333}
355 334
356static struct buffer_head *udf_getblk(struct inode *inode, long block, 335static struct buffer_head *udf_getblk(struct inode *inode, long block,
@@ -1116,42 +1095,36 @@ static void __udf_read_inode(struct inode *inode)
1116 fe = (struct fileEntry *)bh->b_data; 1095 fe = (struct fileEntry *)bh->b_data;
1117 1096
1118 if (fe->icbTag.strategyType == cpu_to_le16(4096)) { 1097 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1119 struct buffer_head *ibh = NULL, *nbh = NULL; 1098 struct buffer_head *ibh;
1120 struct indirectEntry *ie;
1121 1099
1122 ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1, 1100 ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1,
1123 &ident); 1101 &ident);
1124 if (ident == TAG_IDENT_IE) { 1102 if (ident == TAG_IDENT_IE && ibh) {
1125 if (ibh) { 1103 struct buffer_head *nbh = NULL;
1126 kernel_lb_addr loc; 1104 kernel_lb_addr loc;
1127 ie = (struct indirectEntry *)ibh->b_data; 1105 struct indirectEntry *ie;
1128 1106
1129 loc = lelb_to_cpu(ie->indirectICB.extLocation); 1107 ie = (struct indirectEntry *)ibh->b_data;
1130 1108 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1131 if (ie->indirectICB.extLength && 1109
1132 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, 1110 if (ie->indirectICB.extLength &&
1133 &ident))) { 1111 (nbh = udf_read_ptagged(inode->i_sb, loc, 0,
1134 if (ident == TAG_IDENT_FE || 1112 &ident))) {
1135 ident == TAG_IDENT_EFE) { 1113 if (ident == TAG_IDENT_FE ||
1136 memcpy(&iinfo->i_location, 1114 ident == TAG_IDENT_EFE) {
1137 &loc, 1115 memcpy(&iinfo->i_location,
1138 sizeof(kernel_lb_addr)); 1116 &loc,
1139 brelse(bh); 1117 sizeof(kernel_lb_addr));
1140 brelse(ibh); 1118 brelse(bh);
1141 brelse(nbh);
1142 __udf_read_inode(inode);
1143 return;
1144 } else {
1145 brelse(nbh);
1146 brelse(ibh);
1147 }
1148 } else {
1149 brelse(ibh); 1119 brelse(ibh);
1120 brelse(nbh);
1121 __udf_read_inode(inode);
1122 return;
1150 } 1123 }
1124 brelse(nbh);
1151 } 1125 }
1152 } else {
1153 brelse(ibh);
1154 } 1126 }
1127 brelse(ibh);
1155 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { 1128 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1156 printk(KERN_ERR "udf: unsupported strategy type: %d\n", 1129 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1157 le16_to_cpu(fe->icbTag.strategyType)); 1130 le16_to_cpu(fe->icbTag.strategyType));
@@ -1168,8 +1141,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1168{ 1141{
1169 struct fileEntry *fe; 1142 struct fileEntry *fe;
1170 struct extendedFileEntry *efe; 1143 struct extendedFileEntry *efe;
1171 time_t convtime;
1172 long convtime_usec;
1173 int offset; 1144 int offset;
1174 struct udf_sb_info *sbi = UDF_SB(inode->i_sb); 1145 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1175 struct udf_inode_info *iinfo = UDF_I(inode); 1146 struct udf_inode_info *iinfo = UDF_I(inode);
@@ -1257,29 +1228,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1257 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << 1228 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1258 (inode->i_sb->s_blocksize_bits - 9); 1229 (inode->i_sb->s_blocksize_bits - 9);
1259 1230
1260 if (udf_stamp_to_time(&convtime, &convtime_usec, 1231 if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
1261 lets_to_cpu(fe->accessTime))) {
1262 inode->i_atime.tv_sec = convtime;
1263 inode->i_atime.tv_nsec = convtime_usec * 1000;
1264 } else {
1265 inode->i_atime = sbi->s_record_time; 1232 inode->i_atime = sbi->s_record_time;
1266 }
1267 1233
1268 if (udf_stamp_to_time(&convtime, &convtime_usec, 1234 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1269 lets_to_cpu(fe->modificationTime))) { 1235 fe->modificationTime))
1270 inode->i_mtime.tv_sec = convtime;
1271 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1272 } else {
1273 inode->i_mtime = sbi->s_record_time; 1236 inode->i_mtime = sbi->s_record_time;
1274 }
1275 1237
1276 if (udf_stamp_to_time(&convtime, &convtime_usec, 1238 if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
1277 lets_to_cpu(fe->attrTime))) {
1278 inode->i_ctime.tv_sec = convtime;
1279 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1280 } else {
1281 inode->i_ctime = sbi->s_record_time; 1239 inode->i_ctime = sbi->s_record_time;
1282 }
1283 1240
1284 iinfo->i_unique = le64_to_cpu(fe->uniqueID); 1241 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1285 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); 1242 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
@@ -1289,37 +1246,18 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1289 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << 1246 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1290 (inode->i_sb->s_blocksize_bits - 9); 1247 (inode->i_sb->s_blocksize_bits - 9);
1291 1248
1292 if (udf_stamp_to_time(&convtime, &convtime_usec, 1249 if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
1293 lets_to_cpu(efe->accessTime))) {
1294 inode->i_atime.tv_sec = convtime;
1295 inode->i_atime.tv_nsec = convtime_usec * 1000;
1296 } else {
1297 inode->i_atime = sbi->s_record_time; 1250 inode->i_atime = sbi->s_record_time;
1298 }
1299 1251
1300 if (udf_stamp_to_time(&convtime, &convtime_usec, 1252 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1301 lets_to_cpu(efe->modificationTime))) { 1253 efe->modificationTime))
1302 inode->i_mtime.tv_sec = convtime;
1303 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1304 } else {
1305 inode->i_mtime = sbi->s_record_time; 1254 inode->i_mtime = sbi->s_record_time;
1306 }
1307 1255
1308 if (udf_stamp_to_time(&convtime, &convtime_usec, 1256 if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
1309 lets_to_cpu(efe->createTime))) {
1310 iinfo->i_crtime.tv_sec = convtime;
1311 iinfo->i_crtime.tv_nsec = convtime_usec * 1000;
1312 } else {
1313 iinfo->i_crtime = sbi->s_record_time; 1257 iinfo->i_crtime = sbi->s_record_time;
1314 }
1315 1258
1316 if (udf_stamp_to_time(&convtime, &convtime_usec, 1259 if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
1317 lets_to_cpu(efe->attrTime))) {
1318 inode->i_ctime.tv_sec = convtime;
1319 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1320 } else {
1321 inode->i_ctime = sbi->s_record_time; 1260 inode->i_ctime = sbi->s_record_time;
1322 }
1323 1261
1324 iinfo->i_unique = le64_to_cpu(efe->uniqueID); 1262 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1325 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); 1263 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
@@ -1338,6 +1276,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1338 case ICBTAG_FILE_TYPE_REALTIME: 1276 case ICBTAG_FILE_TYPE_REALTIME:
1339 case ICBTAG_FILE_TYPE_REGULAR: 1277 case ICBTAG_FILE_TYPE_REGULAR:
1340 case ICBTAG_FILE_TYPE_UNDEF: 1278 case ICBTAG_FILE_TYPE_UNDEF:
1279 case ICBTAG_FILE_TYPE_VAT20:
1341 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) 1280 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1342 inode->i_data.a_ops = &udf_adinicb_aops; 1281 inode->i_data.a_ops = &udf_adinicb_aops;
1343 else 1282 else
@@ -1363,6 +1302,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1363 inode->i_op = &page_symlink_inode_operations; 1302 inode->i_op = &page_symlink_inode_operations;
1364 inode->i_mode = S_IFLNK | S_IRWXUGO; 1303 inode->i_mode = S_IFLNK | S_IRWXUGO;
1365 break; 1304 break;
1305 case ICBTAG_FILE_TYPE_MAIN:
1306 udf_debug("METADATA FILE-----\n");
1307 break;
1308 case ICBTAG_FILE_TYPE_MIRROR:
1309 udf_debug("METADATA MIRROR FILE-----\n");
1310 break;
1311 case ICBTAG_FILE_TYPE_BITMAP:
1312 udf_debug("METADATA BITMAP FILE-----\n");
1313 break;
1366 default: 1314 default:
1367 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown " 1315 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
1368 "file type=%d\n", inode->i_ino, 1316 "file type=%d\n", inode->i_ino,
@@ -1416,21 +1364,6 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
1416 return mode; 1364 return mode;
1417} 1365}
1418 1366
1419/*
1420 * udf_write_inode
1421 *
1422 * PURPOSE
1423 * Write out the specified inode.
1424 *
1425 * DESCRIPTION
1426 * This routine is called whenever an inode is synced.
1427 * Currently this routine is just a placeholder.
1428 *
1429 * HISTORY
1430 * July 1, 1997 - Andrew E. Mileski
1431 * Written, tested, and released.
1432 */
1433
1434int udf_write_inode(struct inode *inode, int sync) 1367int udf_write_inode(struct inode *inode, int sync)
1435{ 1368{
1436 int ret; 1369 int ret;
@@ -1455,7 +1388,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1455 uint32_t udfperms; 1388 uint32_t udfperms;
1456 uint16_t icbflags; 1389 uint16_t icbflags;
1457 uint16_t crclen; 1390 uint16_t crclen;
1458 kernel_timestamp cpu_time;
1459 int err = 0; 1391 int err = 0;
1460 struct udf_sb_info *sbi = UDF_SB(inode->i_sb); 1392 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1461 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; 1393 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
@@ -1488,9 +1420,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1488 iinfo->i_location. 1420 iinfo->i_location.
1489 logicalBlockNum); 1421 logicalBlockNum);
1490 use->descTag.descCRCLength = cpu_to_le16(crclen); 1422 use->descTag.descCRCLength = cpu_to_le16(crclen);
1491 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + 1423 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1492 sizeof(tag), crclen, 1424 sizeof(tag),
1493 0)); 1425 crclen));
1494 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); 1426 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1495 1427
1496 mark_buffer_dirty(bh); 1428 mark_buffer_dirty(bh);
@@ -1558,12 +1490,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1558 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >> 1490 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1559 (blocksize_bits - 9)); 1491 (blocksize_bits - 9));
1560 1492
1561 if (udf_time_to_stamp(&cpu_time, inode->i_atime)) 1493 udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1562 fe->accessTime = cpu_to_lets(cpu_time); 1494 udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1563 if (udf_time_to_stamp(&cpu_time, inode->i_mtime)) 1495 udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1564 fe->modificationTime = cpu_to_lets(cpu_time);
1565 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1566 fe->attrTime = cpu_to_lets(cpu_time);
1567 memset(&(fe->impIdent), 0, sizeof(regid)); 1496 memset(&(fe->impIdent), 0, sizeof(regid));
1568 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER); 1497 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1569 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1498 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1598,14 +1527,10 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1598 iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec)) 1527 iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
1599 iinfo->i_crtime = inode->i_ctime; 1528 iinfo->i_crtime = inode->i_ctime;
1600 1529
1601 if (udf_time_to_stamp(&cpu_time, inode->i_atime)) 1530 udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1602 efe->accessTime = cpu_to_lets(cpu_time); 1531 udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1603 if (udf_time_to_stamp(&cpu_time, inode->i_mtime)) 1532 udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1604 efe->modificationTime = cpu_to_lets(cpu_time); 1533 udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1605 if (udf_time_to_stamp(&cpu_time, iinfo->i_crtime))
1606 efe->createTime = cpu_to_lets(cpu_time);
1607 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1608 efe->attrTime = cpu_to_lets(cpu_time);
1609 1534
1610 memset(&(efe->impIdent), 0, sizeof(regid)); 1535 memset(&(efe->impIdent), 0, sizeof(regid));
1611 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER); 1536 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
@@ -1660,8 +1585,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1660 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - 1585 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
1661 sizeof(tag); 1586 sizeof(tag);
1662 fe->descTag.descCRCLength = cpu_to_le16(crclen); 1587 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1663 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), 1588 fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(tag),
1664 crclen, 0)); 1589 crclen));
1665 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); 1590 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1666 1591
1667 /* write the data blocks */ 1592 /* write the data blocks */
@@ -1778,9 +1703,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1778 1703
1779 if (epos->bh) { 1704 if (epos->bh) {
1780 aed = (struct allocExtDesc *)epos->bh->b_data; 1705 aed = (struct allocExtDesc *)epos->bh->b_data;
1781 aed->lengthAllocDescs = 1706 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1782 cpu_to_le32(le32_to_cpu(
1783 aed->lengthAllocDescs) + adsize);
1784 } else { 1707 } else {
1785 iinfo->i_lenAlloc += adsize; 1708 iinfo->i_lenAlloc += adsize;
1786 mark_inode_dirty(inode); 1709 mark_inode_dirty(inode);
@@ -1830,9 +1753,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1830 mark_inode_dirty(inode); 1753 mark_inode_dirty(inode);
1831 } else { 1754 } else {
1832 aed = (struct allocExtDesc *)epos->bh->b_data; 1755 aed = (struct allocExtDesc *)epos->bh->b_data;
1833 aed->lengthAllocDescs = 1756 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1834 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) +
1835 adsize);
1836 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 1757 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1837 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) 1758 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1838 udf_update_tag(epos->bh->b_data, 1759 udf_update_tag(epos->bh->b_data,
@@ -2046,9 +1967,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
2046 mark_inode_dirty(inode); 1967 mark_inode_dirty(inode);
2047 } else { 1968 } else {
2048 aed = (struct allocExtDesc *)oepos.bh->b_data; 1969 aed = (struct allocExtDesc *)oepos.bh->b_data;
2049 aed->lengthAllocDescs = 1970 le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
2050 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
2051 (2 * adsize));
2052 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 1971 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2053 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) 1972 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2054 udf_update_tag(oepos.bh->b_data, 1973 udf_update_tag(oepos.bh->b_data,
@@ -2065,9 +1984,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
2065 mark_inode_dirty(inode); 1984 mark_inode_dirty(inode);
2066 } else { 1985 } else {
2067 aed = (struct allocExtDesc *)oepos.bh->b_data; 1986 aed = (struct allocExtDesc *)oepos.bh->b_data;
2068 aed->lengthAllocDescs = 1987 le32_add_cpu(&aed->lengthAllocDescs, -adsize);
2069 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
2070 adsize);
2071 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || 1988 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2072 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) 1989 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2073 udf_update_tag(oepos.bh->b_data, 1990 udf_update_tag(oepos.bh->b_data,
@@ -2095,11 +2012,6 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
2095 int8_t etype; 2012 int8_t etype;
2096 struct udf_inode_info *iinfo; 2013 struct udf_inode_info *iinfo;
2097 2014
2098 if (block < 0) {
2099 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2100 return -1;
2101 }
2102
2103 iinfo = UDF_I(inode); 2015 iinfo = UDF_I(inode);
2104 pos->offset = 0; 2016 pos->offset = 0;
2105 pos->block = iinfo->i_location; 2017 pos->block = iinfo->i_location;
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 579bae71e67e..703843f30ffd 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -23,7 +23,6 @@
23#include <linux/cdrom.h> 23#include <linux/cdrom.h>
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25 25
26#include <linux/udf_fs.h>
27#include "udf_sb.h" 26#include "udf_sb.h"
28 27
29unsigned int udf_get_last_session(struct super_block *sb) 28unsigned int udf_get_last_session(struct super_block *sb)
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index a1d6da0caf71..84bf0fd4a4f1 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -23,8 +23,8 @@
23 23
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/udf_fs.h>
27#include <linux/buffer_head.h> 26#include <linux/buffer_head.h>
27#include <linux/crc-itu-t.h>
28 28
29#include "udf_i.h" 29#include "udf_i.h"
30#include "udf_sb.h" 30#include "udf_sb.h"
@@ -136,8 +136,8 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
136 /* rewrite CRC + checksum of eahd */ 136 /* rewrite CRC + checksum of eahd */
137 crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag); 137 crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
138 eahd->descTag.descCRCLength = cpu_to_le16(crclen); 138 eahd->descTag.descCRCLength = cpu_to_le16(crclen);
139 eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd + 139 eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd +
140 sizeof(tag), crclen, 0)); 140 sizeof(tag), crclen));
141 eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag); 141 eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
142 iinfo->i_lenEAttr += size; 142 iinfo->i_lenEAttr += size;
143 return (struct genericFormat *)&ea[offset]; 143 return (struct genericFormat *)&ea[offset];
@@ -204,16 +204,15 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
204{ 204{
205 tag *tag_p; 205 tag *tag_p;
206 struct buffer_head *bh = NULL; 206 struct buffer_head *bh = NULL;
207 struct udf_sb_info *sbi = UDF_SB(sb);
208 207
209 /* Read the block */ 208 /* Read the block */
210 if (block == 0xFFFFFFFF) 209 if (block == 0xFFFFFFFF)
211 return NULL; 210 return NULL;
212 211
213 bh = udf_tread(sb, block + sbi->s_session); 212 bh = udf_tread(sb, block);
214 if (!bh) { 213 if (!bh) {
215 udf_debug("block=%d, location=%d: read failed\n", 214 udf_debug("block=%d, location=%d: read failed\n",
216 block + sbi->s_session, location); 215 block, location);
217 return NULL; 216 return NULL;
218 } 217 }
219 218
@@ -223,8 +222,7 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
223 222
224 if (location != le32_to_cpu(tag_p->tagLocation)) { 223 if (location != le32_to_cpu(tag_p->tagLocation)) {
225 udf_debug("location mismatch block %u, tag %u != %u\n", 224 udf_debug("location mismatch block %u, tag %u != %u\n",
226 block + sbi->s_session, 225 block, le32_to_cpu(tag_p->tagLocation), location);
227 le32_to_cpu(tag_p->tagLocation), location);
228 goto error_out; 226 goto error_out;
229 } 227 }
230 228
@@ -244,13 +242,13 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
244 242
245 /* Verify the descriptor CRC */ 243 /* Verify the descriptor CRC */
246 if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize || 244 if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
247 le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag), 245 le16_to_cpu(tag_p->descCRC) == crc_itu_t(0,
248 le16_to_cpu(tag_p->descCRCLength), 0)) 246 bh->b_data + sizeof(tag),
247 le16_to_cpu(tag_p->descCRCLength)))
249 return bh; 248 return bh;
250 249
251 udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", 250 udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", block,
252 block + sbi->s_session, le16_to_cpu(tag_p->descCRC), 251 le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength));
253 le16_to_cpu(tag_p->descCRCLength));
254 252
255error_out: 253error_out:
256 brelse(bh); 254 brelse(bh);
@@ -270,7 +268,7 @@ void udf_update_tag(char *data, int length)
270 length -= sizeof(tag); 268 length -= sizeof(tag);
271 269
272 tptr->descCRCLength = cpu_to_le16(length); 270 tptr->descCRCLength = cpu_to_le16(length);
273 tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0)); 271 tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(tag), length));
274 tptr->tagChecksum = udf_tag_checksum(tptr); 272 tptr->tagChecksum = udf_tag_checksum(tptr);
275} 273}
276 274
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 112a5fb0b27b..ba5537d4bc15 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -31,6 +31,7 @@
31#include <linux/smp_lock.h> 31#include <linux/smp_lock.h>
32#include <linux/buffer_head.h> 32#include <linux/buffer_head.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/crc-itu-t.h>
34 35
35static inline int udf_match(int len1, const char *name1, int len2, 36static inline int udf_match(int len1, const char *name1, int len2,
36 const char *name2) 37 const char *name2)
@@ -97,25 +98,23 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
97 memset(fibh->ebh->b_data, 0x00, padlen + offset); 98 memset(fibh->ebh->b_data, 0x00, padlen + offset);
98 } 99 }
99 100
100 crc = udf_crc((uint8_t *)cfi + sizeof(tag), 101 crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(tag),
101 sizeof(struct fileIdentDesc) - sizeof(tag), 0); 102 sizeof(struct fileIdentDesc) - sizeof(tag));
102 103
103 if (fibh->sbh == fibh->ebh) { 104 if (fibh->sbh == fibh->ebh) {
104 crc = udf_crc((uint8_t *)sfi->impUse, 105 crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
105 crclen + sizeof(tag) - 106 crclen + sizeof(tag) -
106 sizeof(struct fileIdentDesc), crc); 107 sizeof(struct fileIdentDesc));
107 } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) { 108 } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
108 crc = udf_crc(fibh->ebh->b_data + 109 crc = crc_itu_t(crc, fibh->ebh->b_data +
109 sizeof(struct fileIdentDesc) + 110 sizeof(struct fileIdentDesc) +
110 fibh->soffset, 111 fibh->soffset,
111 crclen + sizeof(tag) - 112 crclen + sizeof(tag) -
112 sizeof(struct fileIdentDesc), 113 sizeof(struct fileIdentDesc));
113 crc);
114 } else { 114 } else {
115 crc = udf_crc((uint8_t *)sfi->impUse, 115 crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
116 -fibh->soffset - sizeof(struct fileIdentDesc), 116 -fibh->soffset - sizeof(struct fileIdentDesc));
117 crc); 117 crc = crc_itu_t(crc, fibh->ebh->b_data, fibh->eoffset);
118 crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
119 } 118 }
120 119
121 cfi->descTag.descCRC = cpu_to_le16(crc); 120 cfi->descTag.descCRC = cpu_to_le16(crc);
@@ -149,7 +148,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
149 struct fileIdentDesc *fi = NULL; 148 struct fileIdentDesc *fi = NULL;
150 loff_t f_pos; 149 loff_t f_pos;
151 int block, flen; 150 int block, flen;
152 char fname[UDF_NAME_LEN]; 151 char *fname = NULL;
153 char *nameptr; 152 char *nameptr;
154 uint8_t lfi; 153 uint8_t lfi;
155 uint16_t liu; 154 uint16_t liu;
@@ -163,12 +162,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
163 size = udf_ext0_offset(dir) + dir->i_size; 162 size = udf_ext0_offset(dir) + dir->i_size;
164 f_pos = udf_ext0_offset(dir); 163 f_pos = udf_ext0_offset(dir);
165 164
165 fibh->sbh = fibh->ebh = NULL;
166 fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); 166 fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
167 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) 167 if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
168 fibh->sbh = fibh->ebh = NULL; 168 if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
169 else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, 169 &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
170 &epos, &eloc, &elen, &offset) == 170 goto out_err;
171 (EXT_RECORDED_ALLOCATED >> 30)) {
172 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 171 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
173 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 172 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
174 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 173 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -179,25 +178,19 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
179 offset = 0; 178 offset = 0;
180 179
181 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); 180 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
182 if (!fibh->sbh) { 181 if (!fibh->sbh)
183 brelse(epos.bh); 182 goto out_err;
184 return NULL;
185 }
186 } else {
187 brelse(epos.bh);
188 return NULL;
189 } 183 }
190 184
185 fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
186 if (!fname)
187 goto out_err;
188
191 while (f_pos < size) { 189 while (f_pos < size) {
192 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, 190 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
193 &elen, &offset); 191 &elen, &offset);
194 if (!fi) { 192 if (!fi)
195 if (fibh->sbh != fibh->ebh) 193 goto out_err;
196 brelse(fibh->ebh);
197 brelse(fibh->sbh);
198 brelse(epos.bh);
199 return NULL;
200 }
201 194
202 liu = le16_to_cpu(cfi->lengthOfImpUse); 195 liu = le16_to_cpu(cfi->lengthOfImpUse);
203 lfi = cfi->lengthFileIdent; 196 lfi = cfi->lengthFileIdent;
@@ -237,53 +230,22 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
237 230
238 flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); 231 flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
239 if (flen && udf_match(flen, fname, dentry->d_name.len, 232 if (flen && udf_match(flen, fname, dentry->d_name.len,
240 dentry->d_name.name)) { 233 dentry->d_name.name))
241 brelse(epos.bh); 234 goto out_ok;
242 return fi;
243 }
244 } 235 }
245 236
237out_err:
238 fi = NULL;
246 if (fibh->sbh != fibh->ebh) 239 if (fibh->sbh != fibh->ebh)
247 brelse(fibh->ebh); 240 brelse(fibh->ebh);
248 brelse(fibh->sbh); 241 brelse(fibh->sbh);
242out_ok:
249 brelse(epos.bh); 243 brelse(epos.bh);
244 kfree(fname);
250 245
251 return NULL; 246 return fi;
252} 247}
253 248
254/*
255 * udf_lookup
256 *
257 * PURPOSE
258 * Look-up the inode for a given name.
259 *
260 * DESCRIPTION
261 * Required - lookup_dentry() will return -ENOTDIR if this routine is not
262 * available for a directory. The filesystem is useless if this routine is
263 * not available for at least the filesystem's root directory.
264 *
265 * This routine is passed an incomplete dentry - it must be completed by
266 * calling d_add(dentry, inode). If the name does not exist, then the
267 * specified inode must be set to null. An error should only be returned
268 * when the lookup fails for a reason other than the name not existing.
269 * Note that the directory inode semaphore is held during the call.
270 *
271 * Refer to lookup_dentry() in fs/namei.c
272 * lookup_dentry() -> lookup() -> real_lookup() -> .
273 *
274 * PRE-CONDITIONS
275 * dir Pointer to inode of parent directory.
276 * dentry Pointer to dentry to complete.
277 * nd Pointer to lookup nameidata
278 *
279 * POST-CONDITIONS
280 * <return> Zero on success.
281 *
282 * HISTORY
283 * July 1, 1997 - Andrew E. Mileski
284 * Written, tested, and released.
285 */
286
287static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, 249static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
288 struct nameidata *nd) 250 struct nameidata *nd)
289{ 251{
@@ -336,11 +298,9 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
336{ 298{
337 struct super_block *sb = dir->i_sb; 299 struct super_block *sb = dir->i_sb;
338 struct fileIdentDesc *fi = NULL; 300 struct fileIdentDesc *fi = NULL;
339 char name[UDF_NAME_LEN], fname[UDF_NAME_LEN]; 301 char *name = NULL;
340 int namelen; 302 int namelen;
341 loff_t f_pos; 303 loff_t f_pos;
342 int flen;
343 char *nameptr;
344 loff_t size = udf_ext0_offset(dir) + dir->i_size; 304 loff_t size = udf_ext0_offset(dir) + dir->i_size;
345 int nfidlen; 305 int nfidlen;
346 uint8_t lfi; 306 uint8_t lfi;
@@ -352,16 +312,23 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
352 struct extent_position epos = {}; 312 struct extent_position epos = {};
353 struct udf_inode_info *dinfo; 313 struct udf_inode_info *dinfo;
354 314
315 fibh->sbh = fibh->ebh = NULL;
316 name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
317 if (!name) {
318 *err = -ENOMEM;
319 goto out_err;
320 }
321
355 if (dentry) { 322 if (dentry) {
356 if (!dentry->d_name.len) { 323 if (!dentry->d_name.len) {
357 *err = -EINVAL; 324 *err = -EINVAL;
358 return NULL; 325 goto out_err;
359 } 326 }
360 namelen = udf_put_filename(sb, dentry->d_name.name, name, 327 namelen = udf_put_filename(sb, dentry->d_name.name, name,
361 dentry->d_name.len); 328 dentry->d_name.len);
362 if (!namelen) { 329 if (!namelen) {
363 *err = -ENAMETOOLONG; 330 *err = -ENAMETOOLONG;
364 return NULL; 331 goto out_err;
365 } 332 }
366 } else { 333 } else {
367 namelen = 0; 334 namelen = 0;
@@ -373,11 +340,14 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
373 340
374 fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); 341 fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
375 dinfo = UDF_I(dir); 342 dinfo = UDF_I(dir);
376 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) 343 if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
377 fibh->sbh = fibh->ebh = NULL; 344 if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
378 else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, 345 &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) {
379 &epos, &eloc, &elen, &offset) == 346 block = udf_get_lb_pblock(dir->i_sb,
380 (EXT_RECORDED_ALLOCATED >> 30)) { 347 dinfo->i_location, 0);
348 fibh->soffset = fibh->eoffset = sb->s_blocksize;
349 goto add;
350 }
381 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 351 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
382 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 352 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
383 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 353 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
@@ -389,17 +359,11 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
389 359
390 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); 360 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
391 if (!fibh->sbh) { 361 if (!fibh->sbh) {
392 brelse(epos.bh);
393 *err = -EIO; 362 *err = -EIO;
394 return NULL; 363 goto out_err;
395 } 364 }
396 365
397 block = dinfo->i_location.logicalBlockNum; 366 block = dinfo->i_location.logicalBlockNum;
398 } else {
399 block = udf_get_lb_pblock(dir->i_sb, dinfo->i_location, 0);
400 fibh->sbh = fibh->ebh = NULL;
401 fibh->soffset = fibh->eoffset = sb->s_blocksize;
402 goto add;
403 } 367 }
404 368
405 while (f_pos < size) { 369 while (f_pos < size) {
@@ -407,41 +371,16 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
407 &elen, &offset); 371 &elen, &offset);
408 372
409 if (!fi) { 373 if (!fi) {
410 if (fibh->sbh != fibh->ebh)
411 brelse(fibh->ebh);
412 brelse(fibh->sbh);
413 brelse(epos.bh);
414 *err = -EIO; 374 *err = -EIO;
415 return NULL; 375 goto out_err;
416 } 376 }
417 377
418 liu = le16_to_cpu(cfi->lengthOfImpUse); 378 liu = le16_to_cpu(cfi->lengthOfImpUse);
419 lfi = cfi->lengthFileIdent; 379 lfi = cfi->lengthFileIdent;
420 380
421 if (fibh->sbh == fibh->ebh)
422 nameptr = fi->fileIdent + liu;
423 else {
424 int poffset; /* Unpaded ending offset */
425
426 poffset = fibh->soffset + sizeof(struct fileIdentDesc) +
427 liu + lfi;
428
429 if (poffset >= lfi)
430 nameptr = (char *)(fibh->ebh->b_data +
431 poffset - lfi);
432 else {
433 nameptr = fname;
434 memcpy(nameptr, fi->fileIdent + liu,
435 lfi - poffset);
436 memcpy(nameptr + lfi - poffset,
437 fibh->ebh->b_data, poffset);
438 }
439 }
440
441 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { 381 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
442 if (((sizeof(struct fileIdentDesc) + 382 if (((sizeof(struct fileIdentDesc) +
443 liu + lfi + 3) & ~3) == nfidlen) { 383 liu + lfi + 3) & ~3) == nfidlen) {
444 brelse(epos.bh);
445 cfi->descTag.tagSerialNum = cpu_to_le16(1); 384 cfi->descTag.tagSerialNum = cpu_to_le16(1);
446 cfi->fileVersionNum = cpu_to_le16(1); 385 cfi->fileVersionNum = cpu_to_le16(1);
447 cfi->fileCharacteristics = 0; 386 cfi->fileCharacteristics = 0;
@@ -449,27 +388,13 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
449 cfi->lengthOfImpUse = cpu_to_le16(0); 388 cfi->lengthOfImpUse = cpu_to_le16(0);
450 if (!udf_write_fi(dir, cfi, fi, fibh, NULL, 389 if (!udf_write_fi(dir, cfi, fi, fibh, NULL,
451 name)) 390 name))
452 return fi; 391 goto out_ok;
453 else { 392 else {
454 *err = -EIO; 393 *err = -EIO;
455 return NULL; 394 goto out_err;
456 } 395 }
457 } 396 }
458 } 397 }
459
460 if (!lfi || !dentry)
461 continue;
462
463 flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
464 if (flen && udf_match(flen, fname, dentry->d_name.len,
465 dentry->d_name.name)) {
466 if (fibh->sbh != fibh->ebh)
467 brelse(fibh->ebh);
468 brelse(fibh->sbh);
469 brelse(epos.bh);
470 *err = -EEXIST;
471 return NULL;
472 }
473 } 398 }
474 399
475add: 400add:
@@ -496,7 +421,7 @@ add:
496 fibh->sbh = fibh->ebh = 421 fibh->sbh = fibh->ebh =
497 udf_expand_dir_adinicb(dir, &block, err); 422 udf_expand_dir_adinicb(dir, &block, err);
498 if (!fibh->sbh) 423 if (!fibh->sbh)
499 return NULL; 424 goto out_err;
500 epos.block = dinfo->i_location; 425 epos.block = dinfo->i_location;
501 epos.offset = udf_file_entry_alloc_offset(dir); 426 epos.offset = udf_file_entry_alloc_offset(dir);
502 /* Load extent udf_expand_dir_adinicb() has created */ 427 /* Load extent udf_expand_dir_adinicb() has created */
@@ -537,11 +462,8 @@ add:
537 dir->i_sb->s_blocksize_bits); 462 dir->i_sb->s_blocksize_bits);
538 fibh->ebh = udf_bread(dir, 463 fibh->ebh = udf_bread(dir,
539 f_pos >> dir->i_sb->s_blocksize_bits, 1, err); 464 f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
540 if (!fibh->ebh) { 465 if (!fibh->ebh)
541 brelse(epos.bh); 466 goto out_err;
542 brelse(fibh->sbh);
543 return NULL;
544 }
545 467
546 if (!fibh->soffset) { 468 if (!fibh->soffset) {
547 if (udf_next_aext(dir, &epos, &eloc, &elen, 1) == 469 if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
@@ -572,20 +494,25 @@ add:
572 cfi->lengthFileIdent = namelen; 494 cfi->lengthFileIdent = namelen;
573 cfi->lengthOfImpUse = cpu_to_le16(0); 495 cfi->lengthOfImpUse = cpu_to_le16(0);
574 if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { 496 if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
575 brelse(epos.bh);
576 dir->i_size += nfidlen; 497 dir->i_size += nfidlen;
577 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) 498 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
578 dinfo->i_lenAlloc += nfidlen; 499 dinfo->i_lenAlloc += nfidlen;
579 mark_inode_dirty(dir); 500 mark_inode_dirty(dir);
580 return fi; 501 goto out_ok;
581 } else { 502 } else {
582 brelse(epos.bh);
583 if (fibh->sbh != fibh->ebh)
584 brelse(fibh->ebh);
585 brelse(fibh->sbh);
586 *err = -EIO; 503 *err = -EIO;
587 return NULL; 504 goto out_err;
588 } 505 }
506
507out_err:
508 fi = NULL;
509 if (fibh->sbh != fibh->ebh)
510 brelse(fibh->ebh);
511 brelse(fibh->sbh);
512out_ok:
513 brelse(epos.bh);
514 kfree(name);
515 return fi;
589} 516}
590 517
591static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi, 518static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
@@ -940,7 +867,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
940 char *ea; 867 char *ea;
941 int err; 868 int err;
942 int block; 869 int block;
943 char name[UDF_NAME_LEN]; 870 char *name = NULL;
944 int namelen; 871 int namelen;
945 struct buffer_head *bh; 872 struct buffer_head *bh;
946 struct udf_inode_info *iinfo; 873 struct udf_inode_info *iinfo;
@@ -950,6 +877,12 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
950 if (!inode) 877 if (!inode)
951 goto out; 878 goto out;
952 879
880 name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
881 if (!name) {
882 err = -ENOMEM;
883 goto out_no_entry;
884 }
885
953 iinfo = UDF_I(inode); 886 iinfo = UDF_I(inode);
954 inode->i_mode = S_IFLNK | S_IRWXUGO; 887 inode->i_mode = S_IFLNK | S_IRWXUGO;
955 inode->i_data.a_ops = &udf_symlink_aops; 888 inode->i_data.a_ops = &udf_symlink_aops;
@@ -1089,6 +1022,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
1089 err = 0; 1022 err = 0;
1090 1023
1091out: 1024out:
1025 kfree(name);
1092 unlock_kernel(); 1026 unlock_kernel();
1093 return err; 1027 return err;
1094 1028
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index fc533345ab89..63610f026ae1 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -24,7 +24,6 @@
24 24
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/udf_fs.h>
28#include <linux/slab.h> 27#include <linux/slab.h>
29#include <linux/buffer_head.h> 28#include <linux/buffer_head.h>
30 29
@@ -55,11 +54,10 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
55 struct udf_sb_info *sbi = UDF_SB(sb); 54 struct udf_sb_info *sbi = UDF_SB(sb);
56 struct udf_part_map *map; 55 struct udf_part_map *map;
57 struct udf_virtual_data *vdata; 56 struct udf_virtual_data *vdata;
58 struct udf_inode_info *iinfo; 57 struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);
59 58
60 map = &sbi->s_partmaps[partition]; 59 map = &sbi->s_partmaps[partition];
61 vdata = &map->s_type_specific.s_virtual; 60 vdata = &map->s_type_specific.s_virtual;
62 index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
63 61
64 if (block > vdata->s_num_entries) { 62 if (block > vdata->s_num_entries) {
65 udf_debug("Trying to access block beyond end of VAT " 63 udf_debug("Trying to access block beyond end of VAT "
@@ -67,6 +65,12 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
67 return 0xFFFFFFFF; 65 return 0xFFFFFFFF;
68 } 66 }
69 67
68 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
69 loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data +
70 vdata->s_start_offset))[block]);
71 goto translate;
72 }
73 index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
70 if (block >= index) { 74 if (block >= index) {
71 block -= index; 75 block -= index;
72 newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t))); 76 newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
@@ -89,7 +93,7 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
89 93
90 brelse(bh); 94 brelse(bh);
91 95
92 iinfo = UDF_I(sbi->s_vat_inode); 96translate:
93 if (iinfo->i_location.partitionReferenceNum == partition) { 97 if (iinfo->i_location.partitionReferenceNum == partition) {
94 udf_debug("recursive call to udf_get_pblock!\n"); 98 udf_debug("recursive call to udf_get_pblock!\n");
95 return 0xFFFFFFFF; 99 return 0xFFFFFFFF;
@@ -263,3 +267,58 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
263 267
264 return 0; 268 return 0;
265} 269}
270
271static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
272 uint16_t partition, uint32_t offset)
273{
274 struct super_block *sb = inode->i_sb;
275 struct udf_part_map *map;
276 kernel_lb_addr eloc;
277 uint32_t elen;
278 sector_t ext_offset;
279 struct extent_position epos = {};
280 uint32_t phyblock;
281
282 if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
283 (EXT_RECORDED_ALLOCATED >> 30))
284 phyblock = 0xFFFFFFFF;
285 else {
286 map = &UDF_SB(sb)->s_partmaps[partition];
287 /* map to sparable/physical partition desc */
288 phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
289 map->s_partition_num, ext_offset + offset);
290 }
291
292 brelse(epos.bh);
293 return phyblock;
294}
295
296uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
297 uint16_t partition, uint32_t offset)
298{
299 struct udf_sb_info *sbi = UDF_SB(sb);
300 struct udf_part_map *map;
301 struct udf_meta_data *mdata;
302 uint32_t retblk;
303 struct inode *inode;
304
305 udf_debug("READING from METADATA\n");
306
307 map = &sbi->s_partmaps[partition];
308 mdata = &map->s_type_specific.s_metadata;
309 inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
310
311 /* We shouldn't mount such media... */
312 BUG_ON(!inode);
313 retblk = udf_try_read_meta(inode, block, partition, offset);
314 if (retblk == 0xFFFFFFFF) {
315 udf_warning(sb, __func__, "error reading from METADATA, "
316 "trying to read from MIRROR");
317 inode = mdata->s_mirror_fe;
318 if (!inode)
319 return 0xFFFFFFFF;
320 retblk = udf_try_read_meta(inode, block, partition, offset);
321 }
322
323 return retblk;
324}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index f3ac4abfc946..b564fc140fe4 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -55,9 +55,10 @@
55#include <linux/errno.h> 55#include <linux/errno.h>
56#include <linux/mount.h> 56#include <linux/mount.h>
57#include <linux/seq_file.h> 57#include <linux/seq_file.h>
58#include <linux/bitmap.h>
59#include <linux/crc-itu-t.h>
58#include <asm/byteorder.h> 60#include <asm/byteorder.h>
59 61
60#include <linux/udf_fs.h>
61#include "udf_sb.h" 62#include "udf_sb.h"
62#include "udf_i.h" 63#include "udf_i.h"
63 64
@@ -84,22 +85,19 @@ static void udf_write_super(struct super_block *);
84static int udf_remount_fs(struct super_block *, int *, char *); 85static int udf_remount_fs(struct super_block *, int *, char *);
85static int udf_check_valid(struct super_block *, int, int); 86static int udf_check_valid(struct super_block *, int, int);
86static int udf_vrs(struct super_block *sb, int silent); 87static int udf_vrs(struct super_block *sb, int silent);
87static int udf_load_partition(struct super_block *, kernel_lb_addr *);
88static int udf_load_logicalvol(struct super_block *, struct buffer_head *,
89 kernel_lb_addr *);
90static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad); 88static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad);
91static void udf_find_anchor(struct super_block *); 89static void udf_find_anchor(struct super_block *);
92static int udf_find_fileset(struct super_block *, kernel_lb_addr *, 90static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
93 kernel_lb_addr *); 91 kernel_lb_addr *);
94static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
95static void udf_load_fileset(struct super_block *, struct buffer_head *, 92static void udf_load_fileset(struct super_block *, struct buffer_head *,
96 kernel_lb_addr *); 93 kernel_lb_addr *);
97static int udf_load_partdesc(struct super_block *, struct buffer_head *);
98static void udf_open_lvid(struct super_block *); 94static void udf_open_lvid(struct super_block *);
99static void udf_close_lvid(struct super_block *); 95static void udf_close_lvid(struct super_block *);
100static unsigned int udf_count_free(struct super_block *); 96static unsigned int udf_count_free(struct super_block *);
101static int udf_statfs(struct dentry *, struct kstatfs *); 97static int udf_statfs(struct dentry *, struct kstatfs *);
102static int udf_show_options(struct seq_file *, struct vfsmount *); 98static int udf_show_options(struct seq_file *, struct vfsmount *);
99static void udf_error(struct super_block *sb, const char *function,
100 const char *fmt, ...);
103 101
104struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi) 102struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
105{ 103{
@@ -587,48 +585,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
587 return 0; 585 return 0;
588} 586}
589 587
590/*
591 * udf_set_blocksize
592 *
593 * PURPOSE
594 * Set the block size to be used in all transfers.
595 *
596 * DESCRIPTION
597 * To allow room for a DMA transfer, it is best to guess big when unsure.
598 * This routine picks 2048 bytes as the blocksize when guessing. This
599 * should be adequate until devices with larger block sizes become common.
600 *
601 * Note that the Linux kernel can currently only deal with blocksizes of
602 * 512, 1024, 2048, 4096, and 8192 bytes.
603 *
604 * PRE-CONDITIONS
605 * sb Pointer to _locked_ superblock.
606 *
607 * POST-CONDITIONS
608 * sb->s_blocksize Blocksize.
609 * sb->s_blocksize_bits log2 of blocksize.
610 * <return> 0 Blocksize is valid.
611 * <return> 1 Blocksize is invalid.
612 *
613 * HISTORY
614 * July 1, 1997 - Andrew E. Mileski
615 * Written, tested, and released.
616 */
617static int udf_set_blocksize(struct super_block *sb, int bsize)
618{
619 if (!sb_min_blocksize(sb, bsize)) {
620 udf_debug("Bad block size (%d)\n", bsize);
621 printk(KERN_ERR "udf: bad block size (%d)\n", bsize);
622 return 0;
623 }
624
625 return sb->s_blocksize;
626}
627
628static int udf_vrs(struct super_block *sb, int silent) 588static int udf_vrs(struct super_block *sb, int silent)
629{ 589{
630 struct volStructDesc *vsd = NULL; 590 struct volStructDesc *vsd = NULL;
631 int sector = 32768; 591 loff_t sector = 32768;
632 int sectorsize; 592 int sectorsize;
633 struct buffer_head *bh = NULL; 593 struct buffer_head *bh = NULL;
634 int iso9660 = 0; 594 int iso9660 = 0;
@@ -649,7 +609,8 @@ static int udf_vrs(struct super_block *sb, int silent)
649 sector += (sbi->s_session << sb->s_blocksize_bits); 609 sector += (sbi->s_session << sb->s_blocksize_bits);
650 610
651 udf_debug("Starting at sector %u (%ld byte sectors)\n", 611 udf_debug("Starting at sector %u (%ld byte sectors)\n",
652 (sector >> sb->s_blocksize_bits), sb->s_blocksize); 612 (unsigned int)(sector >> sb->s_blocksize_bits),
613 sb->s_blocksize);
653 /* Process the sequence (if applicable) */ 614 /* Process the sequence (if applicable) */
654 for (; !nsr02 && !nsr03; sector += sectorsize) { 615 for (; !nsr02 && !nsr03; sector += sectorsize) {
655 /* Read a block */ 616 /* Read a block */
@@ -719,162 +680,140 @@ static int udf_vrs(struct super_block *sb, int silent)
719} 680}
720 681
721/* 682/*
722 * udf_find_anchor 683 * Check whether there is an anchor block in the given block
723 *
724 * PURPOSE
725 * Find an anchor volume descriptor.
726 *
727 * PRE-CONDITIONS
728 * sb Pointer to _locked_ superblock.
729 * lastblock Last block on media.
730 *
731 * POST-CONDITIONS
732 * <return> 1 if not found, 0 if ok
733 *
734 * HISTORY
735 * July 1, 1997 - Andrew E. Mileski
736 * Written, tested, and released.
737 */ 684 */
738static void udf_find_anchor(struct super_block *sb) 685static int udf_check_anchor_block(struct super_block *sb, sector_t block,
686 bool varconv)
739{ 687{
740 int lastblock;
741 struct buffer_head *bh = NULL; 688 struct buffer_head *bh = NULL;
689 tag *t;
742 uint16_t ident; 690 uint16_t ident;
743 uint32_t location; 691 uint32_t location;
744 int i;
745 struct udf_sb_info *sbi;
746 692
747 sbi = UDF_SB(sb); 693 if (varconv) {
748 lastblock = sbi->s_last_block; 694 if (udf_fixed_to_variable(block) >=
695 sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
696 return 0;
697 bh = sb_bread(sb, udf_fixed_to_variable(block));
698 }
699 else
700 bh = sb_bread(sb, block);
749 701
750 if (lastblock) { 702 if (!bh)
751 int varlastblock = udf_variable_to_fixed(lastblock); 703 return 0;
752 int last[] = { lastblock, lastblock - 2,
753 lastblock - 150, lastblock - 152,
754 varlastblock, varlastblock - 2,
755 varlastblock - 150, varlastblock - 152 };
756
757 lastblock = 0;
758
759 /* Search for an anchor volume descriptor pointer */
760
761 /* according to spec, anchor is in either:
762 * block 256
763 * lastblock-256
764 * lastblock
765 * however, if the disc isn't closed, it could be 512 */
766
767 for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) {
768 ident = location = 0;
769 if (last[i] >= 0) {
770 bh = sb_bread(sb, last[i]);
771 if (bh) {
772 tag *t = (tag *)bh->b_data;
773 ident = le16_to_cpu(t->tagIdent);
774 location = le32_to_cpu(t->tagLocation);
775 brelse(bh);
776 }
777 }
778 704
779 if (ident == TAG_IDENT_AVDP) { 705 t = (tag *)bh->b_data;
780 if (location == last[i] - sbi->s_session) { 706 ident = le16_to_cpu(t->tagIdent);
781 lastblock = last[i] - sbi->s_session; 707 location = le32_to_cpu(t->tagLocation);
782 sbi->s_anchor[0] = lastblock; 708 brelse(bh);
783 sbi->s_anchor[1] = lastblock - 256; 709 if (ident != TAG_IDENT_AVDP)
784 } else if (location == 710 return 0;
785 udf_variable_to_fixed(last[i]) - 711 return location == block;
786 sbi->s_session) { 712}
787 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
788 lastblock =
789 udf_variable_to_fixed(last[i]) -
790 sbi->s_session;
791 sbi->s_anchor[0] = lastblock;
792 sbi->s_anchor[1] = lastblock - 256 -
793 sbi->s_session;
794 } else {
795 udf_debug("Anchor found at block %d, "
796 "location mismatch %d.\n",
797 last[i], location);
798 }
799 } else if (ident == TAG_IDENT_FE ||
800 ident == TAG_IDENT_EFE) {
801 lastblock = last[i];
802 sbi->s_anchor[3] = 512;
803 } else {
804 ident = location = 0;
805 if (last[i] >= 256) {
806 bh = sb_bread(sb, last[i] - 256);
807 if (bh) {
808 tag *t = (tag *)bh->b_data;
809 ident = le16_to_cpu(
810 t->tagIdent);
811 location = le32_to_cpu(
812 t->tagLocation);
813 brelse(bh);
814 }
815 }
816 713
817 if (ident == TAG_IDENT_AVDP && 714/* Search for an anchor volume descriptor pointer */
818 location == last[i] - 256 - 715static sector_t udf_scan_anchors(struct super_block *sb, bool varconv,
819 sbi->s_session) { 716 sector_t lastblock)
820 lastblock = last[i]; 717{
821 sbi->s_anchor[1] = last[i] - 256; 718 sector_t last[6];
822 } else { 719 int i;
823 ident = location = 0; 720 struct udf_sb_info *sbi = UDF_SB(sb);
824 if (last[i] >= 312 + sbi->s_session) {
825 bh = sb_bread(sb,
826 last[i] - 312 -
827 sbi->s_session);
828 if (bh) {
829 tag *t = (tag *)
830 bh->b_data;
831 ident = le16_to_cpu(
832 t->tagIdent);
833 location = le32_to_cpu(
834 t->tagLocation);
835 brelse(bh);
836 }
837 }
838 721
839 if (ident == TAG_IDENT_AVDP && 722 last[0] = lastblock;
840 location == udf_variable_to_fixed(last[i]) - 256) { 723 last[1] = last[0] - 1;
841 UDF_SET_FLAG(sb, 724 last[2] = last[0] + 1;
842 UDF_FLAG_VARCONV); 725 last[3] = last[0] - 2;
843 lastblock = udf_variable_to_fixed(last[i]); 726 last[4] = last[0] - 150;
844 sbi->s_anchor[1] = lastblock - 256; 727 last[5] = last[0] - 152;
845 } 728
846 } 729 /* according to spec, anchor is in either:
847 } 730 * block 256
731 * lastblock-256
732 * lastblock
733 * however, if the disc isn't closed, it could be 512 */
734
735 for (i = 0; i < ARRAY_SIZE(last); i++) {
736 if (last[i] < 0)
737 continue;
738 if (last[i] >= sb->s_bdev->bd_inode->i_size >>
739 sb->s_blocksize_bits)
740 continue;
741
742 if (udf_check_anchor_block(sb, last[i], varconv)) {
743 sbi->s_anchor[0] = last[i];
744 sbi->s_anchor[1] = last[i] - 256;
745 return last[i];
848 } 746 }
849 }
850 747
851 if (!lastblock) { 748 if (last[i] < 256)
852 /* We haven't found the lastblock. check 312 */ 749 continue;
853 bh = sb_bread(sb, 312 + sbi->s_session);
854 if (bh) {
855 tag *t = (tag *)bh->b_data;
856 ident = le16_to_cpu(t->tagIdent);
857 location = le32_to_cpu(t->tagLocation);
858 brelse(bh);
859 750
860 if (ident == TAG_IDENT_AVDP && location == 256) 751 if (udf_check_anchor_block(sb, last[i] - 256, varconv)) {
861 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); 752 sbi->s_anchor[1] = last[i] - 256;
753 return last[i];
862 } 754 }
863 } 755 }
864 756
757 if (udf_check_anchor_block(sb, sbi->s_session + 256, varconv)) {
758 sbi->s_anchor[0] = sbi->s_session + 256;
759 return last[0];
760 }
761 if (udf_check_anchor_block(sb, sbi->s_session + 512, varconv)) {
762 sbi->s_anchor[0] = sbi->s_session + 512;
763 return last[0];
764 }
765 return 0;
766}
767
768/*
769 * Find an anchor volume descriptor. The function expects sbi->s_lastblock to
770 * be the last block on the media.
771 *
772 * Return 1 if not found, 0 if ok
773 *
774 */
775static void udf_find_anchor(struct super_block *sb)
776{
777 sector_t lastblock;
778 struct buffer_head *bh = NULL;
779 uint16_t ident;
780 int i;
781 struct udf_sb_info *sbi = UDF_SB(sb);
782
783 lastblock = udf_scan_anchors(sb, 0, sbi->s_last_block);
784 if (lastblock)
785 goto check_anchor;
786
787 /* No anchor found? Try VARCONV conversion of block numbers */
788 /* Firstly, we try to not convert number of the last block */
789 lastblock = udf_scan_anchors(sb, 1,
790 udf_variable_to_fixed(sbi->s_last_block));
791 if (lastblock) {
792 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
793 goto check_anchor;
794 }
795
796 /* Secondly, we try with converted number of the last block */
797 lastblock = udf_scan_anchors(sb, 1, sbi->s_last_block);
798 if (lastblock)
799 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
800
801check_anchor:
802 /*
803 * Check located anchors and the anchor block supplied via
804 * mount options
805 */
865 for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) { 806 for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
866 if (sbi->s_anchor[i]) { 807 if (!sbi->s_anchor[i])
867 bh = udf_read_tagged(sb, sbi->s_anchor[i], 808 continue;
868 sbi->s_anchor[i], &ident); 809 bh = udf_read_tagged(sb, sbi->s_anchor[i],
869 if (!bh) 810 sbi->s_anchor[i], &ident);
811 if (!bh)
812 sbi->s_anchor[i] = 0;
813 else {
814 brelse(bh);
815 if (ident != TAG_IDENT_AVDP)
870 sbi->s_anchor[i] = 0; 816 sbi->s_anchor[i] = 0;
871 else {
872 brelse(bh);
873 if ((ident != TAG_IDENT_AVDP) &&
874 (i || (ident != TAG_IDENT_FE &&
875 ident != TAG_IDENT_EFE)))
876 sbi->s_anchor[i] = 0;
877 }
878 } 817 }
879 } 818 }
880 819
@@ -971,27 +910,30 @@ static int udf_find_fileset(struct super_block *sb,
971 return 1; 910 return 1;
972} 911}
973 912
974static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh) 913static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
975{ 914{
976 struct primaryVolDesc *pvoldesc; 915 struct primaryVolDesc *pvoldesc;
977 time_t recording;
978 long recording_usec;
979 struct ustr instr; 916 struct ustr instr;
980 struct ustr outstr; 917 struct ustr outstr;
918 struct buffer_head *bh;
919 uint16_t ident;
920
921 bh = udf_read_tagged(sb, block, block, &ident);
922 if (!bh)
923 return 1;
924 BUG_ON(ident != TAG_IDENT_PVD);
981 925
982 pvoldesc = (struct primaryVolDesc *)bh->b_data; 926 pvoldesc = (struct primaryVolDesc *)bh->b_data;
983 927
984 if (udf_stamp_to_time(&recording, &recording_usec, 928 if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
985 lets_to_cpu(pvoldesc->recordingDateAndTime))) { 929 pvoldesc->recordingDateAndTime)) {
986 kernel_timestamp ts; 930#ifdef UDFFS_DEBUG
987 ts = lets_to_cpu(pvoldesc->recordingDateAndTime); 931 timestamp *ts = &pvoldesc->recordingDateAndTime;
988 udf_debug("recording time %ld/%ld, %04u/%02u/%02u" 932 udf_debug("recording time %04u/%02u/%02u"
989 " %02u:%02u (%x)\n", 933 " %02u:%02u (%x)\n",
990 recording, recording_usec, 934 le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
991 ts.year, ts.month, ts.day, ts.hour, 935 ts->minute, le16_to_cpu(ts->typeAndTimezone));
992 ts.minute, ts.typeAndTimezone); 936#endif
993 UDF_SB(sb)->s_record_time.tv_sec = recording;
994 UDF_SB(sb)->s_record_time.tv_nsec = recording_usec * 1000;
995 } 937 }
996 938
997 if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32)) 939 if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32))
@@ -1005,6 +947,104 @@ static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
1005 if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128)) 947 if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128))
1006 if (udf_CS0toUTF8(&outstr, &instr)) 948 if (udf_CS0toUTF8(&outstr, &instr))
1007 udf_debug("volSetIdent[] = '%s'\n", outstr.u_name); 949 udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
950
951 brelse(bh);
952 return 0;
953}
954
955static int udf_load_metadata_files(struct super_block *sb, int partition)
956{
957 struct udf_sb_info *sbi = UDF_SB(sb);
958 struct udf_part_map *map;
959 struct udf_meta_data *mdata;
960 kernel_lb_addr addr;
961 int fe_error = 0;
962
963 map = &sbi->s_partmaps[partition];
964 mdata = &map->s_type_specific.s_metadata;
965
966 /* metadata address */
967 addr.logicalBlockNum = mdata->s_meta_file_loc;
968 addr.partitionReferenceNum = map->s_partition_num;
969
970 udf_debug("Metadata file location: block = %d part = %d\n",
971 addr.logicalBlockNum, addr.partitionReferenceNum);
972
973 mdata->s_metadata_fe = udf_iget(sb, addr);
974
975 if (mdata->s_metadata_fe == NULL) {
976 udf_warning(sb, __func__, "metadata inode efe not found, "
977 "will try mirror inode.");
978 fe_error = 1;
979 } else if (UDF_I(mdata->s_metadata_fe)->i_alloc_type !=
980 ICBTAG_FLAG_AD_SHORT) {
981 udf_warning(sb, __func__, "metadata inode efe does not have "
982 "short allocation descriptors!");
983 fe_error = 1;
984 iput(mdata->s_metadata_fe);
985 mdata->s_metadata_fe = NULL;
986 }
987
988 /* mirror file entry */
989 addr.logicalBlockNum = mdata->s_mirror_file_loc;
990 addr.partitionReferenceNum = map->s_partition_num;
991
992 udf_debug("Mirror metadata file location: block = %d part = %d\n",
993 addr.logicalBlockNum, addr.partitionReferenceNum);
994
995 mdata->s_mirror_fe = udf_iget(sb, addr);
996
997 if (mdata->s_mirror_fe == NULL) {
998 if (fe_error) {
999 udf_error(sb, __func__, "mirror inode efe not found "
1000 "and metadata inode is missing too, exiting...");
1001 goto error_exit;
1002 } else
1003 udf_warning(sb, __func__, "mirror inode efe not found,"
1004 " but metadata inode is OK");
1005 } else if (UDF_I(mdata->s_mirror_fe)->i_alloc_type !=
1006 ICBTAG_FLAG_AD_SHORT) {
1007 udf_warning(sb, __func__, "mirror inode efe does not have "
1008 "short allocation descriptors!");
1009 iput(mdata->s_mirror_fe);
1010 mdata->s_mirror_fe = NULL;
1011 if (fe_error)
1012 goto error_exit;
1013 }
1014
1015 /*
1016 * bitmap file entry
1017 * Note:
1018 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
1019 */
1020 if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
1021 addr.logicalBlockNum = mdata->s_bitmap_file_loc;
1022 addr.partitionReferenceNum = map->s_partition_num;
1023
1024 udf_debug("Bitmap file location: block = %d part = %d\n",
1025 addr.logicalBlockNum, addr.partitionReferenceNum);
1026
1027 mdata->s_bitmap_fe = udf_iget(sb, addr);
1028
1029 if (mdata->s_bitmap_fe == NULL) {
1030 if (sb->s_flags & MS_RDONLY)
1031 udf_warning(sb, __func__, "bitmap inode efe "
1032 "not found but it's ok since the disc"
1033 " is mounted read-only");
1034 else {
1035 udf_error(sb, __func__, "bitmap inode efe not "
1036 "found and attempted read-write mount");
1037 goto error_exit;
1038 }
1039 }
1040 }
1041
1042 udf_debug("udf_load_metadata_files Ok\n");
1043
1044 return 0;
1045
1046error_exit:
1047 return 1;
1008} 1048}
1009 1049
1010static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, 1050static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1025,10 +1065,9 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
1025int udf_compute_nr_groups(struct super_block *sb, u32 partition) 1065int udf_compute_nr_groups(struct super_block *sb, u32 partition)
1026{ 1066{
1027 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; 1067 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1028 return (map->s_partition_len + 1068 return DIV_ROUND_UP(map->s_partition_len +
1029 (sizeof(struct spaceBitmapDesc) << 3) + 1069 (sizeof(struct spaceBitmapDesc) << 3),
1030 (sb->s_blocksize * 8) - 1) / 1070 sb->s_blocksize * 8);
1031 (sb->s_blocksize * 8);
1032} 1071}
1033 1072
1034static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) 1073static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
@@ -1059,134 +1098,241 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1059 return bitmap; 1098 return bitmap;
1060} 1099}
1061 1100
1062static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh) 1101static int udf_fill_partdesc_info(struct super_block *sb,
1102 struct partitionDesc *p, int p_index)
1103{
1104 struct udf_part_map *map;
1105 struct udf_sb_info *sbi = UDF_SB(sb);
1106 struct partitionHeaderDesc *phd;
1107
1108 map = &sbi->s_partmaps[p_index];
1109
1110 map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1111 map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1112
1113 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1114 map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1115 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1116 map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1117 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1118 map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1119 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1120 map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1121
1122 udf_debug("Partition (%d type %x) starts at physical %d, "
1123 "block length %d\n", p_index,
1124 map->s_partition_type, map->s_partition_root,
1125 map->s_partition_len);
1126
1127 if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1128 strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1129 return 0;
1130
1131 phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1132 if (phd->unallocSpaceTable.extLength) {
1133 kernel_lb_addr loc = {
1134 .logicalBlockNum = le32_to_cpu(
1135 phd->unallocSpaceTable.extPosition),
1136 .partitionReferenceNum = p_index,
1137 };
1138
1139 map->s_uspace.s_table = udf_iget(sb, loc);
1140 if (!map->s_uspace.s_table) {
1141 udf_debug("cannot load unallocSpaceTable (part %d)\n",
1142 p_index);
1143 return 1;
1144 }
1145 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1146 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
1147 p_index, map->s_uspace.s_table->i_ino);
1148 }
1149
1150 if (phd->unallocSpaceBitmap.extLength) {
1151 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1152 if (!bitmap)
1153 return 1;
1154 map->s_uspace.s_bitmap = bitmap;
1155 bitmap->s_extLength = le32_to_cpu(
1156 phd->unallocSpaceBitmap.extLength);
1157 bitmap->s_extPosition = le32_to_cpu(
1158 phd->unallocSpaceBitmap.extPosition);
1159 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1160 udf_debug("unallocSpaceBitmap (part %d) @ %d\n", p_index,
1161 bitmap->s_extPosition);
1162 }
1163
1164 if (phd->partitionIntegrityTable.extLength)
1165 udf_debug("partitionIntegrityTable (part %d)\n", p_index);
1166
1167 if (phd->freedSpaceTable.extLength) {
1168 kernel_lb_addr loc = {
1169 .logicalBlockNum = le32_to_cpu(
1170 phd->freedSpaceTable.extPosition),
1171 .partitionReferenceNum = p_index,
1172 };
1173
1174 map->s_fspace.s_table = udf_iget(sb, loc);
1175 if (!map->s_fspace.s_table) {
1176 udf_debug("cannot load freedSpaceTable (part %d)\n",
1177 p_index);
1178 return 1;
1179 }
1180
1181 map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
1182 udf_debug("freedSpaceTable (part %d) @ %ld\n",
1183 p_index, map->s_fspace.s_table->i_ino);
1184 }
1185
1186 if (phd->freedSpaceBitmap.extLength) {
1187 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1188 if (!bitmap)
1189 return 1;
1190 map->s_fspace.s_bitmap = bitmap;
1191 bitmap->s_extLength = le32_to_cpu(
1192 phd->freedSpaceBitmap.extLength);
1193 bitmap->s_extPosition = le32_to_cpu(
1194 phd->freedSpaceBitmap.extPosition);
1195 map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
1196 udf_debug("freedSpaceBitmap (part %d) @ %d\n", p_index,
1197 bitmap->s_extPosition);
1198 }
1199 return 0;
1200}
1201
1202static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1203{
1204 struct udf_sb_info *sbi = UDF_SB(sb);
1205 struct udf_part_map *map = &sbi->s_partmaps[p_index];
1206 kernel_lb_addr ino;
1207 struct buffer_head *bh = NULL;
1208 struct udf_inode_info *vati;
1209 uint32_t pos;
1210 struct virtualAllocationTable20 *vat20;
1211
1212 /* VAT file entry is in the last recorded block */
1213 ino.partitionReferenceNum = type1_index;
1214 ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
1215 sbi->s_vat_inode = udf_iget(sb, ino);
1216 if (!sbi->s_vat_inode)
1217 return 1;
1218
1219 if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1220 map->s_type_specific.s_virtual.s_start_offset = 0;
1221 map->s_type_specific.s_virtual.s_num_entries =
1222 (sbi->s_vat_inode->i_size - 36) >> 2;
1223 } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1224 vati = UDF_I(sbi->s_vat_inode);
1225 if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1226 pos = udf_block_map(sbi->s_vat_inode, 0);
1227 bh = sb_bread(sb, pos);
1228 if (!bh)
1229 return 1;
1230 vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1231 } else {
1232 vat20 = (struct virtualAllocationTable20 *)
1233 vati->i_ext.i_data;
1234 }
1235
1236 map->s_type_specific.s_virtual.s_start_offset =
1237 le16_to_cpu(vat20->lengthHeader);
1238 map->s_type_specific.s_virtual.s_num_entries =
1239 (sbi->s_vat_inode->i_size -
1240 map->s_type_specific.s_virtual.
1241 s_start_offset) >> 2;
1242 brelse(bh);
1243 }
1244 return 0;
1245}
1246
1247static int udf_load_partdesc(struct super_block *sb, sector_t block)
1063{ 1248{
1249 struct buffer_head *bh;
1064 struct partitionDesc *p; 1250 struct partitionDesc *p;
1065 int i;
1066 struct udf_part_map *map; 1251 struct udf_part_map *map;
1067 struct udf_sb_info *sbi; 1252 struct udf_sb_info *sbi = UDF_SB(sb);
1253 int i, type1_idx;
1254 uint16_t partitionNumber;
1255 uint16_t ident;
1256 int ret = 0;
1257
1258 bh = udf_read_tagged(sb, block, block, &ident);
1259 if (!bh)
1260 return 1;
1261 if (ident != TAG_IDENT_PD)
1262 goto out_bh;
1068 1263
1069 p = (struct partitionDesc *)bh->b_data; 1264 p = (struct partitionDesc *)bh->b_data;
1070 sbi = UDF_SB(sb); 1265 partitionNumber = le16_to_cpu(p->partitionNumber);
1071 1266
1267 /* First scan for TYPE1, SPARABLE and METADATA partitions */
1072 for (i = 0; i < sbi->s_partitions; i++) { 1268 for (i = 0; i < sbi->s_partitions; i++) {
1073 map = &sbi->s_partmaps[i]; 1269 map = &sbi->s_partmaps[i];
1074 udf_debug("Searching map: (%d == %d)\n", 1270 udf_debug("Searching map: (%d == %d)\n",
1075 map->s_partition_num, 1271 map->s_partition_num, partitionNumber);
1076 le16_to_cpu(p->partitionNumber)); 1272 if (map->s_partition_num == partitionNumber &&
1077 if (map->s_partition_num == 1273 (map->s_partition_type == UDF_TYPE1_MAP15 ||
1078 le16_to_cpu(p->partitionNumber)) { 1274 map->s_partition_type == UDF_SPARABLE_MAP15))
1079 map->s_partition_len =
1080 le32_to_cpu(p->partitionLength); /* blocks */
1081 map->s_partition_root =
1082 le32_to_cpu(p->partitionStartingLocation);
1083 if (p->accessType ==
1084 cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1085 map->s_partition_flags |=
1086 UDF_PART_FLAG_READ_ONLY;
1087 if (p->accessType ==
1088 cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1089 map->s_partition_flags |=
1090 UDF_PART_FLAG_WRITE_ONCE;
1091 if (p->accessType ==
1092 cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1093 map->s_partition_flags |=
1094 UDF_PART_FLAG_REWRITABLE;
1095 if (p->accessType ==
1096 cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1097 map->s_partition_flags |=
1098 UDF_PART_FLAG_OVERWRITABLE;
1099
1100 if (!strcmp(p->partitionContents.ident,
1101 PD_PARTITION_CONTENTS_NSR02) ||
1102 !strcmp(p->partitionContents.ident,
1103 PD_PARTITION_CONTENTS_NSR03)) {
1104 struct partitionHeaderDesc *phd;
1105
1106 phd = (struct partitionHeaderDesc *)
1107 (p->partitionContentsUse);
1108 if (phd->unallocSpaceTable.extLength) {
1109 kernel_lb_addr loc = {
1110 .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition),
1111 .partitionReferenceNum = i,
1112 };
1113
1114 map->s_uspace.s_table =
1115 udf_iget(sb, loc);
1116 if (!map->s_uspace.s_table) {
1117 udf_debug("cannot load unallocSpaceTable (part %d)\n", i);
1118 return 1;
1119 }
1120 map->s_partition_flags |=
1121 UDF_PART_FLAG_UNALLOC_TABLE;
1122 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
1123 i, map->s_uspace.s_table->i_ino);
1124 }
1125 if (phd->unallocSpaceBitmap.extLength) {
1126 struct udf_bitmap *bitmap =
1127 udf_sb_alloc_bitmap(sb, i);
1128 map->s_uspace.s_bitmap = bitmap;
1129 if (bitmap != NULL) {
1130 bitmap->s_extLength =
1131 le32_to_cpu(phd->unallocSpaceBitmap.extLength);
1132 bitmap->s_extPosition =
1133 le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
1134 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1135 udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
1136 i, bitmap->s_extPosition);
1137 }
1138 }
1139 if (phd->partitionIntegrityTable.extLength)
1140 udf_debug("partitionIntegrityTable (part %d)\n", i);
1141 if (phd->freedSpaceTable.extLength) {
1142 kernel_lb_addr loc = {
1143 .logicalBlockNum = le32_to_cpu(phd->freedSpaceTable.extPosition),
1144 .partitionReferenceNum = i,
1145 };
1146
1147 map->s_fspace.s_table =
1148 udf_iget(sb, loc);
1149 if (!map->s_fspace.s_table) {
1150 udf_debug("cannot load freedSpaceTable (part %d)\n", i);
1151 return 1;
1152 }
1153 map->s_partition_flags |=
1154 UDF_PART_FLAG_FREED_TABLE;
1155 udf_debug("freedSpaceTable (part %d) @ %ld\n",
1156 i, map->s_fspace.s_table->i_ino);
1157 }
1158 if (phd->freedSpaceBitmap.extLength) {
1159 struct udf_bitmap *bitmap =
1160 udf_sb_alloc_bitmap(sb, i);
1161 map->s_fspace.s_bitmap = bitmap;
1162 if (bitmap != NULL) {
1163 bitmap->s_extLength =
1164 le32_to_cpu(phd->freedSpaceBitmap.extLength);
1165 bitmap->s_extPosition =
1166 le32_to_cpu(phd->freedSpaceBitmap.extPosition);
1167 map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
1168 udf_debug("freedSpaceBitmap (part %d) @ %d\n",
1169 i, bitmap->s_extPosition);
1170 }
1171 }
1172 }
1173 break; 1275 break;
1174 }
1175 } 1276 }
1176 if (i == sbi->s_partitions) 1277
1278 if (i >= sbi->s_partitions) {
1177 udf_debug("Partition (%d) not found in partition map\n", 1279 udf_debug("Partition (%d) not found in partition map\n",
1178 le16_to_cpu(p->partitionNumber)); 1280 partitionNumber);
1179 else 1281 goto out_bh;
1180 udf_debug("Partition (%d:%d type %x) starts at physical %d, " 1282 }
1181 "block length %d\n", 1283
1182 le16_to_cpu(p->partitionNumber), i, 1284 ret = udf_fill_partdesc_info(sb, p, i);
1183 map->s_partition_type, 1285
1184 map->s_partition_root, 1286 /*
1185 map->s_partition_len); 1287 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1186 return 0; 1288 * PHYSICAL partitions are already set up
1289 */
1290 type1_idx = i;
1291 for (i = 0; i < sbi->s_partitions; i++) {
1292 map = &sbi->s_partmaps[i];
1293
1294 if (map->s_partition_num == partitionNumber &&
1295 (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1296 map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1297 map->s_partition_type == UDF_METADATA_MAP25))
1298 break;
1299 }
1300
1301 if (i >= sbi->s_partitions)
1302 goto out_bh;
1303
1304 ret = udf_fill_partdesc_info(sb, p, i);
1305 if (ret)
1306 goto out_bh;
1307
1308 if (map->s_partition_type == UDF_METADATA_MAP25) {
1309 ret = udf_load_metadata_files(sb, i);
1310 if (ret) {
1311 printk(KERN_ERR "UDF-fs: error loading MetaData "
1312 "partition map %d\n", i);
1313 goto out_bh;
1314 }
1315 } else {
1316 ret = udf_load_vat(sb, i, type1_idx);
1317 if (ret)
1318 goto out_bh;
1319 /*
1320 * Mark filesystem read-only if we have a partition with
1321 * virtual map since we don't handle writing to it (we
1322 * overwrite blocks instead of relocating them).
1323 */
1324 sb->s_flags |= MS_RDONLY;
1325 printk(KERN_NOTICE "UDF-fs: Filesystem marked read-only "
1326 "because writing to pseudooverwrite partition is "
1327 "not implemented.\n");
1328 }
1329out_bh:
1330 /* In case loading failed, we handle cleanup in udf_fill_super */
1331 brelse(bh);
1332 return ret;
1187} 1333}
1188 1334
1189static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh, 1335static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1190 kernel_lb_addr *fileset) 1336 kernel_lb_addr *fileset)
1191{ 1337{
1192 struct logicalVolDesc *lvd; 1338 struct logicalVolDesc *lvd;
@@ -1194,12 +1340,21 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
1194 uint8_t type; 1340 uint8_t type;
1195 struct udf_sb_info *sbi = UDF_SB(sb); 1341 struct udf_sb_info *sbi = UDF_SB(sb);
1196 struct genericPartitionMap *gpm; 1342 struct genericPartitionMap *gpm;
1343 uint16_t ident;
1344 struct buffer_head *bh;
1345 int ret = 0;
1197 1346
1347 bh = udf_read_tagged(sb, block, block, &ident);
1348 if (!bh)
1349 return 1;
1350 BUG_ON(ident != TAG_IDENT_LVD);
1198 lvd = (struct logicalVolDesc *)bh->b_data; 1351 lvd = (struct logicalVolDesc *)bh->b_data;
1199 1352
1200 i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); 1353 i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1201 if (i != 0) 1354 if (i != 0) {
1202 return i; 1355 ret = i;
1356 goto out_bh;
1357 }
1203 1358
1204 for (i = 0, offset = 0; 1359 for (i = 0, offset = 0;
1205 i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength); 1360 i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
@@ -1223,12 +1378,12 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
1223 u16 suf = 1378 u16 suf =
1224 le16_to_cpu(((__le16 *)upm2->partIdent. 1379 le16_to_cpu(((__le16 *)upm2->partIdent.
1225 identSuffix)[0]); 1380 identSuffix)[0]);
1226 if (suf == 0x0150) { 1381 if (suf < 0x0200) {
1227 map->s_partition_type = 1382 map->s_partition_type =
1228 UDF_VIRTUAL_MAP15; 1383 UDF_VIRTUAL_MAP15;
1229 map->s_partition_func = 1384 map->s_partition_func =
1230 udf_get_pblock_virt15; 1385 udf_get_pblock_virt15;
1231 } else if (suf == 0x0200) { 1386 } else {
1232 map->s_partition_type = 1387 map->s_partition_type =
1233 UDF_VIRTUAL_MAP20; 1388 UDF_VIRTUAL_MAP20;
1234 map->s_partition_func = 1389 map->s_partition_func =
@@ -1238,7 +1393,6 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
1238 UDF_ID_SPARABLE, 1393 UDF_ID_SPARABLE,
1239 strlen(UDF_ID_SPARABLE))) { 1394 strlen(UDF_ID_SPARABLE))) {
1240 uint32_t loc; 1395 uint32_t loc;
1241 uint16_t ident;
1242 struct sparingTable *st; 1396 struct sparingTable *st;
1243 struct sparablePartitionMap *spm = 1397 struct sparablePartitionMap *spm =
1244 (struct sparablePartitionMap *)gpm; 1398 (struct sparablePartitionMap *)gpm;
@@ -1256,22 +1410,64 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
1256 map->s_type_specific.s_sparing. 1410 map->s_type_specific.s_sparing.
1257 s_spar_map[j] = bh2; 1411 s_spar_map[j] = bh2;
1258 1412
1259 if (bh2 != NULL) { 1413 if (bh2 == NULL)
1260 st = (struct sparingTable *) 1414 continue;
1261 bh2->b_data; 1415
1262 if (ident != 0 || strncmp( 1416 st = (struct sparingTable *)bh2->b_data;
1263 st->sparingIdent.ident, 1417 if (ident != 0 || strncmp(
1264 UDF_ID_SPARING, 1418 st->sparingIdent.ident,
1265 strlen(UDF_ID_SPARING))) { 1419 UDF_ID_SPARING,
1266 brelse(bh2); 1420 strlen(UDF_ID_SPARING))) {
1267 map->s_type_specific. 1421 brelse(bh2);
1268 s_sparing. 1422 map->s_type_specific.s_sparing.
1269 s_spar_map[j] = 1423 s_spar_map[j] = NULL;
1270 NULL;
1271 }
1272 } 1424 }
1273 } 1425 }
1274 map->s_partition_func = udf_get_pblock_spar15; 1426 map->s_partition_func = udf_get_pblock_spar15;
1427 } else if (!strncmp(upm2->partIdent.ident,
1428 UDF_ID_METADATA,
1429 strlen(UDF_ID_METADATA))) {
1430 struct udf_meta_data *mdata =
1431 &map->s_type_specific.s_metadata;
1432 struct metadataPartitionMap *mdm =
1433 (struct metadataPartitionMap *)
1434 &(lvd->partitionMaps[offset]);
1435 udf_debug("Parsing Logical vol part %d "
1436 "type %d id=%s\n", i, type,
1437 UDF_ID_METADATA);
1438
1439 map->s_partition_type = UDF_METADATA_MAP25;
1440 map->s_partition_func = udf_get_pblock_meta25;
1441
1442 mdata->s_meta_file_loc =
1443 le32_to_cpu(mdm->metadataFileLoc);
1444 mdata->s_mirror_file_loc =
1445 le32_to_cpu(mdm->metadataMirrorFileLoc);
1446 mdata->s_bitmap_file_loc =
1447 le32_to_cpu(mdm->metadataBitmapFileLoc);
1448 mdata->s_alloc_unit_size =
1449 le32_to_cpu(mdm->allocUnitSize);
1450 mdata->s_align_unit_size =
1451 le16_to_cpu(mdm->alignUnitSize);
1452 mdata->s_dup_md_flag =
1453 mdm->flags & 0x01;
1454
1455 udf_debug("Metadata Ident suffix=0x%x\n",
1456 (le16_to_cpu(
1457 ((__le16 *)
1458 mdm->partIdent.identSuffix)[0])));
1459 udf_debug("Metadata part num=%d\n",
1460 le16_to_cpu(mdm->partitionNum));
1461 udf_debug("Metadata part alloc unit size=%d\n",
1462 le32_to_cpu(mdm->allocUnitSize));
1463 udf_debug("Metadata file loc=%d\n",
1464 le32_to_cpu(mdm->metadataFileLoc));
1465 udf_debug("Mirror file loc=%d\n",
1466 le32_to_cpu(mdm->metadataMirrorFileLoc));
1467 udf_debug("Bitmap file loc=%d\n",
1468 le32_to_cpu(mdm->metadataBitmapFileLoc));
1469 udf_debug("Duplicate Flag: %d %d\n",
1470 mdata->s_dup_md_flag, mdm->flags);
1275 } else { 1471 } else {
1276 udf_debug("Unknown ident: %s\n", 1472 udf_debug("Unknown ident: %s\n",
1277 upm2->partIdent.ident); 1473 upm2->partIdent.ident);
@@ -1296,7 +1492,9 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
1296 if (lvd->integritySeqExt.extLength) 1492 if (lvd->integritySeqExt.extLength)
1297 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); 1493 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1298 1494
1299 return 0; 1495out_bh:
1496 brelse(bh);
1497 return ret;
1300} 1498}
1301 1499
1302/* 1500/*
@@ -1345,7 +1543,7 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
1345 * July 1, 1997 - Andrew E. Mileski 1543 * July 1, 1997 - Andrew E. Mileski
1346 * Written, tested, and released. 1544 * Written, tested, and released.
1347 */ 1545 */
1348static int udf_process_sequence(struct super_block *sb, long block, 1546static noinline int udf_process_sequence(struct super_block *sb, long block,
1349 long lastblock, kernel_lb_addr *fileset) 1547 long lastblock, kernel_lb_addr *fileset)
1350{ 1548{
1351 struct buffer_head *bh = NULL; 1549 struct buffer_head *bh = NULL;
@@ -1354,19 +1552,25 @@ static int udf_process_sequence(struct super_block *sb, long block,
1354 struct generic_desc *gd; 1552 struct generic_desc *gd;
1355 struct volDescPtr *vdp; 1553 struct volDescPtr *vdp;
1356 int done = 0; 1554 int done = 0;
1357 int i, j;
1358 uint32_t vdsn; 1555 uint32_t vdsn;
1359 uint16_t ident; 1556 uint16_t ident;
1360 long next_s = 0, next_e = 0; 1557 long next_s = 0, next_e = 0;
1361 1558
1362 memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); 1559 memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1363 1560
1364 /* Read the main descriptor sequence */ 1561 /*
1562 * Read the main descriptor sequence and find which descriptors
1563 * are in it.
1564 */
1365 for (; (!done && block <= lastblock); block++) { 1565 for (; (!done && block <= lastblock); block++) {
1366 1566
1367 bh = udf_read_tagged(sb, block, block, &ident); 1567 bh = udf_read_tagged(sb, block, block, &ident);
1368 if (!bh) 1568 if (!bh) {
1369 break; 1569 printk(KERN_ERR "udf: Block %Lu of volume descriptor "
1570 "sequence is corrupted or we could not read "
1571 "it.\n", (unsigned long long)block);
1572 return 1;
1573 }
1370 1574
1371 /* Process each descriptor (ISO 13346 3/8.3-8.4) */ 1575 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1372 gd = (struct generic_desc *)bh->b_data; 1576 gd = (struct generic_desc *)bh->b_data;
@@ -1432,41 +1636,31 @@ static int udf_process_sequence(struct super_block *sb, long block,
1432 } 1636 }
1433 brelse(bh); 1637 brelse(bh);
1434 } 1638 }
1435 for (i = 0; i < VDS_POS_LENGTH; i++) { 1639 /*
1436 if (vds[i].block) { 1640 * Now read interesting descriptors again and process them
1437 bh = udf_read_tagged(sb, vds[i].block, vds[i].block, 1641 * in a suitable order
1438 &ident); 1642 */
1439 1643 if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1440 if (i == VDS_POS_PRIMARY_VOL_DESC) { 1644 printk(KERN_ERR "udf: Primary Volume Descriptor not found!\n");
1441 udf_load_pvoldesc(sb, bh); 1645 return 1;
1442 } else if (i == VDS_POS_LOGICAL_VOL_DESC) { 1646 }
1443 if (udf_load_logicalvol(sb, bh, fileset)) { 1647 if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
1444 brelse(bh); 1648 return 1;
1445 return 1; 1649
1446 } 1650 if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
1447 } else if (i == VDS_POS_PARTITION_DESC) { 1651 vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
1448 struct buffer_head *bh2 = NULL; 1652 return 1;
1449 if (udf_load_partdesc(sb, bh)) { 1653
1450 brelse(bh); 1654 if (vds[VDS_POS_PARTITION_DESC].block) {
1451 return 1; 1655 /*
1452 } 1656 * We rescan the whole descriptor sequence to find
1453 for (j = vds[i].block + 1; 1657 * partition descriptor blocks and process them.
1454 j < vds[VDS_POS_TERMINATING_DESC].block; 1658 */
1455 j++) { 1659 for (block = vds[VDS_POS_PARTITION_DESC].block;
1456 bh2 = udf_read_tagged(sb, j, j, &ident); 1660 block < vds[VDS_POS_TERMINATING_DESC].block;
1457 gd = (struct generic_desc *)bh2->b_data; 1661 block++)
1458 if (ident == TAG_IDENT_PD) 1662 if (udf_load_partdesc(sb, block))
1459 if (udf_load_partdesc(sb, 1663 return 1;
1460 bh2)) {
1461 brelse(bh);
1462 brelse(bh2);
1463 return 1;
1464 }
1465 brelse(bh2);
1466 }
1467 }
1468 brelse(bh);
1469 }
1470 } 1664 }
1471 1665
1472 return 0; 1666 return 0;
@@ -1478,6 +1672,7 @@ static int udf_process_sequence(struct super_block *sb, long block,
1478static int udf_check_valid(struct super_block *sb, int novrs, int silent) 1672static int udf_check_valid(struct super_block *sb, int novrs, int silent)
1479{ 1673{
1480 long block; 1674 long block;
1675 struct udf_sb_info *sbi = UDF_SB(sb);
1481 1676
1482 if (novrs) { 1677 if (novrs) {
1483 udf_debug("Validity check skipped because of novrs option\n"); 1678 udf_debug("Validity check skipped because of novrs option\n");
@@ -1485,27 +1680,22 @@ static int udf_check_valid(struct super_block *sb, int novrs, int silent)
1485 } 1680 }
1486 /* Check that it is NSR02 compliant */ 1681 /* Check that it is NSR02 compliant */
1487 /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */ 1682 /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
1488 else { 1683 block = udf_vrs(sb, silent);
1489 block = udf_vrs(sb, silent); 1684 if (block == -1)
1490 if (block == -1) { 1685 udf_debug("Failed to read byte 32768. Assuming open "
1491 struct udf_sb_info *sbi = UDF_SB(sb); 1686 "disc. Skipping validity check\n");
1492 udf_debug("Failed to read byte 32768. Assuming open " 1687 if (block && !sbi->s_last_block)
1493 "disc. Skipping validity check\n"); 1688 sbi->s_last_block = udf_get_last_block(sb);
1494 if (!sbi->s_last_block) 1689 return !block;
1495 sbi->s_last_block = udf_get_last_block(sb);
1496 return 0;
1497 } else
1498 return !block;
1499 }
1500} 1690}
1501 1691
1502static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset) 1692static int udf_load_sequence(struct super_block *sb, kernel_lb_addr *fileset)
1503{ 1693{
1504 struct anchorVolDescPtr *anchor; 1694 struct anchorVolDescPtr *anchor;
1505 uint16_t ident; 1695 uint16_t ident;
1506 struct buffer_head *bh; 1696 struct buffer_head *bh;
1507 long main_s, main_e, reserve_s, reserve_e; 1697 long main_s, main_e, reserve_s, reserve_e;
1508 int i, j; 1698 int i;
1509 struct udf_sb_info *sbi; 1699 struct udf_sb_info *sbi;
1510 1700
1511 if (!sb) 1701 if (!sb)
@@ -1515,6 +1705,7 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
1515 for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) { 1705 for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) {
1516 if (!sbi->s_anchor[i]) 1706 if (!sbi->s_anchor[i])
1517 continue; 1707 continue;
1708
1518 bh = udf_read_tagged(sb, sbi->s_anchor[i], sbi->s_anchor[i], 1709 bh = udf_read_tagged(sb, sbi->s_anchor[i], sbi->s_anchor[i],
1519 &ident); 1710 &ident);
1520 if (!bh) 1711 if (!bh)
@@ -1553,76 +1744,6 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
1553 } 1744 }
1554 udf_debug("Using anchor in block %d\n", sbi->s_anchor[i]); 1745 udf_debug("Using anchor in block %d\n", sbi->s_anchor[i]);
1555 1746
1556 for (i = 0; i < sbi->s_partitions; i++) {
1557 kernel_lb_addr uninitialized_var(ino);
1558 struct udf_part_map *map = &sbi->s_partmaps[i];
1559 switch (map->s_partition_type) {
1560 case UDF_VIRTUAL_MAP15:
1561 case UDF_VIRTUAL_MAP20:
1562 if (!sbi->s_last_block) {
1563 sbi->s_last_block = udf_get_last_block(sb);
1564 udf_find_anchor(sb);
1565 }
1566
1567 if (!sbi->s_last_block) {
1568 udf_debug("Unable to determine Lastblock (For "
1569 "Virtual Partition)\n");
1570 return 1;
1571 }
1572
1573 for (j = 0; j < sbi->s_partitions; j++) {
1574 struct udf_part_map *map2 = &sbi->s_partmaps[j];
1575 if (j != i &&
1576 map->s_volumeseqnum ==
1577 map2->s_volumeseqnum &&
1578 map->s_partition_num ==
1579 map2->s_partition_num) {
1580 ino.partitionReferenceNum = j;
1581 ino.logicalBlockNum =
1582 sbi->s_last_block -
1583 map2->s_partition_root;
1584 break;
1585 }
1586 }
1587
1588 if (j == sbi->s_partitions)
1589 return 1;
1590
1591 sbi->s_vat_inode = udf_iget(sb, ino);
1592 if (!sbi->s_vat_inode)
1593 return 1;
1594
1595 if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1596 map->s_type_specific.s_virtual.s_start_offset =
1597 udf_ext0_offset(sbi->s_vat_inode);
1598 map->s_type_specific.s_virtual.s_num_entries =
1599 (sbi->s_vat_inode->i_size - 36) >> 2;
1600 } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1601 uint32_t pos;
1602 struct virtualAllocationTable20 *vat20;
1603
1604 pos = udf_block_map(sbi->s_vat_inode, 0);
1605 bh = sb_bread(sb, pos);
1606 if (!bh)
1607 return 1;
1608 vat20 = (struct virtualAllocationTable20 *)
1609 bh->b_data +
1610 udf_ext0_offset(sbi->s_vat_inode);
1611 map->s_type_specific.s_virtual.s_start_offset =
1612 le16_to_cpu(vat20->lengthHeader) +
1613 udf_ext0_offset(sbi->s_vat_inode);
1614 map->s_type_specific.s_virtual.s_num_entries =
1615 (sbi->s_vat_inode->i_size -
1616 map->s_type_specific.s_virtual.
1617 s_start_offset) >> 2;
1618 brelse(bh);
1619 }
1620 map->s_partition_root = udf_get_pblock(sb, 0, i, 0);
1621 map->s_partition_len =
1622 sbi->s_partmaps[ino.partitionReferenceNum].
1623 s_partition_len;
1624 }
1625 }
1626 return 0; 1747 return 0;
1627} 1748}
1628 1749
@@ -1630,65 +1751,61 @@ static void udf_open_lvid(struct super_block *sb)
1630{ 1751{
1631 struct udf_sb_info *sbi = UDF_SB(sb); 1752 struct udf_sb_info *sbi = UDF_SB(sb);
1632 struct buffer_head *bh = sbi->s_lvid_bh; 1753 struct buffer_head *bh = sbi->s_lvid_bh;
1633 if (bh) { 1754 struct logicalVolIntegrityDesc *lvid;
1634 kernel_timestamp cpu_time; 1755 struct logicalVolIntegrityDescImpUse *lvidiu;
1635 struct logicalVolIntegrityDesc *lvid = 1756 if (!bh)
1636 (struct logicalVolIntegrityDesc *)bh->b_data; 1757 return;
1637 struct logicalVolIntegrityDescImpUse *lvidiu =
1638 udf_sb_lvidiu(sbi);
1639 1758
1640 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1759 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1641 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1760 lvidiu = udf_sb_lvidiu(sbi);
1642 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
1643 lvid->recordingDateAndTime = cpu_to_lets(cpu_time);
1644 lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
1645 1761
1646 lvid->descTag.descCRC = cpu_to_le16( 1762 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1647 udf_crc((char *)lvid + sizeof(tag), 1763 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1648 le16_to_cpu(lvid->descTag.descCRCLength), 1764 udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
1649 0)); 1765 CURRENT_TIME);
1766 lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
1650 1767
1651 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); 1768 lvid->descTag.descCRC = cpu_to_le16(
1652 mark_buffer_dirty(bh); 1769 crc_itu_t(0, (char *)lvid + sizeof(tag),
1653 } 1770 le16_to_cpu(lvid->descTag.descCRCLength)));
1771
1772 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1773 mark_buffer_dirty(bh);
1654} 1774}
1655 1775
1656static void udf_close_lvid(struct super_block *sb) 1776static void udf_close_lvid(struct super_block *sb)
1657{ 1777{
1658 kernel_timestamp cpu_time;
1659 struct udf_sb_info *sbi = UDF_SB(sb); 1778 struct udf_sb_info *sbi = UDF_SB(sb);
1660 struct buffer_head *bh = sbi->s_lvid_bh; 1779 struct buffer_head *bh = sbi->s_lvid_bh;
1661 struct logicalVolIntegrityDesc *lvid; 1780 struct logicalVolIntegrityDesc *lvid;
1781 struct logicalVolIntegrityDescImpUse *lvidiu;
1662 1782
1663 if (!bh) 1783 if (!bh)
1664 return; 1784 return;
1665 1785
1666 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 1786 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1667 1787
1668 if (lvid->integrityType == LVID_INTEGRITY_TYPE_OPEN) { 1788 if (lvid->integrityType != LVID_INTEGRITY_TYPE_OPEN)
1669 struct logicalVolIntegrityDescImpUse *lvidiu = 1789 return;
1670 udf_sb_lvidiu(sbi); 1790
1671 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1791 lvidiu = udf_sb_lvidiu(sbi);
1672 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1792 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1673 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) 1793 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1674 lvid->recordingDateAndTime = cpu_to_lets(cpu_time); 1794 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
1675 if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev)) 1795 if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
1676 lvidiu->maxUDFWriteRev = 1796 lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
1677 cpu_to_le16(UDF_MAX_WRITE_VERSION); 1797 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
1678 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev)) 1798 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
1679 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev); 1799 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
1680 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev)) 1800 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
1681 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev); 1801 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
1682 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); 1802
1683 1803 lvid->descTag.descCRC = cpu_to_le16(
1684 lvid->descTag.descCRC = cpu_to_le16( 1804 crc_itu_t(0, (char *)lvid + sizeof(tag),
1685 udf_crc((char *)lvid + sizeof(tag), 1805 le16_to_cpu(lvid->descTag.descCRCLength)));
1686 le16_to_cpu(lvid->descTag.descCRCLength), 1806
1687 0)); 1807 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1688 1808 mark_buffer_dirty(bh);
1689 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1690 mark_buffer_dirty(bh);
1691 }
1692} 1809}
1693 1810
1694static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) 1811static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
@@ -1708,22 +1825,35 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
1708 vfree(bitmap); 1825 vfree(bitmap);
1709} 1826}
1710 1827
1711/* 1828static void udf_free_partition(struct udf_part_map *map)
1712 * udf_read_super 1829{
1713 * 1830 int i;
1714 * PURPOSE 1831 struct udf_meta_data *mdata;
1715 * Complete the specified super block. 1832
1716 * 1833 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
1717 * PRE-CONDITIONS 1834 iput(map->s_uspace.s_table);
1718 * sb Pointer to superblock to complete - never NULL. 1835 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
1719 * sb->s_dev Device to read suberblock from. 1836 iput(map->s_fspace.s_table);
1720 * options Pointer to mount options. 1837 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
1721 * silent Silent flag. 1838 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
1722 * 1839 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
1723 * HISTORY 1840 udf_sb_free_bitmap(map->s_fspace.s_bitmap);
1724 * July 1, 1997 - Andrew E. Mileski 1841 if (map->s_partition_type == UDF_SPARABLE_MAP15)
1725 * Written, tested, and released. 1842 for (i = 0; i < 4; i++)
1726 */ 1843 brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
1844 else if (map->s_partition_type == UDF_METADATA_MAP25) {
1845 mdata = &map->s_type_specific.s_metadata;
1846 iput(mdata->s_metadata_fe);
1847 mdata->s_metadata_fe = NULL;
1848
1849 iput(mdata->s_mirror_fe);
1850 mdata->s_mirror_fe = NULL;
1851
1852 iput(mdata->s_bitmap_fe);
1853 mdata->s_bitmap_fe = NULL;
1854 }
1855}
1856
1727static int udf_fill_super(struct super_block *sb, void *options, int silent) 1857static int udf_fill_super(struct super_block *sb, void *options, int silent)
1728{ 1858{
1729 int i; 1859 int i;
@@ -1776,8 +1906,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1776 sbi->s_nls_map = uopt.nls_map; 1906 sbi->s_nls_map = uopt.nls_map;
1777 1907
1778 /* Set the block size for all transfers */ 1908 /* Set the block size for all transfers */
1779 if (!udf_set_blocksize(sb, uopt.blocksize)) 1909 if (!sb_min_blocksize(sb, uopt.blocksize)) {
1910 udf_debug("Bad block size (%d)\n", uopt.blocksize);
1911 printk(KERN_ERR "udf: bad block size (%d)\n", uopt.blocksize);
1780 goto error_out; 1912 goto error_out;
1913 }
1781 1914
1782 if (uopt.session == 0xFFFFFFFF) 1915 if (uopt.session == 0xFFFFFFFF)
1783 sbi->s_session = udf_get_last_session(sb); 1916 sbi->s_session = udf_get_last_session(sb);
@@ -1789,7 +1922,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1789 sbi->s_last_block = uopt.lastblock; 1922 sbi->s_last_block = uopt.lastblock;
1790 sbi->s_anchor[0] = sbi->s_anchor[1] = 0; 1923 sbi->s_anchor[0] = sbi->s_anchor[1] = 0;
1791 sbi->s_anchor[2] = uopt.anchor; 1924 sbi->s_anchor[2] = uopt.anchor;
1792 sbi->s_anchor[3] = 256;
1793 1925
1794 if (udf_check_valid(sb, uopt.novrs, silent)) { 1926 if (udf_check_valid(sb, uopt.novrs, silent)) {
1795 /* read volume recognition sequences */ 1927 /* read volume recognition sequences */
@@ -1806,7 +1938,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1806 sb->s_magic = UDF_SUPER_MAGIC; 1938 sb->s_magic = UDF_SUPER_MAGIC;
1807 sb->s_time_gran = 1000; 1939 sb->s_time_gran = 1000;
1808 1940
1809 if (udf_load_partition(sb, &fileset)) { 1941 if (udf_load_sequence(sb, &fileset)) {
1810 printk(KERN_WARNING "UDF-fs: No partition found (1)\n"); 1942 printk(KERN_WARNING "UDF-fs: No partition found (1)\n");
1811 goto error_out; 1943 goto error_out;
1812 } 1944 }
@@ -1856,12 +1988,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1856 } 1988 }
1857 1989
1858 if (!silent) { 1990 if (!silent) {
1859 kernel_timestamp ts; 1991 timestamp ts;
1860 udf_time_to_stamp(&ts, sbi->s_record_time); 1992 udf_time_to_disk_stamp(&ts, sbi->s_record_time);
1861 udf_info("UDF: Mounting volume '%s', " 1993 udf_info("UDF: Mounting volume '%s', "
1862 "timestamp %04u/%02u/%02u %02u:%02u (%x)\n", 1994 "timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
1863 sbi->s_volume_ident, ts.year, ts.month, ts.day, 1995 sbi->s_volume_ident, le16_to_cpu(ts.year), ts.month, ts.day,
1864 ts.hour, ts.minute, ts.typeAndTimezone); 1996 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
1865 } 1997 }
1866 if (!(sb->s_flags & MS_RDONLY)) 1998 if (!(sb->s_flags & MS_RDONLY))
1867 udf_open_lvid(sb); 1999 udf_open_lvid(sb);
@@ -1890,21 +2022,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1890error_out: 2022error_out:
1891 if (sbi->s_vat_inode) 2023 if (sbi->s_vat_inode)
1892 iput(sbi->s_vat_inode); 2024 iput(sbi->s_vat_inode);
1893 if (sbi->s_partitions) { 2025 if (sbi->s_partitions)
1894 struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition]; 2026 for (i = 0; i < sbi->s_partitions; i++)
1895 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) 2027 udf_free_partition(&sbi->s_partmaps[i]);
1896 iput(map->s_uspace.s_table);
1897 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
1898 iput(map->s_fspace.s_table);
1899 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
1900 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
1901 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
1902 udf_sb_free_bitmap(map->s_fspace.s_bitmap);
1903 if (map->s_partition_type == UDF_SPARABLE_MAP15)
1904 for (i = 0; i < 4; i++)
1905 brelse(map->s_type_specific.s_sparing.
1906 s_spar_map[i]);
1907 }
1908#ifdef CONFIG_UDF_NLS 2028#ifdef CONFIG_UDF_NLS
1909 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) 2029 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
1910 unload_nls(sbi->s_nls_map); 2030 unload_nls(sbi->s_nls_map);
@@ -1920,8 +2040,8 @@ error_out:
1920 return -EINVAL; 2040 return -EINVAL;
1921} 2041}
1922 2042
1923void udf_error(struct super_block *sb, const char *function, 2043static void udf_error(struct super_block *sb, const char *function,
1924 const char *fmt, ...) 2044 const char *fmt, ...)
1925{ 2045{
1926 va_list args; 2046 va_list args;
1927 2047
@@ -1948,19 +2068,6 @@ void udf_warning(struct super_block *sb, const char *function,
1948 sb->s_id, function, error_buf); 2068 sb->s_id, function, error_buf);
1949} 2069}
1950 2070
1951/*
1952 * udf_put_super
1953 *
1954 * PURPOSE
1955 * Prepare for destruction of the superblock.
1956 *
1957 * DESCRIPTION
1958 * Called before the filesystem is unmounted.
1959 *
1960 * HISTORY
1961 * July 1, 1997 - Andrew E. Mileski
1962 * Written, tested, and released.
1963 */
1964static void udf_put_super(struct super_block *sb) 2071static void udf_put_super(struct super_block *sb)
1965{ 2072{
1966 int i; 2073 int i;
@@ -1969,21 +2076,9 @@ static void udf_put_super(struct super_block *sb)
1969 sbi = UDF_SB(sb); 2076 sbi = UDF_SB(sb);
1970 if (sbi->s_vat_inode) 2077 if (sbi->s_vat_inode)
1971 iput(sbi->s_vat_inode); 2078 iput(sbi->s_vat_inode);
1972 if (sbi->s_partitions) { 2079 if (sbi->s_partitions)
1973 struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition]; 2080 for (i = 0; i < sbi->s_partitions; i++)
1974 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) 2081 udf_free_partition(&sbi->s_partmaps[i]);
1975 iput(map->s_uspace.s_table);
1976 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
1977 iput(map->s_fspace.s_table);
1978 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
1979 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
1980 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
1981 udf_sb_free_bitmap(map->s_fspace.s_bitmap);
1982 if (map->s_partition_type == UDF_SPARABLE_MAP15)
1983 for (i = 0; i < 4; i++)
1984 brelse(map->s_type_specific.s_sparing.
1985 s_spar_map[i]);
1986 }
1987#ifdef CONFIG_UDF_NLS 2082#ifdef CONFIG_UDF_NLS
1988 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) 2083 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
1989 unload_nls(sbi->s_nls_map); 2084 unload_nls(sbi->s_nls_map);
@@ -1996,19 +2091,6 @@ static void udf_put_super(struct super_block *sb)
1996 sb->s_fs_info = NULL; 2091 sb->s_fs_info = NULL;
1997} 2092}
1998 2093
1999/*
2000 * udf_stat_fs
2001 *
2002 * PURPOSE
2003 * Return info about the filesystem.
2004 *
2005 * DESCRIPTION
2006 * Called by sys_statfs()
2007 *
2008 * HISTORY
2009 * July 1, 1997 - Andrew E. Mileski
2010 * Written, tested, and released.
2011 */
2012static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) 2094static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2013{ 2095{
2014 struct super_block *sb = dentry->d_sb; 2096 struct super_block *sb = dentry->d_sb;
@@ -2035,10 +2117,6 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2035 return 0; 2117 return 0;
2036} 2118}
2037 2119
2038static unsigned char udf_bitmap_lookup[16] = {
2039 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
2040};
2041
2042static unsigned int udf_count_free_bitmap(struct super_block *sb, 2120static unsigned int udf_count_free_bitmap(struct super_block *sb,
2043 struct udf_bitmap *bitmap) 2121 struct udf_bitmap *bitmap)
2044{ 2122{
@@ -2048,7 +2126,6 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
2048 int block = 0, newblock; 2126 int block = 0, newblock;
2049 kernel_lb_addr loc; 2127 kernel_lb_addr loc;
2050 uint32_t bytes; 2128 uint32_t bytes;
2051 uint8_t value;
2052 uint8_t *ptr; 2129 uint8_t *ptr;
2053 uint16_t ident; 2130 uint16_t ident;
2054 struct spaceBitmapDesc *bm; 2131 struct spaceBitmapDesc *bm;
@@ -2074,13 +2151,10 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
2074 ptr = (uint8_t *)bh->b_data; 2151 ptr = (uint8_t *)bh->b_data;
2075 2152
2076 while (bytes > 0) { 2153 while (bytes > 0) {
2077 while ((bytes > 0) && (index < sb->s_blocksize)) { 2154 u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2078 value = ptr[index]; 2155 accum += bitmap_weight((const unsigned long *)(ptr + index),
2079 accum += udf_bitmap_lookup[value & 0x0f]; 2156 cur_bytes * 8);
2080 accum += udf_bitmap_lookup[value >> 4]; 2157 bytes -= cur_bytes;
2081 index++;
2082 bytes--;
2083 }
2084 if (bytes) { 2158 if (bytes) {
2085 brelse(bh); 2159 brelse(bh);
2086 newblock = udf_get_lb_pblock(sb, loc, ++block); 2160 newblock = udf_get_lb_pblock(sb, loc, ++block);
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 6ec99221e50c..c3265e1385d4 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -23,7 +23,6 @@
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/udf_fs.h>
27#include <linux/time.h> 26#include <linux/time.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
29#include <linux/stat.h> 28#include <linux/stat.h>
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index fe61be17cdab..65e19b4f9424 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -22,7 +22,6 @@
22#include "udfdecl.h" 22#include "udfdecl.h"
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/udf_fs.h>
26#include <linux/buffer_head.h> 25#include <linux/buffer_head.h>
27 26
28#include "udf_i.h" 27#include "udf_i.h"
@@ -180,6 +179,24 @@ void udf_discard_prealloc(struct inode *inode)
180 brelse(epos.bh); 179 brelse(epos.bh);
181} 180}
182 181
182static void udf_update_alloc_ext_desc(struct inode *inode,
183 struct extent_position *epos,
184 u32 lenalloc)
185{
186 struct super_block *sb = inode->i_sb;
187 struct udf_sb_info *sbi = UDF_SB(sb);
188
189 struct allocExtDesc *aed = (struct allocExtDesc *) (epos->bh->b_data);
190 int len = sizeof(struct allocExtDesc);
191
192 aed->lengthAllocDescs = cpu_to_le32(lenalloc);
193 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201)
194 len += lenalloc;
195
196 udf_update_tag(epos->bh->b_data, len);
197 mark_buffer_dirty_inode(epos->bh, inode);
198}
199
183void udf_truncate_extents(struct inode *inode) 200void udf_truncate_extents(struct inode *inode)
184{ 201{
185 struct extent_position epos; 202 struct extent_position epos;
@@ -187,7 +204,6 @@ void udf_truncate_extents(struct inode *inode)
187 uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc; 204 uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
188 int8_t etype; 205 int8_t etype;
189 struct super_block *sb = inode->i_sb; 206 struct super_block *sb = inode->i_sb;
190 struct udf_sb_info *sbi = UDF_SB(sb);
191 sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; 207 sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset;
192 loff_t byte_offset; 208 loff_t byte_offset;
193 int adsize; 209 int adsize;
@@ -224,35 +240,15 @@ void udf_truncate_extents(struct inode *inode)
224 if (indirect_ext_len) { 240 if (indirect_ext_len) {
225 /* We managed to free all extents in the 241 /* We managed to free all extents in the
226 * indirect extent - free it too */ 242 * indirect extent - free it too */
227 if (!epos.bh) 243 BUG_ON(!epos.bh);
228 BUG();
229 udf_free_blocks(sb, inode, epos.block, 244 udf_free_blocks(sb, inode, epos.block,
230 0, indirect_ext_len); 245 0, indirect_ext_len);
231 } else { 246 } else if (!epos.bh) {
232 if (!epos.bh) { 247 iinfo->i_lenAlloc = lenalloc;
233 iinfo->i_lenAlloc = 248 mark_inode_dirty(inode);
234 lenalloc; 249 } else
235 mark_inode_dirty(inode); 250 udf_update_alloc_ext_desc(inode,
236 } else { 251 &epos, lenalloc);
237 struct allocExtDesc *aed =
238 (struct allocExtDesc *)
239 (epos.bh->b_data);
240 int len =
241 sizeof(struct allocExtDesc);
242
243 aed->lengthAllocDescs =
244 cpu_to_le32(lenalloc);
245 if (!UDF_QUERY_FLAG(sb,
246 UDF_FLAG_STRICT) ||
247 sbi->s_udfrev >= 0x0201)
248 len += lenalloc;
249
250 udf_update_tag(epos.bh->b_data,
251 len);
252 mark_buffer_dirty_inode(
253 epos.bh, inode);
254 }
255 }
256 brelse(epos.bh); 252 brelse(epos.bh);
257 epos.offset = sizeof(struct allocExtDesc); 253 epos.offset = sizeof(struct allocExtDesc);
258 epos.block = eloc; 254 epos.block = eloc;
@@ -272,29 +268,14 @@ void udf_truncate_extents(struct inode *inode)
272 } 268 }
273 269
274 if (indirect_ext_len) { 270 if (indirect_ext_len) {
275 if (!epos.bh) 271 BUG_ON(!epos.bh);
276 BUG();
277 udf_free_blocks(sb, inode, epos.block, 0, 272 udf_free_blocks(sb, inode, epos.block, 0,
278 indirect_ext_len); 273 indirect_ext_len);
279 } else { 274 } else if (!epos.bh) {
280 if (!epos.bh) { 275 iinfo->i_lenAlloc = lenalloc;
281 iinfo->i_lenAlloc = lenalloc; 276 mark_inode_dirty(inode);
282 mark_inode_dirty(inode); 277 } else
283 } else { 278 udf_update_alloc_ext_desc(inode, &epos, lenalloc);
284 struct allocExtDesc *aed =
285 (struct allocExtDesc *)(epos.bh->b_data);
286 aed->lengthAllocDescs = cpu_to_le32(lenalloc);
287 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
288 sbi->s_udfrev >= 0x0201)
289 udf_update_tag(epos.bh->b_data,
290 lenalloc +
291 sizeof(struct allocExtDesc));
292 else
293 udf_update_tag(epos.bh->b_data,
294 sizeof(struct allocExtDesc));
295 mark_buffer_dirty_inode(epos.bh, inode);
296 }
297 }
298 } else if (inode->i_size) { 279 } else if (inode->i_size) {
299 if (byte_offset) { 280 if (byte_offset) {
300 kernel_long_ad extent; 281 kernel_long_ad extent;
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index ccc52f16bf7d..4f86b1d98a5d 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,10 +1,32 @@
1#ifndef __LINUX_UDF_I_H 1#ifndef _UDF_I_H
2#define __LINUX_UDF_I_H 2#define _UDF_I_H
3
4struct udf_inode_info {
5 struct timespec i_crtime;
6 /* Physical address of inode */
7 kernel_lb_addr i_location;
8 __u64 i_unique;
9 __u32 i_lenEAttr;
10 __u32 i_lenAlloc;
11 __u64 i_lenExtents;
12 __u32 i_next_alloc_block;
13 __u32 i_next_alloc_goal;
14 unsigned i_alloc_type : 3;
15 unsigned i_efe : 1; /* extendedFileEntry */
16 unsigned i_use : 1; /* unallocSpaceEntry */
17 unsigned i_strat4096 : 1;
18 unsigned reserved : 26;
19 union {
20 short_ad *i_sad;
21 long_ad *i_lad;
22 __u8 *i_data;
23 } i_ext;
24 struct inode vfs_inode;
25};
3 26
4#include <linux/udf_fs_i.h>
5static inline struct udf_inode_info *UDF_I(struct inode *inode) 27static inline struct udf_inode_info *UDF_I(struct inode *inode)
6{ 28{
7 return list_entry(inode, struct udf_inode_info, vfs_inode); 29 return list_entry(inode, struct udf_inode_info, vfs_inode);
8} 30}
9 31
10#endif /* !defined(_LINUX_UDF_I_H) */ 32#endif /* _UDF_I_H) */
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 737d1c604eea..1c1c514a9725 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -1,10 +1,12 @@
1#ifndef __LINUX_UDF_SB_H 1#ifndef __LINUX_UDF_SB_H
2#define __LINUX_UDF_SB_H 2#define __LINUX_UDF_SB_H
3 3
4#include <linux/mutex.h>
5
4/* Since UDF 2.01 is ISO 13346 based... */ 6/* Since UDF 2.01 is ISO 13346 based... */
5#define UDF_SUPER_MAGIC 0x15013346 7#define UDF_SUPER_MAGIC 0x15013346
6 8
7#define UDF_MAX_READ_VERSION 0x0201 9#define UDF_MAX_READ_VERSION 0x0250
8#define UDF_MAX_WRITE_VERSION 0x0201 10#define UDF_MAX_WRITE_VERSION 0x0201
9 11
10#define UDF_FLAG_USE_EXTENDED_FE 0 12#define UDF_FLAG_USE_EXTENDED_FE 0
@@ -38,6 +40,111 @@
38#define UDF_PART_FLAG_REWRITABLE 0x0040 40#define UDF_PART_FLAG_REWRITABLE 0x0040
39#define UDF_PART_FLAG_OVERWRITABLE 0x0080 41#define UDF_PART_FLAG_OVERWRITABLE 0x0080
40 42
43#define UDF_MAX_BLOCK_LOADED 8
44
45#define UDF_TYPE1_MAP15 0x1511U
46#define UDF_VIRTUAL_MAP15 0x1512U
47#define UDF_VIRTUAL_MAP20 0x2012U
48#define UDF_SPARABLE_MAP15 0x1522U
49#define UDF_METADATA_MAP25 0x2511U
50
51#pragma pack(1) /* XXX(hch): Why? This file just defines in-core structures */
52
53struct udf_meta_data {
54 __u32 s_meta_file_loc;
55 __u32 s_mirror_file_loc;
56 __u32 s_bitmap_file_loc;
57 __u32 s_alloc_unit_size;
58 __u16 s_align_unit_size;
59 __u8 s_dup_md_flag;
60 struct inode *s_metadata_fe;
61 struct inode *s_mirror_fe;
62 struct inode *s_bitmap_fe;
63};
64
65struct udf_sparing_data {
66 __u16 s_packet_len;
67 struct buffer_head *s_spar_map[4];
68};
69
70struct udf_virtual_data {
71 __u32 s_num_entries;
72 __u16 s_start_offset;
73};
74
75struct udf_bitmap {
76 __u32 s_extLength;
77 __u32 s_extPosition;
78 __u16 s_nr_groups;
79 struct buffer_head **s_block_bitmap;
80};
81
82struct udf_part_map {
83 union {
84 struct udf_bitmap *s_bitmap;
85 struct inode *s_table;
86 } s_uspace;
87 union {
88 struct udf_bitmap *s_bitmap;
89 struct inode *s_table;
90 } s_fspace;
91 __u32 s_partition_root;
92 __u32 s_partition_len;
93 __u16 s_partition_type;
94 __u16 s_partition_num;
95 union {
96 struct udf_sparing_data s_sparing;
97 struct udf_virtual_data s_virtual;
98 struct udf_meta_data s_metadata;
99 } s_type_specific;
100 __u32 (*s_partition_func)(struct super_block *, __u32, __u16, __u32);
101 __u16 s_volumeseqnum;
102 __u16 s_partition_flags;
103};
104
105#pragma pack()
106
107struct udf_sb_info {
108 struct udf_part_map *s_partmaps;
109 __u8 s_volume_ident[32];
110
111 /* Overall info */
112 __u16 s_partitions;
113 __u16 s_partition;
114
115 /* Sector headers */
116 __s32 s_session;
117 __u32 s_anchor[3];
118 __u32 s_last_block;
119
120 struct buffer_head *s_lvid_bh;
121
122 /* Default permissions */
123 mode_t s_umask;
124 gid_t s_gid;
125 uid_t s_uid;
126
127 /* Root Info */
128 struct timespec s_record_time;
129
130 /* Fileset Info */
131 __u16 s_serial_number;
132
133 /* highest UDF revision we have recorded to this media */
134 __u16 s_udfrev;
135
136 /* Miscellaneous flags */
137 __u32 s_flags;
138
139 /* Encoding info */
140 struct nls_table *s_nls_map;
141
142 /* VAT inode */
143 struct inode *s_vat_inode;
144
145 struct mutex s_alloc_mutex;
146};
147
41static inline struct udf_sb_info *UDF_SB(struct super_block *sb) 148static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
42{ 149{
43 return sb->s_fs_info; 150 return sb->s_fs_info;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 681dc2b66cdb..f3f45d029277 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -1,17 +1,37 @@
1#ifndef __UDF_DECL_H 1#ifndef __UDF_DECL_H
2#define __UDF_DECL_H 2#define __UDF_DECL_H
3 3
4#include <linux/udf_fs.h>
5#include "ecma_167.h" 4#include "ecma_167.h"
6#include "osta_udf.h" 5#include "osta_udf.h"
7 6
8#include <linux/fs.h> 7#include <linux/fs.h>
9#include <linux/types.h> 8#include <linux/types.h>
10#include <linux/udf_fs_i.h>
11#include <linux/udf_fs_sb.h>
12#include <linux/buffer_head.h> 9#include <linux/buffer_head.h>
10#include <linux/udf_fs_i.h>
13 11
12#include "udf_sb.h"
14#include "udfend.h" 13#include "udfend.h"
14#include "udf_i.h"
15
16#define UDF_PREALLOCATE
17#define UDF_DEFAULT_PREALLOC_BLOCKS 8
18
19#define UDFFS_DEBUG
20
21#ifdef UDFFS_DEBUG
22#define udf_debug(f, a...) \
23do { \
24 printk(KERN_DEBUG "UDF-fs DEBUG %s:%d:%s: ", \
25 __FILE__, __LINE__, __func__); \
26 printk(f, ##a); \
27} while (0)
28#else
29#define udf_debug(f, a...) /**/
30#endif
31
32#define udf_info(f, a...) \
33 printk(KERN_INFO "UDF-fs INFO " f, ##a);
34
15 35
16#define udf_fixed_to_variable(x) ( ( ( (x) >> 5 ) * 39 ) + ( (x) & 0x0000001F ) ) 36#define udf_fixed_to_variable(x) ( ( ( (x) >> 5 ) * 39 ) + ( (x) & 0x0000001F ) )
17#define udf_variable_to_fixed(x) ( ( ( (x) / 39 ) << 5 ) + ( (x) % 39 ) ) 37#define udf_variable_to_fixed(x) ( ( ( (x) / 39 ) << 5 ) + ( (x) % 39 ) )
@@ -23,16 +43,24 @@
23#define UDF_NAME_LEN 256 43#define UDF_NAME_LEN 256
24#define UDF_PATH_LEN 1023 44#define UDF_PATH_LEN 1023
25 45
26#define udf_file_entry_alloc_offset(inode)\ 46static inline size_t udf_file_entry_alloc_offset(struct inode *inode)
27 (UDF_I(inode)->i_use ?\ 47{
28 sizeof(struct unallocSpaceEntry) :\ 48 struct udf_inode_info *iinfo = UDF_I(inode);
29 ((UDF_I(inode)->i_efe ?\ 49 if (iinfo->i_use)
30 sizeof(struct extendedFileEntry) :\ 50 return sizeof(struct unallocSpaceEntry);
31 sizeof(struct fileEntry)) + UDF_I(inode)->i_lenEAttr)) 51 else if (iinfo->i_efe)
32 52 return sizeof(struct extendedFileEntry) + iinfo->i_lenEAttr;
33#define udf_ext0_offset(inode)\ 53 else
34 (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ?\ 54 return sizeof(struct fileEntry) + iinfo->i_lenEAttr;
35 udf_file_entry_alloc_offset(inode) : 0) 55}
56
57static inline size_t udf_ext0_offset(struct inode *inode)
58{
59 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
60 return udf_file_entry_alloc_offset(inode);
61 else
62 return 0;
63}
36 64
37#define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset)) 65#define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset))
38 66
@@ -83,7 +111,6 @@ struct extent_position {
83}; 111};
84 112
85/* super.c */ 113/* super.c */
86extern void udf_error(struct super_block *, const char *, const char *, ...);
87extern void udf_warning(struct super_block *, const char *, const char *, ...); 114extern void udf_warning(struct super_block *, const char *, const char *, ...);
88 115
89/* namei.c */ 116/* namei.c */
@@ -150,6 +177,8 @@ extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t,
150 uint32_t); 177 uint32_t);
151extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t, 178extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t,
152 uint32_t); 179 uint32_t);
180extern uint32_t udf_get_pblock_meta25(struct super_block *, uint32_t, uint16_t,
181 uint32_t);
153extern int udf_relocate_blocks(struct super_block *, long, long *); 182extern int udf_relocate_blocks(struct super_block *, long, long *);
154 183
155/* unicode.c */ 184/* unicode.c */
@@ -157,7 +186,7 @@ extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
157extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *, 186extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
158 int); 187 int);
159extern int udf_build_ustr(struct ustr *, dstring *, int); 188extern int udf_build_ustr(struct ustr *, dstring *, int);
160extern int udf_CS0toUTF8(struct ustr *, struct ustr *); 189extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
161 190
162/* ialloc.c */ 191/* ialloc.c */
163extern void udf_free_inode(struct inode *); 192extern void udf_free_inode(struct inode *);
@@ -191,11 +220,9 @@ extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize,
191extern long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int); 220extern long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int);
192extern short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int); 221extern short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int);
193 222
194/* crc.c */
195extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t);
196
197/* udftime.c */ 223/* udftime.c */
198extern time_t *udf_stamp_to_time(time_t *, long *, kernel_timestamp); 224extern struct timespec *udf_disk_stamp_to_time(struct timespec *dest,
199extern kernel_timestamp *udf_time_to_stamp(kernel_timestamp *, struct timespec); 225 timestamp src);
226extern timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec src);
200 227
201#endif /* __UDF_DECL_H */ 228#endif /* __UDF_DECL_H */
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
index c4bd1203f857..489f52fb428c 100644
--- a/fs/udf/udfend.h
+++ b/fs/udf/udfend.h
@@ -24,17 +24,6 @@ static inline lb_addr cpu_to_lelb(kernel_lb_addr in)
24 return out; 24 return out;
25} 25}
26 26
27static inline kernel_timestamp lets_to_cpu(timestamp in)
28{
29 kernel_timestamp out;
30
31 memcpy(&out, &in, sizeof(timestamp));
32 out.typeAndTimezone = le16_to_cpu(in.typeAndTimezone);
33 out.year = le16_to_cpu(in.year);
34
35 return out;
36}
37
38static inline short_ad lesa_to_cpu(short_ad in) 27static inline short_ad lesa_to_cpu(short_ad in)
39{ 28{
40 short_ad out; 29 short_ad out;
@@ -85,15 +74,4 @@ static inline kernel_extent_ad leea_to_cpu(extent_ad in)
85 return out; 74 return out;
86} 75}
87 76
88static inline timestamp cpu_to_lets(kernel_timestamp in)
89{
90 timestamp out;
91
92 memcpy(&out, &in, sizeof(timestamp));
93 out.typeAndTimezone = cpu_to_le16(in.typeAndTimezone);
94 out.year = cpu_to_le16(in.year);
95
96 return out;
97}
98
99#endif /* __UDF_ENDIAN_H */ 77#endif /* __UDF_ENDIAN_H */
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index ce595732ba6f..5f811655c9b5 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -85,39 +85,38 @@ extern struct timezone sys_tz;
85#define SECS_PER_HOUR (60 * 60) 85#define SECS_PER_HOUR (60 * 60)
86#define SECS_PER_DAY (SECS_PER_HOUR * 24) 86#define SECS_PER_DAY (SECS_PER_HOUR * 24)
87 87
88time_t *udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src) 88struct timespec *udf_disk_stamp_to_time(struct timespec *dest, timestamp src)
89{ 89{
90 int yday; 90 int yday;
91 uint8_t type = src.typeAndTimezone >> 12; 91 u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
92 u16 year = le16_to_cpu(src.year);
93 uint8_t type = typeAndTimezone >> 12;
92 int16_t offset; 94 int16_t offset;
93 95
94 if (type == 1) { 96 if (type == 1) {
95 offset = src.typeAndTimezone << 4; 97 offset = typeAndTimezone << 4;
96 /* sign extent offset */ 98 /* sign extent offset */
97 offset = (offset >> 4); 99 offset = (offset >> 4);
98 if (offset == -2047) /* unspecified offset */ 100 if (offset == -2047) /* unspecified offset */
99 offset = 0; 101 offset = 0;
100 } else { 102 } else
101 offset = 0; 103 offset = 0;
102 }
103 104
104 if ((src.year < EPOCH_YEAR) || 105 if ((year < EPOCH_YEAR) ||
105 (src.year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) { 106 (year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
106 *dest = -1;
107 *dest_usec = -1;
108 return NULL; 107 return NULL;
109 } 108 }
110 *dest = year_seconds[src.year - EPOCH_YEAR]; 109 dest->tv_sec = year_seconds[year - EPOCH_YEAR];
111 *dest -= offset * 60; 110 dest->tv_sec -= offset * 60;
112 111
113 yday = ((__mon_yday[__isleap(src.year)][src.month - 1]) + src.day - 1); 112 yday = ((__mon_yday[__isleap(year)][src.month - 1]) + src.day - 1);
114 *dest += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second; 113 dest->tv_sec += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second;
115 *dest_usec = src.centiseconds * 10000 + 114 dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
116 src.hundredsOfMicroseconds * 100 + src.microseconds; 115 src.hundredsOfMicroseconds * 100 + src.microseconds);
117 return dest; 116 return dest;
118} 117}
119 118
120kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts) 119timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec ts)
121{ 120{
122 long int days, rem, y; 121 long int days, rem, y;
123 const unsigned short int *ip; 122 const unsigned short int *ip;
@@ -128,7 +127,7 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
128 if (!dest) 127 if (!dest)
129 return NULL; 128 return NULL;
130 129
131 dest->typeAndTimezone = 0x1000 | (offset & 0x0FFF); 130 dest->typeAndTimezone = cpu_to_le16(0x1000 | (offset & 0x0FFF));
132 131
133 ts.tv_sec += offset * 60; 132 ts.tv_sec += offset * 60;
134 days = ts.tv_sec / SECS_PER_DAY; 133 days = ts.tv_sec / SECS_PER_DAY;
@@ -151,7 +150,7 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts)
151 - LEAPS_THRU_END_OF(y - 1)); 150 - LEAPS_THRU_END_OF(y - 1));
152 y = yg; 151 y = yg;
153 } 152 }
154 dest->year = y; 153 dest->year = cpu_to_le16(y);
155 ip = __mon_yday[__isleap(y)]; 154 ip = __mon_yday[__isleap(y)];
156 for (y = 11; days < (long int)ip[y]; --y) 155 for (y = 11; days < (long int)ip[y]; --y)
157 continue; 156 continue;
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index e533b11703bf..9fdf8c93c58e 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -23,7 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/string.h> /* for memset */ 24#include <linux/string.h> /* for memset */
25#include <linux/nls.h> 25#include <linux/nls.h>
26#include <linux/udf_fs.h> 26#include <linux/crc-itu-t.h>
27 27
28#include "udf_sb.h" 28#include "udf_sb.h"
29 29
@@ -49,14 +49,16 @@ int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
49{ 49{
50 int usesize; 50 int usesize;
51 51
52 if ((!dest) || (!ptr) || (!size)) 52 if (!dest || !ptr || !size)
53 return -1; 53 return -1;
54 BUG_ON(size < 2);
54 55
55 memset(dest, 0, sizeof(struct ustr)); 56 usesize = min_t(size_t, ptr[size - 1], sizeof(dest->u_name));
56 usesize = (size > UDF_NAME_LEN) ? UDF_NAME_LEN : size; 57 usesize = min(usesize, size - 2);
57 dest->u_cmpID = ptr[0]; 58 dest->u_cmpID = ptr[0];
58 dest->u_len = ptr[size - 1]; 59 dest->u_len = usesize;
59 memcpy(dest->u_name, ptr + 1, usesize - 1); 60 memcpy(dest->u_name, ptr + 1, usesize);
61 memset(dest->u_name + usesize, 0, sizeof(dest->u_name) - usesize);
60 62
61 return 0; 63 return 0;
62} 64}
@@ -83,9 +85,6 @@ static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
83 * PURPOSE 85 * PURPOSE
84 * Convert OSTA Compressed Unicode to the UTF-8 equivalent. 86 * Convert OSTA Compressed Unicode to the UTF-8 equivalent.
85 * 87 *
86 * DESCRIPTION
87 * This routine is only called by udf_filldir().
88 *
89 * PRE-CONDITIONS 88 * PRE-CONDITIONS
90 * utf Pointer to UTF-8 output buffer. 89 * utf Pointer to UTF-8 output buffer.
91 * ocu Pointer to OSTA Compressed Unicode input buffer 90 * ocu Pointer to OSTA Compressed Unicode input buffer
@@ -99,43 +98,39 @@ static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
99 * November 12, 1997 - Andrew E. Mileski 98 * November 12, 1997 - Andrew E. Mileski
100 * Written, tested, and released. 99 * Written, tested, and released.
101 */ 100 */
102int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i) 101int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
103{ 102{
104 uint8_t *ocu; 103 const uint8_t *ocu;
105 uint32_t c;
106 uint8_t cmp_id, ocu_len; 104 uint8_t cmp_id, ocu_len;
107 int i; 105 int i;
108 106
109 ocu = ocu_i->u_name;
110
111 ocu_len = ocu_i->u_len; 107 ocu_len = ocu_i->u_len;
112 cmp_id = ocu_i->u_cmpID;
113 utf_o->u_len = 0;
114
115 if (ocu_len == 0) { 108 if (ocu_len == 0) {
116 memset(utf_o, 0, sizeof(struct ustr)); 109 memset(utf_o, 0, sizeof(struct ustr));
117 utf_o->u_cmpID = 0;
118 utf_o->u_len = 0;
119 return 0; 110 return 0;
120 } 111 }
121 112
122 if ((cmp_id != 8) && (cmp_id != 16)) { 113 cmp_id = ocu_i->u_cmpID;
114 if (cmp_id != 8 && cmp_id != 16) {
115 memset(utf_o, 0, sizeof(struct ustr));
123 printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", 116 printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
124 cmp_id, ocu_i->u_name); 117 cmp_id, ocu_i->u_name);
125 return 0; 118 return 0;
126 } 119 }
127 120
121 ocu = ocu_i->u_name;
122 utf_o->u_len = 0;
128 for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) { 123 for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
129 124
130 /* Expand OSTA compressed Unicode to Unicode */ 125 /* Expand OSTA compressed Unicode to Unicode */
131 c = ocu[i++]; 126 uint32_t c = ocu[i++];
132 if (cmp_id == 16) 127 if (cmp_id == 16)
133 c = (c << 8) | ocu[i++]; 128 c = (c << 8) | ocu[i++];
134 129
135 /* Compress Unicode to UTF-8 */ 130 /* Compress Unicode to UTF-8 */
136 if (c < 0x80U) { 131 if (c < 0x80U)
137 utf_o->u_name[utf_o->u_len++] = (uint8_t)c; 132 utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
138 } else if (c < 0x800U) { 133 else if (c < 0x800U) {
139 utf_o->u_name[utf_o->u_len++] = 134 utf_o->u_name[utf_o->u_len++] =
140 (uint8_t)(0xc0 | (c >> 6)); 135 (uint8_t)(0xc0 | (c >> 6));
141 utf_o->u_name[utf_o->u_len++] = 136 utf_o->u_name[utf_o->u_len++] =
@@ -255,35 +250,32 @@ error_out:
255} 250}
256 251
257static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o, 252static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
258 struct ustr *ocu_i) 253 const struct ustr *ocu_i)
259{ 254{
260 uint8_t *ocu; 255 const uint8_t *ocu;
261 uint32_t c;
262 uint8_t cmp_id, ocu_len; 256 uint8_t cmp_id, ocu_len;
263 int i; 257 int i;
264 258
265 ocu = ocu_i->u_name;
266 259
267 ocu_len = ocu_i->u_len; 260 ocu_len = ocu_i->u_len;
268 cmp_id = ocu_i->u_cmpID;
269 utf_o->u_len = 0;
270
271 if (ocu_len == 0) { 261 if (ocu_len == 0) {
272 memset(utf_o, 0, sizeof(struct ustr)); 262 memset(utf_o, 0, sizeof(struct ustr));
273 utf_o->u_cmpID = 0;
274 utf_o->u_len = 0;
275 return 0; 263 return 0;
276 } 264 }
277 265
278 if ((cmp_id != 8) && (cmp_id != 16)) { 266 cmp_id = ocu_i->u_cmpID;
267 if (cmp_id != 8 && cmp_id != 16) {
268 memset(utf_o, 0, sizeof(struct ustr));
279 printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n", 269 printk(KERN_ERR "udf: unknown compression code (%d) stri=%s\n",
280 cmp_id, ocu_i->u_name); 270 cmp_id, ocu_i->u_name);
281 return 0; 271 return 0;
282 } 272 }
283 273
274 ocu = ocu_i->u_name;
275 utf_o->u_len = 0;
284 for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) { 276 for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
285 /* Expand OSTA compressed Unicode to Unicode */ 277 /* Expand OSTA compressed Unicode to Unicode */
286 c = ocu[i++]; 278 uint32_t c = ocu[i++];
287 if (cmp_id == 16) 279 if (cmp_id == 16)
288 c = (c << 8) | ocu[i++]; 280 c = (c << 8) | ocu[i++];
289 281
@@ -463,7 +455,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
463 } else if (newIndex > 250) 455 } else if (newIndex > 250)
464 newIndex = 250; 456 newIndex = 250;
465 newName[newIndex++] = CRC_MARK; 457 newName[newIndex++] = CRC_MARK;
466 valueCRC = udf_crc(fidName, fidNameLen, 0); 458 valueCRC = crc_itu_t(0, fidName, fidNameLen);
467 newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; 459 newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
468 newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8]; 460 newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
469 newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4]; 461 newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
diff --git a/fs/xattr.c b/fs/xattr.c
index f7062da505d4..89a942f07e1b 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -307,7 +307,6 @@ sys_fsetxattr(int fd, char __user *name, void __user *value,
307 error = setxattr(dentry, name, value, size, flags); 307 error = setxattr(dentry, name, value, size, flags);
308 mnt_drop_write(f->f_path.mnt); 308 mnt_drop_write(f->f_path.mnt);
309 } 309 }
310out_fput:
311 fput(f); 310 fput(f);
312 return error; 311 return error;
313} 312}
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index f1663aa94a52..18a4321349a3 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...)
157struct ia64_mca_notify_die { 157struct ia64_mca_notify_die {
158 struct ia64_sal_os_state *sos; 158 struct ia64_sal_os_state *sos;
159 int *monarch_cpu; 159 int *monarch_cpu;
160 int *data;
160}; 161};
161 162
162DECLARE_PER_CPU(u64, ia64_mca_pal_base); 163DECLARE_PER_CPU(u64, ia64_mca_pal_base);
diff --git a/include/asm-sh/i2c-sh7760.h b/include/asm-sh/i2c-sh7760.h
new file mode 100644
index 000000000000..24182116711f
--- /dev/null
+++ b/include/asm-sh/i2c-sh7760.h
@@ -0,0 +1,22 @@
1/*
2 * MMIO/IRQ and platform data for SH7760 I2C channels
3 */
4
5#ifndef _I2C_SH7760_H_
6#define _I2C_SH7760_H_
7
8#define SH7760_I2C_DEVNAME "sh7760-i2c"
9
10#define SH7760_I2C0_MMIO 0xFE140000
11#define SH7760_I2C0_MMIOEND 0xFE14003B
12#define SH7760_I2C0_IRQ 62
13
14#define SH7760_I2C1_MMIO 0xFE150000
15#define SH7760_I2C1_MMIOEND 0xFE15003B
16#define SH7760_I2C1_IRQ 63
17
18struct sh7760_i2c_platdata {
19 unsigned int speed_khz;
20};
21
22#endif
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b3d9ccde0c27..cbb5ccb27de3 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -100,7 +100,7 @@ header-y += ixjuser.h
100header-y += jffs2.h 100header-y += jffs2.h
101header-y += keyctl.h 101header-y += keyctl.h
102header-y += limits.h 102header-y += limits.h
103header-y += lock_dlm_plock.h 103header-y += dlm_plock.h
104header-y += magic.h 104header-y += magic.h
105header-y += major.h 105header-y += major.h
106header-y += matroxfb.h 106header-y += matroxfb.h
@@ -150,6 +150,7 @@ header-y += tiocl.h
150header-y += tipc.h 150header-y += tipc.h
151header-y += tipc_config.h 151header-y += tipc_config.h
152header-y += toshiba.h 152header-y += toshiba.h
153header-y += udf_fs_i.h
153header-y += ultrasound.h 154header-y += ultrasound.h
154header-y += un.h 155header-y += un.h
155header-y += utime.h 156header-y += utime.h
@@ -210,7 +211,9 @@ unifdef-y += hdlcdrv.h
210unifdef-y += hdlc.h 211unifdef-y += hdlc.h
211unifdef-y += hdreg.h 212unifdef-y += hdreg.h
212unifdef-y += hdsmart.h 213unifdef-y += hdsmart.h
214unifdef-y += hid.h
213unifdef-y += hiddev.h 215unifdef-y += hiddev.h
216unifdef-y += hidraw.h
214unifdef-y += hpet.h 217unifdef-y += hpet.h
215unifdef-y += i2c.h 218unifdef-y += i2c.h
216unifdef-y += i2c-dev.h 219unifdef-y += i2c-dev.h
@@ -334,7 +337,6 @@ unifdef-y += time.h
334unifdef-y += timex.h 337unifdef-y += timex.h
335unifdef-y += tty.h 338unifdef-y += tty.h
336unifdef-y += types.h 339unifdef-y += types.h
337unifdef-y += udf_fs_i.h
338unifdef-y += udp.h 340unifdef-y += udp.h
339unifdef-y += uinput.h 341unifdef-y += uinput.h
340unifdef-y += uio.h 342unifdef-y += uio.h
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6bd646096fa6..cfb1627ac51c 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -301,7 +301,9 @@ extern int d_validate(struct dentry *, struct dentry *);
301 */ 301 */
302extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 302extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
303 303
304extern char *__d_path(const struct path *path, struct path *root, char *, int);
304extern char *d_path(struct path *, char *, int); 305extern char *d_path(struct path *, char *, int);
306extern char *dentry_path(struct dentry *, char *, int);
305 307
306/* Allocation counts.. */ 308/* Allocation counts.. */
307 309
@@ -359,7 +361,6 @@ static inline int d_mountpoint(struct dentry *dentry)
359} 361}
360 362
361extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *); 363extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *);
362extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
363extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); 364extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
364 365
365extern int sysctl_vfs_cache_pressure; 366extern int sysctl_vfs_cache_pressure;
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index c743fbc769db..203a025e30e5 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -21,10 +21,7 @@
21 21
22/* Lock levels and flags are here */ 22/* Lock levels and flags are here */
23#include <linux/dlmconstants.h> 23#include <linux/dlmconstants.h>
24 24#include <linux/types.h>
25
26#define DLM_RESNAME_MAXLEN 64
27
28 25
29typedef void dlm_lockspace_t; 26typedef void dlm_lockspace_t;
30 27
@@ -63,7 +60,7 @@ typedef void dlm_lockspace_t;
63 60
64struct dlm_lksb { 61struct dlm_lksb {
65 int sb_status; 62 int sb_status;
66 uint32_t sb_lkid; 63 __u32 sb_lkid;
67 char sb_flags; 64 char sb_flags;
68 char * sb_lvbptr; 65 char * sb_lvbptr;
69}; 66};
diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h
index 9642277a152a..c6034508fed9 100644
--- a/include/linux/dlm_device.h
+++ b/include/linux/dlm_device.h
@@ -11,10 +11,16 @@
11******************************************************************************* 11*******************************************************************************
12******************************************************************************/ 12******************************************************************************/
13 13
14#ifndef _LINUX_DLM_DEVICE_H
15#define _LINUX_DLM_DEVICE_H
16
14/* This is the device interface for dlm, most users will use a library 17/* This is the device interface for dlm, most users will use a library
15 * interface. 18 * interface.
16 */ 19 */
17 20
21#include <linux/dlm.h>
22#include <linux/types.h>
23
18#define DLM_USER_LVB_LEN 32 24#define DLM_USER_LVB_LEN 32
19 25
20/* Version of the device interface */ 26/* Version of the device interface */
@@ -94,10 +100,9 @@ struct dlm_lock_result {
94#define DLM_USER_PURGE 6 100#define DLM_USER_PURGE 6
95#define DLM_USER_DEADLOCK 7 101#define DLM_USER_DEADLOCK 7
96 102
97/* Arbitrary length restriction */
98#define MAX_LS_NAME_LEN 64
99
100/* Lockspace flags */ 103/* Lockspace flags */
101#define DLM_USER_LSFLG_AUTOFREE 1 104#define DLM_USER_LSFLG_AUTOFREE 1
102#define DLM_USER_LSFLG_FORCEFREE 2 105#define DLM_USER_LSFLG_FORCEFREE 2
103 106
107#endif
108
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
new file mode 100644
index 000000000000..18d5fdbceb74
--- /dev/null
+++ b/include/linux/dlm_plock.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#ifndef __DLM_PLOCK_DOT_H__
10#define __DLM_PLOCK_DOT_H__
11
12#define DLM_PLOCK_MISC_NAME "dlm_plock"
13
14#define DLM_PLOCK_VERSION_MAJOR 1
15#define DLM_PLOCK_VERSION_MINOR 1
16#define DLM_PLOCK_VERSION_PATCH 0
17
18enum {
19 DLM_PLOCK_OP_LOCK = 1,
20 DLM_PLOCK_OP_UNLOCK,
21 DLM_PLOCK_OP_GET,
22};
23
24struct dlm_plock_info {
25 __u32 version[3];
26 __u8 optype;
27 __u8 ex;
28 __u8 wait;
29 __u8 pad;
30 __u32 pid;
31 __s32 nodeid;
32 __s32 rv;
33 __u32 fsid;
34 __u64 number;
35 __u64 start;
36 __u64 end;
37 __u64 owner;
38};
39
40#ifdef __KERNEL__
41int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
42 int cmd, struct file_lock *fl);
43int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
44 struct file_lock *fl);
45int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
46 struct file_lock *fl);
47#endif /* __KERNEL__ */
48
49#endif
50
diff --git a/include/linux/dlmconstants.h b/include/linux/dlmconstants.h
index fddb3d3ff321..47bf08dc7566 100644
--- a/include/linux/dlmconstants.h
+++ b/include/linux/dlmconstants.h
@@ -18,6 +18,10 @@
18 * Constants used by DLM interface. 18 * Constants used by DLM interface.
19 */ 19 */
20 20
21#define DLM_LOCKSPACE_LEN 64
22#define DLM_RESNAME_MAXLEN 64
23
24
21/* 25/*
22 * Lock Modes 26 * Lock Modes
23 */ 27 */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0c609e71c379..cc2be2cf7d41 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -305,7 +305,6 @@ struct vfsmount;
305 305
306extern void __init inode_init(void); 306extern void __init inode_init(void);
307extern void __init inode_init_early(void); 307extern void __init inode_init_early(void);
308extern void __init mnt_init(void);
309extern void __init files_init(unsigned long); 308extern void __init files_init(unsigned long);
310 309
311struct buffer_head; 310struct buffer_head;
@@ -1536,12 +1535,7 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
1536#define kern_mount(type) kern_mount_data(type, NULL) 1535#define kern_mount(type) kern_mount_data(type, NULL)
1537extern int may_umount_tree(struct vfsmount *); 1536extern int may_umount_tree(struct vfsmount *);
1538extern int may_umount(struct vfsmount *); 1537extern int may_umount(struct vfsmount *);
1539extern void umount_tree(struct vfsmount *, int, struct list_head *);
1540extern void release_mounts(struct list_head *);
1541extern long do_mount(char *, char *, char *, unsigned long, void *); 1538extern long do_mount(char *, char *, char *, unsigned long, void *);
1542extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
1543extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
1544 struct vfsmount *);
1545extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *); 1539extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *);
1546extern void drop_collected_mounts(struct vfsmount *); 1540extern void drop_collected_mounts(struct vfsmount *);
1547 1541
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 74ff57596eb1..d951ec411241 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -284,6 +284,7 @@ struct hid_item {
284#define HID_QUIRK_2WHEEL_MOUSE_HACK_B8 0x02000000 284#define HID_QUIRK_2WHEEL_MOUSE_HACK_B8 0x02000000
285#define HID_QUIRK_HWHEEL_WHEEL_INVERT 0x04000000 285#define HID_QUIRK_HWHEEL_WHEEL_INVERT 0x04000000
286#define HID_QUIRK_MICROSOFT_KEYS 0x08000000 286#define HID_QUIRK_MICROSOFT_KEYS 0x08000000
287#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
287 288
288/* 289/*
289 * Separate quirks for runtime report descriptor fixup 290 * Separate quirks for runtime report descriptor fixup
@@ -296,6 +297,8 @@ struct hid_item {
296#define HID_QUIRK_RDESC_MACBOOK_JIS 0x00000010 297#define HID_QUIRK_RDESC_MACBOOK_JIS 0x00000010
297#define HID_QUIRK_RDESC_BUTTON_CONSUMER 0x00000020 298#define HID_QUIRK_RDESC_BUTTON_CONSUMER 0x00000020
298#define HID_QUIRK_RDESC_SAMSUNG_REMOTE 0x00000040 299#define HID_QUIRK_RDESC_SAMSUNG_REMOTE 0x00000040
300#define HID_QUIRK_RDESC_MICROSOFT_RECV_1028 0x00000080
301#define HID_QUIRK_RDESC_SUNPLUS_WDESKTOP 0x00000100
299 302
300/* 303/*
301 * This is the global environment of the parser. This information is 304 * This is the global environment of the parser. This information is
@@ -320,7 +323,7 @@ struct hid_global {
320 * This is the local environment. It is persistent up the next main-item. 323 * This is the local environment. It is persistent up the next main-item.
321 */ 324 */
322 325
323#define HID_MAX_USAGES 8192 326#define HID_MAX_USAGES 12288
324#define HID_DEFAULT_NUM_COLLECTIONS 16 327#define HID_DEFAULT_NUM_COLLECTIONS 16
325 328
326struct hid_local { 329struct hid_local {
@@ -421,6 +424,7 @@ struct hid_control_fifo {
421#define HID_RESET_PENDING 4 424#define HID_RESET_PENDING 4
422#define HID_SUSPENDED 5 425#define HID_SUSPENDED 5
423#define HID_CLEAR_HALT 6 426#define HID_CLEAR_HALT 6
427#define HID_DISCONNECTED 7
424 428
425struct hid_input { 429struct hid_input {
426 struct list_head list; 430 struct list_head list;
@@ -452,8 +456,6 @@ struct hid_device { /* device report descriptor */
452 void *hidraw; 456 void *hidraw;
453 int minor; /* Hiddev minor number */ 457 int minor; /* Hiddev minor number */
454 458
455 wait_queue_head_t wait; /* For sleeping */
456
457 int open; /* is the device open by anyone? */ 459 int open; /* is the device open by anyone? */
458 char name[128]; /* Device name */ 460 char name[128]; /* Device name */
459 char phys[64]; /* Device physical location */ 461 char phys[64]; /* Device physical location */
@@ -530,14 +532,12 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
530int hidinput_mapping_quirks(struct hid_usage *, struct input_dev *, unsigned long **, int *); 532int hidinput_mapping_quirks(struct hid_usage *, struct input_dev *, unsigned long **, int *);
531int hidinput_event_quirks(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); 533int hidinput_event_quirks(struct hid_device *, struct hid_field *, struct hid_usage *, __s32);
532int hidinput_apple_event(struct hid_device *, struct input_dev *, struct hid_usage *, __s32); 534int hidinput_apple_event(struct hid_device *, struct input_dev *, struct hid_usage *, __s32);
533void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data, int interrupt);
534void hid_output_report(struct hid_report *report, __u8 *data); 535void hid_output_report(struct hid_report *report, __u8 *data);
535void hid_free_device(struct hid_device *device); 536void hid_free_device(struct hid_device *device);
536struct hid_device *hid_parse_report(__u8 *start, unsigned size); 537struct hid_device *hid_parse_report(__u8 *start, unsigned size);
537 538
538/* HID quirks API */ 539/* HID quirks API */
539u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct); 540u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
540int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, const u32 quirks);
541int usbhid_quirks_init(char **quirks_param); 541int usbhid_quirks_init(char **quirks_param);
542void usbhid_quirks_exit(void); 542void usbhid_quirks_exit(void);
543void usbhid_fixup_report_descriptor(const u16, const u16, char *, unsigned, char **); 543void usbhid_fixup_report_descriptor(const u16, const u16, char *, unsigned, char **);
@@ -546,6 +546,7 @@ void usbhid_fixup_report_descriptor(const u16, const u16, char *, unsigned, char
546int hid_ff_init(struct hid_device *hid); 546int hid_ff_init(struct hid_device *hid);
547 547
548int hid_lgff_init(struct hid_device *hid); 548int hid_lgff_init(struct hid_device *hid);
549int hid_lg2ff_init(struct hid_device *hid);
549int hid_plff_init(struct hid_device *hid); 550int hid_plff_init(struct hid_device *hid);
550int hid_tmff_init(struct hid_device *hid); 551int hid_tmff_init(struct hid_device *hid);
551int hid_zpff_init(struct hid_device *hid); 552int hid_zpff_init(struct hid_device *hid);
@@ -566,7 +567,11 @@ static inline int hid_ff_init(struct hid_device *hid) { return -1; }
566#define dbg_hid_line(format, arg...) if (hid_debug) \ 567#define dbg_hid_line(format, arg...) if (hid_debug) \
567 printk(format, ## arg) 568 printk(format, ## arg)
568#else 569#else
569#define dbg_hid(format, arg...) do {} while (0) 570static inline int __attribute__((format(printf, 1, 2)))
571dbg_hid(const char *fmt, ...)
572{
573 return 0;
574}
570#define dbg_hid_line dbg_hid 575#define dbg_hid_line dbg_hid
571#endif 576#endif
572 577
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index 0536f299f7ff..dbb5c8c374f0 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include <linux/hid.h> 18#include <linux/hid.h>
19#include <linux/types.h>
19 20
20struct hidraw_report_descriptor { 21struct hidraw_report_descriptor {
21 __u32 size; 22 __u32 size;
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index fce47c051bb1..adcb3dc7ac26 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -1,14 +1,41 @@
1#ifndef _LINUX_I2C_ALGO_PCA_H 1#ifndef _LINUX_I2C_ALGO_PCA_H
2#define _LINUX_I2C_ALGO_PCA_H 2#define _LINUX_I2C_ALGO_PCA_H
3 3
4/* Clock speeds for the bus */
5#define I2C_PCA_CON_330kHz 0x00
6#define I2C_PCA_CON_288kHz 0x01
7#define I2C_PCA_CON_217kHz 0x02
8#define I2C_PCA_CON_146kHz 0x03
9#define I2C_PCA_CON_88kHz 0x04
10#define I2C_PCA_CON_59kHz 0x05
11#define I2C_PCA_CON_44kHz 0x06
12#define I2C_PCA_CON_36kHz 0x07
13
14/* PCA9564 registers */
15#define I2C_PCA_STA 0x00 /* STATUS Read Only */
16#define I2C_PCA_TO 0x00 /* TIMEOUT Write Only */
17#define I2C_PCA_DAT 0x01 /* DATA Read/Write */
18#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */
19#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */
20
21#define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */
22#define I2C_PCA_CON_ENSIO 0x40 /* Enable */
23#define I2C_PCA_CON_STA 0x20 /* Start */
24#define I2C_PCA_CON_STO 0x10 /* Stop */
25#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
26#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
27
4struct i2c_algo_pca_data { 28struct i2c_algo_pca_data {
5 int (*get_own) (struct i2c_algo_pca_data *adap); /* Obtain own address */ 29 void *data; /* private low level data */
6 int (*get_clock) (struct i2c_algo_pca_data *adap); 30 void (*write_byte) (void *data, int reg, int val);
7 void (*write_byte) (struct i2c_algo_pca_data *adap, int reg, int val); 31 int (*read_byte) (void *data, int reg);
8 int (*read_byte) (struct i2c_algo_pca_data *adap, int reg); 32 int (*wait_for_completion) (void *data);
9 int (*wait_for_interrupt) (struct i2c_algo_pca_data *adap); 33 void (*reset_chip) (void *data);
34 /* i2c_clock values are defined in linux/i2c-algo-pca.h */
35 unsigned int i2c_clock;
10}; 36};
11 37
12int i2c_pca_add_bus(struct i2c_adapter *); 38int i2c_pca_add_bus(struct i2c_adapter *);
39int i2c_pca_add_numbered_bus(struct i2c_adapter *);
13 40
14#endif /* _LINUX_I2C_ALGO_PCA_H */ 41#endif /* _LINUX_I2C_ALGO_PCA_H */
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h
new file mode 100644
index 000000000000..3d191873f2d1
--- /dev/null
+++ b/include/linux/i2c-pca-platform.h
@@ -0,0 +1,12 @@
1#ifndef I2C_PCA9564_PLATFORM_H
2#define I2C_PCA9564_PLATFORM_H
3
4struct i2c_pca9564_pf_platform_data {
5 int gpio; /* pin to reset chip. driver will work when
6 * not supplied (negative value), but it
7 * cannot exit some error conditions then */
8 int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */
9 int timeout; /* timeout = this value * 10us */
10};
11
12#endif /* I2C_PCA9564_PLATFORM_H */
diff --git a/include/linux/lock_dlm_plock.h b/include/linux/lock_dlm_plock.h
deleted file mode 100644
index fc3415113973..000000000000
--- a/include/linux/lock_dlm_plock.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#ifndef __LOCK_DLM_PLOCK_DOT_H__
10#define __LOCK_DLM_PLOCK_DOT_H__
11
12#define GDLM_PLOCK_MISC_NAME "lock_dlm_plock"
13
14#define GDLM_PLOCK_VERSION_MAJOR 1
15#define GDLM_PLOCK_VERSION_MINOR 1
16#define GDLM_PLOCK_VERSION_PATCH 0
17
18enum {
19 GDLM_PLOCK_OP_LOCK = 1,
20 GDLM_PLOCK_OP_UNLOCK,
21 GDLM_PLOCK_OP_GET,
22};
23
24struct gdlm_plock_info {
25 __u32 version[3];
26 __u8 optype;
27 __u8 ex;
28 __u8 wait;
29 __u8 pad;
30 __u32 pid;
31 __s32 nodeid;
32 __s32 rv;
33 __u32 fsid;
34 __u64 number;
35 __u64 start;
36 __u64 end;
37 __u64 owner;
38};
39
40#endif
41
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 8eed44f8ca73..830bbcd449d6 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -5,6 +5,7 @@
5#include <linux/mount.h> 5#include <linux/mount.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/nsproxy.h> 7#include <linux/nsproxy.h>
8#include <linux/seq_file.h>
8 9
9struct mnt_namespace { 10struct mnt_namespace {
10 atomic_t count; 11 atomic_t count;
@@ -14,6 +15,13 @@ struct mnt_namespace {
14 int event; 15 int event;
15}; 16};
16 17
18struct proc_mounts {
19 struct seq_file m; /* must be the first element */
20 struct mnt_namespace *ns;
21 struct path root;
22 int event;
23};
24
17extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, 25extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
18 struct fs_struct *); 26 struct fs_struct *);
19extern void __put_mnt_ns(struct mnt_namespace *ns); 27extern void __put_mnt_ns(struct mnt_namespace *ns);
@@ -37,5 +45,9 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
37 atomic_inc(&ns->count); 45 atomic_inc(&ns->count);
38} 46}
39 47
48extern const struct seq_operations mounts_op;
49extern const struct seq_operations mountinfo_op;
50extern const struct seq_operations mountstats_op;
51
40#endif 52#endif
41#endif 53#endif
diff --git a/include/linux/mount.h b/include/linux/mount.h
index d6600e3f7e45..b4836d58f428 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -56,6 +56,8 @@ struct vfsmount {
56 struct list_head mnt_slave; /* slave list entry */ 56 struct list_head mnt_slave; /* slave list entry */
57 struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */ 57 struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */
58 struct mnt_namespace *mnt_ns; /* containing namespace */ 58 struct mnt_namespace *mnt_ns; /* containing namespace */
59 int mnt_id; /* mount identifier */
60 int mnt_group_id; /* peer group identifier */
59 /* 61 /*
60 * We put mnt_count & mnt_expiry_mark at the end of struct vfsmount 62 * We put mnt_count & mnt_expiry_mark at the end of struct vfsmount
61 * to let these frequently modified fields in a separate cache line 63 * to let these frequently modified fields in a separate cache line
@@ -94,8 +96,6 @@ static inline void mntput(struct vfsmount *mnt)
94 } 96 }
95} 97}
96 98
97extern void free_vfsmnt(struct vfsmount *mnt);
98extern struct vfsmount *alloc_vfsmnt(const char *name);
99extern struct vfsmount *do_kern_mount(const char *fstype, int flags, 99extern struct vfsmount *do_kern_mount(const char *fstype, int flags,
100 const char *name, void *data); 100 const char *name, void *data);
101 101
diff --git a/include/linux/security.h b/include/linux/security.h
index fea1f4aa4dd5..53a34539382a 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -230,7 +230,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
230 * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the 230 * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
231 * pathname of the object being mounted. 231 * pathname of the object being mounted.
232 * @dev_name contains the name for object being mounted. 232 * @dev_name contains the name for object being mounted.
233 * @nd contains the nameidata structure for mount point object. 233 * @path contains the path for mount point object.
234 * @type contains the filesystem type. 234 * @type contains the filesystem type.
235 * @flags contains the mount flags. 235 * @flags contains the mount flags.
236 * @data contains the filesystem-specific data. 236 * @data contains the filesystem-specific data.
@@ -249,7 +249,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
249 * Check permission before the device with superblock @mnt->sb is mounted 249 * Check permission before the device with superblock @mnt->sb is mounted
250 * on the mount point named by @nd. 250 * on the mount point named by @nd.
251 * @mnt contains the vfsmount for device being mounted. 251 * @mnt contains the vfsmount for device being mounted.
252 * @nd contains the nameidata object for the mount point. 252 * @path contains the path for the mount point.
253 * Return 0 if permission is granted. 253 * Return 0 if permission is granted.
254 * @sb_umount: 254 * @sb_umount:
255 * Check permission before the @mnt file system is unmounted. 255 * Check permission before the @mnt file system is unmounted.
@@ -278,16 +278,16 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
278 * This hook is called any time a mount is successfully grafetd to 278 * This hook is called any time a mount is successfully grafetd to
279 * the tree. 279 * the tree.
280 * @mnt contains the mounted filesystem. 280 * @mnt contains the mounted filesystem.
281 * @mountpoint_nd contains the nameidata structure for the mount point. 281 * @mountpoint contains the path for the mount point.
282 * @sb_pivotroot: 282 * @sb_pivotroot:
283 * Check permission before pivoting the root filesystem. 283 * Check permission before pivoting the root filesystem.
284 * @old_nd contains the nameidata structure for the new location of the current root (put_old). 284 * @old_path contains the path for the new location of the current root (put_old).
285 * @new_nd contains the nameidata structure for the new root (new_root). 285 * @new_path contains the path for the new root (new_root).
286 * Return 0 if permission is granted. 286 * Return 0 if permission is granted.
287 * @sb_post_pivotroot: 287 * @sb_post_pivotroot:
288 * Update module state after a successful pivot. 288 * Update module state after a successful pivot.
289 * @old_nd contains the nameidata structure for the old root. 289 * @old_path contains the path for the old root.
290 * @new_nd contains the nameidata structure for the new root. 290 * @new_path contains the path for the new root.
291 * @sb_get_mnt_opts: 291 * @sb_get_mnt_opts:
292 * Get the security relevant mount options used for a superblock 292 * Get the security relevant mount options used for a superblock
293 * @sb the superblock to get security mount options from 293 * @sb the superblock to get security mount options from
@@ -1315,20 +1315,20 @@ struct security_operations {
1315 int (*sb_copy_data)(char *orig, char *copy); 1315 int (*sb_copy_data)(char *orig, char *copy);
1316 int (*sb_kern_mount) (struct super_block *sb, void *data); 1316 int (*sb_kern_mount) (struct super_block *sb, void *data);
1317 int (*sb_statfs) (struct dentry *dentry); 1317 int (*sb_statfs) (struct dentry *dentry);
1318 int (*sb_mount) (char *dev_name, struct nameidata * nd, 1318 int (*sb_mount) (char *dev_name, struct path *path,
1319 char *type, unsigned long flags, void *data); 1319 char *type, unsigned long flags, void *data);
1320 int (*sb_check_sb) (struct vfsmount * mnt, struct nameidata * nd); 1320 int (*sb_check_sb) (struct vfsmount * mnt, struct path *path);
1321 int (*sb_umount) (struct vfsmount * mnt, int flags); 1321 int (*sb_umount) (struct vfsmount * mnt, int flags);
1322 void (*sb_umount_close) (struct vfsmount * mnt); 1322 void (*sb_umount_close) (struct vfsmount * mnt);
1323 void (*sb_umount_busy) (struct vfsmount * mnt); 1323 void (*sb_umount_busy) (struct vfsmount * mnt);
1324 void (*sb_post_remount) (struct vfsmount * mnt, 1324 void (*sb_post_remount) (struct vfsmount * mnt,
1325 unsigned long flags, void *data); 1325 unsigned long flags, void *data);
1326 void (*sb_post_addmount) (struct vfsmount * mnt, 1326 void (*sb_post_addmount) (struct vfsmount * mnt,
1327 struct nameidata * mountpoint_nd); 1327 struct path *mountpoint);
1328 int (*sb_pivotroot) (struct nameidata * old_nd, 1328 int (*sb_pivotroot) (struct path *old_path,
1329 struct nameidata * new_nd); 1329 struct path *new_path);
1330 void (*sb_post_pivotroot) (struct nameidata * old_nd, 1330 void (*sb_post_pivotroot) (struct path *old_path,
1331 struct nameidata * new_nd); 1331 struct path *new_path);
1332 int (*sb_get_mnt_opts) (const struct super_block *sb, 1332 int (*sb_get_mnt_opts) (const struct super_block *sb,
1333 struct security_mnt_opts *opts); 1333 struct security_mnt_opts *opts);
1334 int (*sb_set_mnt_opts) (struct super_block *sb, 1334 int (*sb_set_mnt_opts) (struct super_block *sb,
@@ -1593,16 +1593,16 @@ void security_sb_free(struct super_block *sb);
1593int security_sb_copy_data(char *orig, char *copy); 1593int security_sb_copy_data(char *orig, char *copy);
1594int security_sb_kern_mount(struct super_block *sb, void *data); 1594int security_sb_kern_mount(struct super_block *sb, void *data);
1595int security_sb_statfs(struct dentry *dentry); 1595int security_sb_statfs(struct dentry *dentry);
1596int security_sb_mount(char *dev_name, struct nameidata *nd, 1596int security_sb_mount(char *dev_name, struct path *path,
1597 char *type, unsigned long flags, void *data); 1597 char *type, unsigned long flags, void *data);
1598int security_sb_check_sb(struct vfsmount *mnt, struct nameidata *nd); 1598int security_sb_check_sb(struct vfsmount *mnt, struct path *path);
1599int security_sb_umount(struct vfsmount *mnt, int flags); 1599int security_sb_umount(struct vfsmount *mnt, int flags);
1600void security_sb_umount_close(struct vfsmount *mnt); 1600void security_sb_umount_close(struct vfsmount *mnt);
1601void security_sb_umount_busy(struct vfsmount *mnt); 1601void security_sb_umount_busy(struct vfsmount *mnt);
1602void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *data); 1602void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *data);
1603void security_sb_post_addmount(struct vfsmount *mnt, struct nameidata *mountpoint_nd); 1603void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint);
1604int security_sb_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); 1604int security_sb_pivotroot(struct path *old_path, struct path *new_path);
1605void security_sb_post_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); 1605void security_sb_post_pivotroot(struct path *old_path, struct path *new_path);
1606int security_sb_get_mnt_opts(const struct super_block *sb, 1606int security_sb_get_mnt_opts(const struct super_block *sb,
1607 struct security_mnt_opts *opts); 1607 struct security_mnt_opts *opts);
1608int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts); 1608int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts);
@@ -1872,7 +1872,7 @@ static inline int security_sb_statfs (struct dentry *dentry)
1872 return 0; 1872 return 0;
1873} 1873}
1874 1874
1875static inline int security_sb_mount (char *dev_name, struct nameidata *nd, 1875static inline int security_sb_mount (char *dev_name, struct path *path,
1876 char *type, unsigned long flags, 1876 char *type, unsigned long flags,
1877 void *data) 1877 void *data)
1878{ 1878{
@@ -1880,7 +1880,7 @@ static inline int security_sb_mount (char *dev_name, struct nameidata *nd,
1880} 1880}
1881 1881
1882static inline int security_sb_check_sb (struct vfsmount *mnt, 1882static inline int security_sb_check_sb (struct vfsmount *mnt,
1883 struct nameidata *nd) 1883 struct path *path)
1884{ 1884{
1885 return 0; 1885 return 0;
1886} 1886}
@@ -1901,17 +1901,17 @@ static inline void security_sb_post_remount (struct vfsmount *mnt,
1901{ } 1901{ }
1902 1902
1903static inline void security_sb_post_addmount (struct vfsmount *mnt, 1903static inline void security_sb_post_addmount (struct vfsmount *mnt,
1904 struct nameidata *mountpoint_nd) 1904 struct path *mountpoint)
1905{ } 1905{ }
1906 1906
1907static inline int security_sb_pivotroot (struct nameidata *old_nd, 1907static inline int security_sb_pivotroot (struct path *old_path,
1908 struct nameidata *new_nd) 1908 struct path *new_path)
1909{ 1909{
1910 return 0; 1910 return 0;
1911} 1911}
1912 1912
1913static inline void security_sb_post_pivotroot (struct nameidata *old_nd, 1913static inline void security_sb_post_pivotroot (struct path *old_path,
1914 struct nameidata *new_nd) 1914 struct path *new_path)
1915{ } 1915{ }
1916static inline int security_sb_get_mnt_opts(const struct super_block *sb, 1916static inline int security_sb_get_mnt_opts(const struct super_block *sb,
1917 struct security_mnt_opts *opts) 1917 struct security_mnt_opts *opts)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 1da1e6208a0a..5b5369c3c209 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -10,6 +10,7 @@ struct seq_operations;
10struct file; 10struct file;
11struct path; 11struct path;
12struct inode; 12struct inode;
13struct dentry;
13 14
14struct seq_file { 15struct seq_file {
15 char *buf; 16 char *buf;
@@ -30,6 +31,8 @@ struct seq_operations {
30 int (*show) (struct seq_file *m, void *v); 31 int (*show) (struct seq_file *m, void *v);
31}; 32};
32 33
34#define SEQ_SKIP 1
35
33int seq_open(struct file *, const struct seq_operations *); 36int seq_open(struct file *, const struct seq_operations *);
34ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 37ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
35loff_t seq_lseek(struct file *, loff_t, int); 38loff_t seq_lseek(struct file *, loff_t, int);
@@ -42,6 +45,9 @@ int seq_printf(struct seq_file *, const char *, ...)
42 __attribute__ ((format (printf,2,3))); 45 __attribute__ ((format (printf,2,3)));
43 46
44int seq_path(struct seq_file *, struct path *, char *); 47int seq_path(struct seq_file *, struct path *, char *);
48int seq_dentry(struct seq_file *, struct dentry *, char *);
49int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
50 char *esc);
45 51
46int single_open(struct file *, int (*)(struct seq_file *, void *), void *); 52int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
47int single_release(struct inode *, struct file *); 53int single_release(struct inode *, struct file *);
diff --git a/include/linux/udf_fs.h b/include/linux/udf_fs.h
deleted file mode 100644
index aa88654eb76b..000000000000
--- a/include/linux/udf_fs.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * udf_fs.h
3 *
4 * PURPOSE
5 * Included by fs/filesystems.c
6 *
7 * DESCRIPTION
8 * OSTA-UDF(tm) = Optical Storage Technology Association
9 * Universal Disk Format.
10 *
11 * This code is based on version 2.50 of the UDF specification,
12 * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
13 * http://www.osta.org/ * http://www.ecma.ch/
14 * http://www.iso.org/
15 *
16 * COPYRIGHT
17 * This file is distributed under the terms of the GNU General Public
18 * License (GPL). Copies of the GPL can be obtained from:
19 * ftp://prep.ai.mit.edu/pub/gnu/GPL
20 * Each contributing author retains all rights to their own work.
21 *
22 * (C) 1999-2004 Ben Fennema
23 * (C) 1999-2000 Stelias Computing Inc
24 *
25 * HISTORY
26 *
27 */
28
29#ifndef _UDF_FS_H
30#define _UDF_FS_H 1
31
32#define UDF_PREALLOCATE
33#define UDF_DEFAULT_PREALLOC_BLOCKS 8
34
35#undef UDFFS_DEBUG
36
37#ifdef UDFFS_DEBUG
38#define udf_debug(f, a...) \
39 do { \
40 printk (KERN_DEBUG "UDF-fs DEBUG %s:%d:%s: ", \
41 __FILE__, __LINE__, __FUNCTION__); \
42 printk (f, ##a); \
43 } while (0)
44#else
45#define udf_debug(f, a...) /**/
46#endif
47
48#define udf_info(f, a...) \
49 printk (KERN_INFO "UDF-fs INFO " f, ##a);
50
51#endif /* _UDF_FS_H */
diff --git a/include/linux/udf_fs_i.h b/include/linux/udf_fs_i.h
index ffaf05679ffb..3536965913b0 100644
--- a/include/linux/udf_fs_i.h
+++ b/include/linux/udf_fs_i.h
@@ -9,41 +9,10 @@
9 * ftp://prep.ai.mit.edu/pub/gnu/GPL 9 * ftp://prep.ai.mit.edu/pub/gnu/GPL
10 * Each contributing author retains all rights to their own work. 10 * Each contributing author retains all rights to their own work.
11 */ 11 */
12
13#ifndef _UDF_FS_I_H 12#ifndef _UDF_FS_I_H
14#define _UDF_FS_I_H 1 13#define _UDF_FS_I_H 1
15 14
16#ifdef __KERNEL__
17
18struct udf_inode_info
19{
20 struct timespec i_crtime;
21 /* Physical address of inode */
22 kernel_lb_addr i_location;
23 __u64 i_unique;
24 __u32 i_lenEAttr;
25 __u32 i_lenAlloc;
26 __u64 i_lenExtents;
27 __u32 i_next_alloc_block;
28 __u32 i_next_alloc_goal;
29 unsigned i_alloc_type : 3;
30 unsigned i_efe : 1;
31 unsigned i_use : 1;
32 unsigned i_strat4096 : 1;
33 unsigned reserved : 26;
34 union
35 {
36 short_ad *i_sad;
37 long_ad *i_lad;
38 __u8 *i_data;
39 } i_ext;
40 struct inode vfs_inode;
41};
42
43#endif
44
45/* exported IOCTLs, we have 'l', 0x40-0x7f */ 15/* exported IOCTLs, we have 'l', 0x40-0x7f */
46
47#define UDF_GETEASIZE _IOR('l', 0x40, int) 16#define UDF_GETEASIZE _IOR('l', 0x40, int)
48#define UDF_GETEABLOCK _IOR('l', 0x41, void *) 17#define UDF_GETEABLOCK _IOR('l', 0x41, void *)
49#define UDF_GETVOLIDENT _IOR('l', 0x42, void *) 18#define UDF_GETVOLIDENT _IOR('l', 0x42, void *)
diff --git a/include/linux/udf_fs_sb.h b/include/linux/udf_fs_sb.h
deleted file mode 100644
index 9bc47352b6b4..000000000000
--- a/include/linux/udf_fs_sb.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * udf_fs_sb.h
3 *
4 * This include file is for the Linux kernel/module.
5 *
6 * COPYRIGHT
7 * This file is distributed under the terms of the GNU General Public
8 * License (GPL). Copies of the GPL can be obtained from:
9 * ftp://prep.ai.mit.edu/pub/gnu/GPL
10 * Each contributing author retains all rights to their own work.
11 */
12
13#ifndef _UDF_FS_SB_H
14#define _UDF_FS_SB_H 1
15
16#include <linux/mutex.h>
17
18#pragma pack(1)
19
20#define UDF_MAX_BLOCK_LOADED 8
21
22#define UDF_TYPE1_MAP15 0x1511U
23#define UDF_VIRTUAL_MAP15 0x1512U
24#define UDF_VIRTUAL_MAP20 0x2012U
25#define UDF_SPARABLE_MAP15 0x1522U
26
27struct udf_sparing_data
28{
29 __u16 s_packet_len;
30 struct buffer_head *s_spar_map[4];
31};
32
33struct udf_virtual_data
34{
35 __u32 s_num_entries;
36 __u16 s_start_offset;
37};
38
39struct udf_bitmap
40{
41 __u32 s_extLength;
42 __u32 s_extPosition;
43 __u16 s_nr_groups;
44 struct buffer_head **s_block_bitmap;
45};
46
47struct udf_part_map
48{
49 union
50 {
51 struct udf_bitmap *s_bitmap;
52 struct inode *s_table;
53 } s_uspace;
54 union
55 {
56 struct udf_bitmap *s_bitmap;
57 struct inode *s_table;
58 } s_fspace;
59 __u32 s_partition_root;
60 __u32 s_partition_len;
61 __u16 s_partition_type;
62 __u16 s_partition_num;
63 union
64 {
65 struct udf_sparing_data s_sparing;
66 struct udf_virtual_data s_virtual;
67 } s_type_specific;
68 __u32 (*s_partition_func)(struct super_block *, __u32, __u16, __u32);
69 __u16 s_volumeseqnum;
70 __u16 s_partition_flags;
71};
72
73#pragma pack()
74
75struct udf_sb_info
76{
77 struct udf_part_map *s_partmaps;
78 __u8 s_volume_ident[32];
79
80 /* Overall info */
81 __u16 s_partitions;
82 __u16 s_partition;
83
84 /* Sector headers */
85 __s32 s_session;
86 __u32 s_anchor[4];
87 __u32 s_last_block;
88
89 struct buffer_head *s_lvid_bh;
90
91 /* Default permissions */
92 mode_t s_umask;
93 gid_t s_gid;
94 uid_t s_uid;
95
96 /* Root Info */
97 struct timespec s_record_time;
98
99 /* Fileset Info */
100 __u16 s_serial_number;
101
102 /* highest UDF revision we have recorded to this media */
103 __u16 s_udfrev;
104
105 /* Miscellaneous flags */
106 __u32 s_flags;
107
108 /* Encoding info */
109 struct nls_table *s_nls_map;
110
111 /* VAT inode */
112 struct inode *s_vat_inode;
113
114 struct mutex s_alloc_mutex;
115};
116
117#endif /* _UDF_FS_SB_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b56b6a10fe5e..baa9f372cfd1 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -436,6 +436,9 @@ struct xfrm_tmpl
436/* May skip this transfomration if no SA is found */ 436/* May skip this transfomration if no SA is found */
437 __u8 optional; 437 __u8 optional;
438 438
439/* Skip aalgos/ealgos/calgos checks. */
440 __u8 allalgs;
441
439/* Bit mask of algos allowed for acquisition */ 442/* Bit mask of algos allowed for acquisition */
440 __u32 aalgos; 443 __u32 aalgos;
441 __u32 ealgos; 444 __u32 ealgos;
diff --git a/kernel/exit.c b/kernel/exit.c
index 073005b1cfb2..cece89f80ab4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -521,7 +521,7 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
521} 521}
522EXPORT_SYMBOL(reset_files_struct); 522EXPORT_SYMBOL(reset_files_struct);
523 523
524static void __exit_files(struct task_struct *tsk) 524void exit_files(struct task_struct *tsk)
525{ 525{
526 struct files_struct * files = tsk->files; 526 struct files_struct * files = tsk->files;
527 527
@@ -533,12 +533,7 @@ static void __exit_files(struct task_struct *tsk)
533 } 533 }
534} 534}
535 535
536void exit_files(struct task_struct *tsk) 536void put_fs_struct(struct fs_struct *fs)
537{
538 __exit_files(tsk);
539}
540
541static void __put_fs_struct(struct fs_struct *fs)
542{ 537{
543 /* No need to hold fs->lock if we are killing it */ 538 /* No need to hold fs->lock if we are killing it */
544 if (atomic_dec_and_test(&fs->count)) { 539 if (atomic_dec_and_test(&fs->count)) {
@@ -550,12 +545,7 @@ static void __put_fs_struct(struct fs_struct *fs)
550 } 545 }
551} 546}
552 547
553void put_fs_struct(struct fs_struct *fs) 548void exit_fs(struct task_struct *tsk)
554{
555 __put_fs_struct(fs);
556}
557
558static void __exit_fs(struct task_struct *tsk)
559{ 549{
560 struct fs_struct * fs = tsk->fs; 550 struct fs_struct * fs = tsk->fs;
561 551
@@ -563,15 +553,10 @@ static void __exit_fs(struct task_struct *tsk)
563 task_lock(tsk); 553 task_lock(tsk);
564 tsk->fs = NULL; 554 tsk->fs = NULL;
565 task_unlock(tsk); 555 task_unlock(tsk);
566 __put_fs_struct(fs); 556 put_fs_struct(fs);
567 } 557 }
568} 558}
569 559
570void exit_fs(struct task_struct *tsk)
571{
572 __exit_fs(tsk);
573}
574
575EXPORT_SYMBOL_GPL(exit_fs); 560EXPORT_SYMBOL_GPL(exit_fs);
576 561
577/* 562/*
@@ -967,8 +952,8 @@ NORET_TYPE void do_exit(long code)
967 if (group_dead) 952 if (group_dead)
968 acct_process(); 953 acct_process();
969 exit_sem(tsk); 954 exit_sem(tsk);
970 __exit_files(tsk); 955 exit_files(tsk);
971 __exit_fs(tsk); 956 exit_fs(tsk);
972 check_stack_usage(); 957 check_stack_usage();
973 exit_thread(); 958 exit_thread();
974 cgroup_exit(tsk, 1); 959 cgroup_exit(tsk, 1);
diff --git a/kernel/sched.c b/kernel/sched.c
index 57ba7ea9b744..0014b03adaca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7035,6 +7035,7 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
7035/** 7035/**
7036 * sched_domain_node_span - get a cpumask for a node's sched_domain 7036 * sched_domain_node_span - get a cpumask for a node's sched_domain
7037 * @node: node whose cpumask we're constructing 7037 * @node: node whose cpumask we're constructing
7038 * @span: resulting cpumask
7038 * 7039 *
7039 * Given a node, construct a good cpumask for its sched_domain to span. It 7040 * Given a node, construct a good cpumask for its sched_domain to span. It
7040 * should be one that prevents unnecessary balancing, but also spreads tasks 7041 * should be one that prevents unnecessary balancing, but also spreads tasks
diff --git a/kernel/time.c b/kernel/time.c
index a5ec013b6c80..35d373a98782 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -379,6 +379,7 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
379 ts->tv_sec = sec; 379 ts->tv_sec = sec;
380 ts->tv_nsec = nsec; 380 ts->tv_nsec = nsec;
381} 381}
382EXPORT_SYMBOL(set_normalized_timespec);
382 383
383/** 384/**
384 * ns_to_timespec - Convert nanoseconds to timespec 385 * ns_to_timespec - Convert nanoseconds to timespec
diff --git a/mm/slub.c b/mm/slub.c
index 7f8aaa291a4e..39592b5ce68a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -521,7 +521,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
521static void object_err(struct kmem_cache *s, struct page *page, 521static void object_err(struct kmem_cache *s, struct page *page,
522 u8 *object, char *reason) 522 u8 *object, char *reason)
523{ 523{
524 slab_bug(s, reason); 524 slab_bug(s, "%s", reason);
525 print_trailer(s, page, object); 525 print_trailer(s, page, object);
526} 526}
527 527
@@ -533,7 +533,7 @@ static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
533 va_start(args, fmt); 533 va_start(args, fmt);
534 vsnprintf(buf, sizeof(buf), fmt, args); 534 vsnprintf(buf, sizeof(buf), fmt, args);
535 va_end(args); 535 va_end(args);
536 slab_bug(s, fmt); 536 slab_bug(s, "%s", buf);
537 print_page_info(page); 537 print_page_info(page);
538 dump_stack(); 538 dump_stack();
539} 539}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7635d3f72723..4e7b847347f7 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -87,6 +87,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
87 return ret; 87 return ret;
88} 88}
89 89
90NETDEVICE_SHOW(dev_id, fmt_hex);
90NETDEVICE_SHOW(addr_len, fmt_dec); 91NETDEVICE_SHOW(addr_len, fmt_dec);
91NETDEVICE_SHOW(iflink, fmt_dec); 92NETDEVICE_SHOW(iflink, fmt_dec);
92NETDEVICE_SHOW(ifindex, fmt_dec); 93NETDEVICE_SHOW(ifindex, fmt_dec);
@@ -210,6 +211,7 @@ static ssize_t store_tx_queue_len(struct device *dev,
210 211
211static struct device_attribute net_class_attributes[] = { 212static struct device_attribute net_class_attributes[] = {
212 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), 213 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
214 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
213 __ATTR(iflink, S_IRUGO, show_iflink, NULL), 215 __ATTR(iflink, S_IRUGO, show_iflink, NULL),
214 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), 216 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
215 __ATTR(features, S_IRUGO, show_features, NULL), 217 __ATTR(features, S_IRUGO, show_features, NULL),
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 7053bb827bc8..6e1df62bd7c9 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -46,29 +46,24 @@ struct {
46 struct kfifo *fifo; 46 struct kfifo *fifo;
47 spinlock_t lock; 47 spinlock_t lock;
48 wait_queue_head_t wait; 48 wait_queue_head_t wait;
49 struct timeval tstart; 49 struct timespec tstart;
50} dccpw; 50} dccpw;
51 51
52static void printl(const char *fmt, ...) 52static void printl(const char *fmt, ...)
53{ 53{
54 va_list args; 54 va_list args;
55 int len; 55 int len;
56 struct timeval now; 56 struct timespec now;
57 char tbuf[256]; 57 char tbuf[256];
58 58
59 va_start(args, fmt); 59 va_start(args, fmt);
60 do_gettimeofday(&now); 60 getnstimeofday(&now);
61 61
62 now.tv_sec -= dccpw.tstart.tv_sec; 62 now = timespec_sub(now, dccpw.tstart);
63 now.tv_usec -= dccpw.tstart.tv_usec;
64 if (now.tv_usec < 0) {
65 --now.tv_sec;
66 now.tv_usec += 1000000;
67 }
68 63
69 len = sprintf(tbuf, "%lu.%06lu ", 64 len = sprintf(tbuf, "%lu.%06lu ",
70 (unsigned long) now.tv_sec, 65 (unsigned long) now.tv_sec,
71 (unsigned long) now.tv_usec); 66 (unsigned long) now.tv_nsec / NSEC_PER_USEC);
72 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); 67 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
73 va_end(args); 68 va_end(args);
74 69
@@ -119,7 +114,7 @@ static struct jprobe dccp_send_probe = {
119static int dccpprobe_open(struct inode *inode, struct file *file) 114static int dccpprobe_open(struct inode *inode, struct file *file)
120{ 115{
121 kfifo_reset(dccpw.fifo); 116 kfifo_reset(dccpw.fifo);
122 do_gettimeofday(&dccpw.tstart); 117 getnstimeofday(&dccpw.tstart);
123 return 0; 118 return 0;
124} 119}
125 120
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f064031f2031..c67d00e8c600 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -847,7 +847,7 @@ static void icmp_echo(struct sk_buff *skb)
847 */ 847 */
848static void icmp_timestamp(struct sk_buff *skb) 848static void icmp_timestamp(struct sk_buff *skb)
849{ 849{
850 struct timeval tv; 850 struct timespec tv;
851 struct icmp_bxm icmp_param; 851 struct icmp_bxm icmp_param;
852 /* 852 /*
853 * Too short. 853 * Too short.
@@ -858,9 +858,9 @@ static void icmp_timestamp(struct sk_buff *skb)
858 /* 858 /*
859 * Fill in the current time as ms since midnight UT: 859 * Fill in the current time as ms since midnight UT:
860 */ 860 */
861 do_gettimeofday(&tv); 861 getnstimeofday(&tv);
862 icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * 1000 + 862 icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
863 tv.tv_usec / 1000); 863 tv.tv_nsec / NSEC_PER_MSEC);
864 icmp_param.data.times[2] = icmp_param.data.times[1]; 864 icmp_param.data.times[2] = icmp_param.data.times[1];
865 if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) 865 if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
866 BUG(); 866 BUG();
@@ -1144,7 +1144,7 @@ static void __net_exit icmp_sk_exit(struct net *net)
1144 net->ipv4.icmp_sk = NULL; 1144 net->ipv4.icmp_sk = NULL;
1145} 1145}
1146 1146
1147int __net_init icmp_sk_init(struct net *net) 1147static int __net_init icmp_sk_init(struct net *net)
1148{ 1148{
1149 int i, err; 1149 int i, err;
1150 1150
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index d107543d3f81..33126ad2cfdc 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -55,10 +55,10 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
55 if (opt->ts_needaddr) 55 if (opt->ts_needaddr)
56 ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt); 56 ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt);
57 if (opt->ts_needtime) { 57 if (opt->ts_needtime) {
58 struct timeval tv; 58 struct timespec tv;
59 __be32 midtime; 59 __be32 midtime;
60 do_gettimeofday(&tv); 60 getnstimeofday(&tv);
61 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); 61 midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
62 memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); 62 memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
63 } 63 }
64 return; 64 return;
@@ -406,10 +406,10 @@ int ip_options_compile(struct net *net,
406 break; 406 break;
407 } 407 }
408 if (timeptr) { 408 if (timeptr) {
409 struct timeval tv; 409 struct timespec tv;
410 __be32 midtime; 410 __be32 midtime;
411 do_gettimeofday(&tv); 411 getnstimeofday(&tv);
412 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); 412 midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
413 memcpy(timeptr, &midtime, sizeof(__be32)); 413 memcpy(timeptr, &midtime, sizeof(__be32));
414 opt->is_changed = 1; 414 opt->is_changed = 1;
415 } 415 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 58ac838bf460..f88653138621 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1722,7 +1722,7 @@ static int tcp_close_state(struct sock *sk)
1722 1722
1723/* 1723/*
1724 * Shutdown the sending side of a connection. Much like close except 1724 * Shutdown the sending side of a connection. Much like close except
1725 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD). 1725 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1726 */ 1726 */
1727 1727
1728void tcp_shutdown(struct sock *sk, int how) 1728void tcp_shutdown(struct sock *sk, int how)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cdc051bfdb4d..ac9b8482f702 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2298,7 +2298,7 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
2298{ 2298{
2299 return !tp->retrans_stamp || 2299 return !tp->retrans_stamp ||
2300 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2300 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2301 (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0); 2301 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
2302} 2302}
2303 2303
2304/* Undo procedures. */ 2304/* Undo procedures. */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8a0fd4007bdb..e591e09e5e4e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4338,12 +4338,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
4338 4338
4339EXPORT_SYMBOL(unregister_inet6addr_notifier); 4339EXPORT_SYMBOL(unregister_inet6addr_notifier);
4340 4340
4341
4342static int addrconf_net_init(struct net *net)
4343{
4344 return 0;
4345}
4346
4347static void addrconf_net_exit(struct net *net) 4341static void addrconf_net_exit(struct net *net)
4348{ 4342{
4349 struct net_device *dev; 4343 struct net_device *dev;
@@ -4360,7 +4354,6 @@ static void addrconf_net_exit(struct net *net)
4360} 4354}
4361 4355
4362static struct pernet_operations addrconf_net_ops = { 4356static struct pernet_operations addrconf_net_ops = {
4363 .init = addrconf_net_init,
4364 .exit = addrconf_net_exit, 4357 .exit = addrconf_net_exit,
4365}; 4358};
4366 4359
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 50f3f8f8a59b..1ee4fa17c129 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1543,7 +1543,7 @@ out_timer:
1543static void fib6_net_exit(struct net *net) 1543static void fib6_net_exit(struct net *net)
1544{ 1544{
1545 rt6_ifdown(net, NULL); 1545 rt6_ifdown(net, NULL);
1546 del_timer(net->ipv6.ip6_fib_timer); 1546 del_timer_sync(net->ipv6.ip6_fib_timer);
1547 kfree(net->ipv6.ip6_fib_timer); 1547 kfree(net->ipv6.ip6_fib_timer);
1548#ifdef CONFIG_IPV6_MULTIPLE_TABLES 1548#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1549 kfree(net->ipv6.fib6_local_tbl); 1549 kfree(net->ipv6.fib6_local_tbl);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 210a079cfc6f..a493ad9b8914 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -150,7 +150,7 @@ static struct rt6_info ip6_null_entry_template = {
150static int ip6_pkt_prohibit(struct sk_buff *skb); 150static int ip6_pkt_prohibit(struct sk_buff *skb);
151static int ip6_pkt_prohibit_out(struct sk_buff *skb); 151static int ip6_pkt_prohibit_out(struct sk_buff *skb);
152 152
153struct rt6_info ip6_prohibit_entry_template = { 153static struct rt6_info ip6_prohibit_entry_template = {
154 .u = { 154 .u = {
155 .dst = { 155 .dst = {
156 .__refcnt = ATOMIC_INIT(1), 156 .__refcnt = ATOMIC_INIT(1),
@@ -2614,9 +2614,8 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2614 2614
2615static int ip6_route_net_init(struct net *net) 2615static int ip6_route_net_init(struct net *net)
2616{ 2616{
2617 int ret = 0; 2617 int ret = -ENOMEM;
2618 2618
2619 ret = -ENOMEM;
2620 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template, 2619 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template,
2621 sizeof(*net->ipv6.ip6_dst_ops), 2620 sizeof(*net->ipv6.ip6_dst_ops),
2622 GFP_KERNEL); 2621 GFP_KERNEL);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1fb0fe42a72e..81a8e5297ad1 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1907,7 +1907,7 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1907 t->encap_family = xp->family; 1907 t->encap_family = xp->family;
1908 1908
1909 /* No way to set this via kame pfkey */ 1909 /* No way to set this via kame pfkey */
1910 t->aalgos = t->ealgos = t->calgos = ~0; 1910 t->allalgs = 1;
1911 xp->xfrm_nr++; 1911 xp->xfrm_nr++;
1912 return 0; 1912 return 0;
1913} 1913}
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index fb9359fb2358..5053a53ba24f 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -857,7 +857,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
857 src_addr = (rose_address *)(skb->data + 9); 857 src_addr = (rose_address *)(skb->data + 9);
858 dest_addr = (rose_address *)(skb->data + 4); 858 dest_addr = (rose_address *)(skb->data + 4);
859 859
860 spin_lock_bh(&rose_node_list_lock);
861 spin_lock_bh(&rose_neigh_list_lock); 860 spin_lock_bh(&rose_neigh_list_lock);
862 spin_lock_bh(&rose_route_list_lock); 861 spin_lock_bh(&rose_route_list_lock);
863 862
@@ -1060,7 +1059,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
1060out: 1059out:
1061 spin_unlock_bh(&rose_route_list_lock); 1060 spin_unlock_bh(&rose_route_list_lock);
1062 spin_unlock_bh(&rose_neigh_list_lock); 1061 spin_unlock_bh(&rose_neigh_list_lock);
1063 spin_unlock_bh(&rose_node_list_lock);
1064 1062
1065 return res; 1063 return res;
1066} 1064}
diff --git a/net/socket.c b/net/socket.c
index 9b5c917f8a6b..66c4a8cf6db9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2327,9 +2327,6 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
2327 return sock->ops->shutdown(sock, how); 2327 return sock->ops->shutdown(sock, how);
2328} 2328}
2329 2329
2330/* ABI emulation layers need these two */
2331EXPORT_SYMBOL(move_addr_to_kernel);
2332EXPORT_SYMBOL(move_addr_to_user);
2333EXPORT_SYMBOL(sock_create); 2330EXPORT_SYMBOL(sock_create);
2334EXPORT_SYMBOL(sock_create_kern); 2331EXPORT_SYMBOL(sock_create_kern);
2335EXPORT_SYMBOL(sock_create_lite); 2332EXPORT_SYMBOL(sock_create_lite);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ab4d0e598a2c..e0c0390613c0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1819,7 +1819,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
1819 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 1819 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
1820 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 1820 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
1821 x->props.mode == tmpl->mode && 1821 x->props.mode == tmpl->mode &&
1822 ((tmpl->aalgos & (1<<x->props.aalgo)) || 1822 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
1823 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 1823 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
1824 !(x->props.mode != XFRM_MODE_TRANSPORT && 1824 !(x->props.mode != XFRM_MODE_TRANSPORT &&
1825 xfrm_state_addr_cmp(tmpl, x, family)); 1825 xfrm_state_addr_cmp(tmpl, x, family));
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1810f5645bb5..22a30ae582a2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -981,6 +981,8 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
981 t->aalgos = ut->aalgos; 981 t->aalgos = ut->aalgos;
982 t->ealgos = ut->ealgos; 982 t->ealgos = ut->ealgos;
983 t->calgos = ut->calgos; 983 t->calgos = ut->calgos;
984 /* If all masks are ~0, then we allow all algorithms. */
985 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
984 t->encap_family = ut->family; 986 t->encap_family = ut->family;
985 } 987 }
986} 988}
diff --git a/security/dummy.c b/security/dummy.c
index 98d5f969cdc8..b0232bbf427b 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -196,13 +196,13 @@ static int dummy_sb_statfs (struct dentry *dentry)
196 return 0; 196 return 0;
197} 197}
198 198
199static int dummy_sb_mount (char *dev_name, struct nameidata *nd, char *type, 199static int dummy_sb_mount (char *dev_name, struct path *path, char *type,
200 unsigned long flags, void *data) 200 unsigned long flags, void *data)
201{ 201{
202 return 0; 202 return 0;
203} 203}
204 204
205static int dummy_sb_check_sb (struct vfsmount *mnt, struct nameidata *nd) 205static int dummy_sb_check_sb (struct vfsmount *mnt, struct path *path)
206{ 206{
207 return 0; 207 return 0;
208} 208}
@@ -229,17 +229,17 @@ static void dummy_sb_post_remount (struct vfsmount *mnt, unsigned long flags,
229} 229}
230 230
231 231
232static void dummy_sb_post_addmount (struct vfsmount *mnt, struct nameidata *nd) 232static void dummy_sb_post_addmount (struct vfsmount *mnt, struct path *path)
233{ 233{
234 return; 234 return;
235} 235}
236 236
237static int dummy_sb_pivotroot (struct nameidata *old_nd, struct nameidata *new_nd) 237static int dummy_sb_pivotroot (struct path *old_path, struct path *new_path)
238{ 238{
239 return 0; 239 return 0;
240} 240}
241 241
242static void dummy_sb_post_pivotroot (struct nameidata *old_nd, struct nameidata *new_nd) 242static void dummy_sb_post_pivotroot (struct path *old_path, struct path *new_path)
243{ 243{
244 return; 244 return;
245} 245}
diff --git a/security/security.c b/security/security.c
index 2e250c7028eb..8a285c7b9962 100644
--- a/security/security.c
+++ b/security/security.c
@@ -296,15 +296,15 @@ int security_sb_statfs(struct dentry *dentry)
296 return security_ops->sb_statfs(dentry); 296 return security_ops->sb_statfs(dentry);
297} 297}
298 298
299int security_sb_mount(char *dev_name, struct nameidata *nd, 299int security_sb_mount(char *dev_name, struct path *path,
300 char *type, unsigned long flags, void *data) 300 char *type, unsigned long flags, void *data)
301{ 301{
302 return security_ops->sb_mount(dev_name, nd, type, flags, data); 302 return security_ops->sb_mount(dev_name, path, type, flags, data);
303} 303}
304 304
305int security_sb_check_sb(struct vfsmount *mnt, struct nameidata *nd) 305int security_sb_check_sb(struct vfsmount *mnt, struct path *path)
306{ 306{
307 return security_ops->sb_check_sb(mnt, nd); 307 return security_ops->sb_check_sb(mnt, path);
308} 308}
309 309
310int security_sb_umount(struct vfsmount *mnt, int flags) 310int security_sb_umount(struct vfsmount *mnt, int flags)
@@ -327,19 +327,19 @@ void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *d
327 security_ops->sb_post_remount(mnt, flags, data); 327 security_ops->sb_post_remount(mnt, flags, data);
328} 328}
329 329
330void security_sb_post_addmount(struct vfsmount *mnt, struct nameidata *mountpoint_nd) 330void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint)
331{ 331{
332 security_ops->sb_post_addmount(mnt, mountpoint_nd); 332 security_ops->sb_post_addmount(mnt, mountpoint);
333} 333}
334 334
335int security_sb_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd) 335int security_sb_pivotroot(struct path *old_path, struct path *new_path)
336{ 336{
337 return security_ops->sb_pivotroot(old_nd, new_nd); 337 return security_ops->sb_pivotroot(old_path, new_path);
338} 338}
339 339
340void security_sb_post_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd) 340void security_sb_post_pivotroot(struct path *old_path, struct path *new_path)
341{ 341{
342 security_ops->sb_post_pivotroot(old_nd, new_nd); 342 security_ops->sb_post_pivotroot(old_path, new_path);
343} 343}
344 344
345int security_sb_get_mnt_opts(const struct super_block *sb, 345int security_sb_get_mnt_opts(const struct super_block *sb,
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 1d69f6649bff..95a8ef4a5073 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -312,6 +312,7 @@ static inline int avc_reclaim_node(void)
312 if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags)) 312 if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
313 continue; 313 continue;
314 314
315 rcu_read_lock();
315 list_for_each_entry(node, &avc_cache.slots[hvalue], list) { 316 list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
316 if (atomic_dec_and_test(&node->ae.used)) { 317 if (atomic_dec_and_test(&node->ae.used)) {
317 /* Recently Unused */ 318 /* Recently Unused */
@@ -319,11 +320,13 @@ static inline int avc_reclaim_node(void)
319 avc_cache_stats_incr(reclaims); 320 avc_cache_stats_incr(reclaims);
320 ecx++; 321 ecx++;
321 if (ecx >= AVC_CACHE_RECLAIM) { 322 if (ecx >= AVC_CACHE_RECLAIM) {
323 rcu_read_unlock();
322 spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags); 324 spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
323 goto out; 325 goto out;
324 } 326 }
325 } 327 }
326 } 328 }
329 rcu_read_unlock();
327 spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags); 330 spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
328 } 331 }
329out: 332out:
@@ -821,8 +824,14 @@ int avc_ss_reset(u32 seqno)
821 824
822 for (i = 0; i < AVC_CACHE_SLOTS; i++) { 825 for (i = 0; i < AVC_CACHE_SLOTS; i++) {
823 spin_lock_irqsave(&avc_cache.slots_lock[i], flag); 826 spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
827 /*
828 * With preemptable RCU, the outer spinlock does not
829 * prevent RCU grace periods from ending.
830 */
831 rcu_read_lock();
824 list_for_each_entry(node, &avc_cache.slots[i], list) 832 list_for_each_entry(node, &avc_cache.slots[i], list)
825 avc_node_delete(node); 833 avc_node_delete(node);
834 rcu_read_unlock();
826 spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag); 835 spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
827 } 836 }
828 837
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 1bf2543ea942..308e2cf17d75 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -755,9 +755,18 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
755 int set_context = (oldsbsec->flags & CONTEXT_MNT); 755 int set_context = (oldsbsec->flags & CONTEXT_MNT);
756 int set_rootcontext = (oldsbsec->flags & ROOTCONTEXT_MNT); 756 int set_rootcontext = (oldsbsec->flags & ROOTCONTEXT_MNT);
757 757
758 /* we can't error, we can't save the info, this shouldn't get called 758 /*
759 * this early in the boot process. */ 759 * if the parent was able to be mounted it clearly had no special lsm
760 BUG_ON(!ss_initialized); 760 * mount options. thus we can safely put this sb on the list and deal
761 * with it later
762 */
763 if (!ss_initialized) {
764 spin_lock(&sb_security_lock);
765 if (list_empty(&newsbsec->list))
766 list_add(&newsbsec->list, &superblock_security_head);
767 spin_unlock(&sb_security_lock);
768 return;
769 }
761 770
762 /* how can we clone if the old one wasn't set up?? */ 771 /* how can we clone if the old one wasn't set up?? */
763 BUG_ON(!oldsbsec->initialized); 772 BUG_ON(!oldsbsec->initialized);
@@ -2392,22 +2401,22 @@ static int selinux_sb_statfs(struct dentry *dentry)
2392} 2401}
2393 2402
2394static int selinux_mount(char *dev_name, 2403static int selinux_mount(char *dev_name,
2395 struct nameidata *nd, 2404 struct path *path,
2396 char *type, 2405 char *type,
2397 unsigned long flags, 2406 unsigned long flags,
2398 void *data) 2407 void *data)
2399{ 2408{
2400 int rc; 2409 int rc;
2401 2410
2402 rc = secondary_ops->sb_mount(dev_name, nd, type, flags, data); 2411 rc = secondary_ops->sb_mount(dev_name, path, type, flags, data);
2403 if (rc) 2412 if (rc)
2404 return rc; 2413 return rc;
2405 2414
2406 if (flags & MS_REMOUNT) 2415 if (flags & MS_REMOUNT)
2407 return superblock_has_perm(current, nd->path.mnt->mnt_sb, 2416 return superblock_has_perm(current, path->mnt->mnt_sb,
2408 FILESYSTEM__REMOUNT, NULL); 2417 FILESYSTEM__REMOUNT, NULL);
2409 else 2418 else
2410 return dentry_has_perm(current, nd->path.mnt, nd->path.dentry, 2419 return dentry_has_perm(current, path->mnt, path->dentry,
2411 FILE__MOUNTON); 2420 FILE__MOUNTON);
2412} 2421}
2413 2422
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index c658b84c3196..b4e14bc0bf32 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -239,11 +239,13 @@ static void sel_netif_kill(int ifindex)
239{ 239{
240 struct sel_netif *netif; 240 struct sel_netif *netif;
241 241
242 rcu_read_lock();
242 spin_lock_bh(&sel_netif_lock); 243 spin_lock_bh(&sel_netif_lock);
243 netif = sel_netif_find(ifindex); 244 netif = sel_netif_find(ifindex);
244 if (netif) 245 if (netif)
245 sel_netif_destroy(netif); 246 sel_netif_destroy(netif);
246 spin_unlock_bh(&sel_netif_lock); 247 spin_unlock_bh(&sel_netif_lock);
248 rcu_read_unlock();
247} 249}
248 250
249/** 251/**
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 93f5b0ce662a..4215971434e6 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -315,10 +315,10 @@ static int smack_sb_statfs(struct dentry *dentry)
315 * Returns 0 if current can write the floor of the filesystem 315 * Returns 0 if current can write the floor of the filesystem
316 * being mounted on, an error code otherwise. 316 * being mounted on, an error code otherwise.
317 */ 317 */
318static int smack_sb_mount(char *dev_name, struct nameidata *nd, 318static int smack_sb_mount(char *dev_name, struct path *path,
319 char *type, unsigned long flags, void *data) 319 char *type, unsigned long flags, void *data)
320{ 320{
321 struct superblock_smack *sbp = nd->path.mnt->mnt_sb->s_security; 321 struct superblock_smack *sbp = path->mnt->mnt_sb->s_security;
322 322
323 return smk_curacc(sbp->smk_floor, MAY_WRITE); 323 return smk_curacc(sbp->smk_floor, MAY_WRITE);
324} 324}