-rw-r--r--Documentation/devicetree/bindings/gpio/gpio_keys.txt36
-rw-r--r--Documentation/filesystems/nfs/Exporting9
-rw-r--r--arch/microblaze/include/asm/cpuinfo.h1
-rw-r--r--arch/microblaze/include/asm/irqflags.h20
-rw-r--r--arch/microblaze/include/asm/processor.h3
-rw-r--r--arch/microblaze/include/asm/prom.h8
-rw-r--r--arch/microblaze/include/asm/pvr.h5
-rw-r--r--arch/microblaze/include/asm/setup.h1
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c1
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-static.c1
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c4
-rw-r--r--arch/microblaze/kernel/cpu/mb.c9
-rw-r--r--arch/microblaze/kernel/early_printk.c68
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S56
-rw-r--r--arch/microblaze/kernel/intc.c2
-rw-r--r--arch/microblaze/kernel/process.c1
-rw-r--r--arch/microblaze/kernel/prom.c97
-rw-r--r--arch/microblaze/kernel/setup.c5
-rw-r--r--arch/sparc/lib/atomic32.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/input/joystick/xpad.c31
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1
-rw-r--r--drivers/input/keyboard/atkbd.c4
-rw-r--r--drivers/input/keyboard/gpio_keys.c166
-rw-r--r--drivers/input/keyboard/lm8323.c23
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c16
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c8
-rw-r--r--drivers/input/keyboard/qt1070.c2
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c2
-rw-r--r--drivers/input/misc/Kconfig38
-rw-r--r--drivers/input/misc/Makefile4
-rw-r--r--drivers/input/misc/bfin_rotary.c1
-rw-r--r--drivers/input/misc/kxtj9.c671
-rw-r--r--drivers/input/misc/mma8450.c256
-rw-r--r--drivers/input/misc/mpu3050.c376
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/gpio_mouse.c2
-rw-r--r--drivers/input/mouse/lifebook.c4
-rw-r--r--drivers/input/mouse/pxa930_trkball.c1
-rw-r--r--drivers/input/mouse/sentelic.c1
-rw-r--r--drivers/input/mouse/synaptics.c107
-rw-r--r--drivers/input/mouse/synaptics.h18
-rw-r--r--drivers/input/serio/at32psif.c2
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/tablet/aiptek.c1
-rw-r--r--drivers/input/tablet/wacom_wac.c68
-rw-r--r--drivers/input/touchscreen/ads7846.c15
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c190
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c8
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c15
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c6
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c2
-rw-r--r--drivers/input/touchscreen/wm9705.c25
-rw-r--r--drivers/input/touchscreen/wm9712.c27
-rw-r--r--drivers/input/touchscreen/wm9713.c25
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c6
-rw-r--r--drivers/pci/pci-label.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.h4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c8
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/staging/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile2
-rw-r--r--drivers/target/iscsi/Kconfig8
-rw-r--r--drivers/target/iscsi/Makefile20
-rw-r--r--drivers/target/iscsi/iscsi_target.c4559
-rw-r--r--drivers/target/iscsi/iscsi_target.h42
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c490
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h31
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c1882
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h859
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c531
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c87
-rw-r--r--drivers/target/iscsi/iscsi_target_device.h9
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c1004
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.h15
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c1299
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h26
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c474
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h18
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c1232
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c1067
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h17
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c263
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c1905
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h269
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c664
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.h86
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c950
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.h64
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c849
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c759
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h41
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c551
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.h88
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c1819
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h60
-rw-r--r--drivers/target/target_core_transport.c7
-rw-r--r--fs/anon_inodes.c2
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/dcache.c11
-rw-r--r--fs/gfs2/ops_fstype.c4
-rw-r--r--fs/inode.c39
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jfs/jfs_dmap.c5
-rw-r--r--fs/jfs/jfs_txnmgr.c6
-rw-r--r--fs/jfs/namei.c3
-rw-r--r--fs/lockd/clntproc.c9
-rw-r--r--fs/nfs/Kconfig1
-rw-r--r--fs/nfs/callback_proc.c57
-rw-r--r--fs/nfs/client.c7
-rw-r--r--fs/nfs/delegation.c16
-rw-r--r--fs/nfs/internal.h13
-rw-r--r--fs/nfs/namespace.c2
-rw-r--r--fs/nfs/nfs4_fs.h5
-rw-r--r--fs/nfs/nfs4filelayout.c80
-rw-r--r--fs/nfs/nfs4filelayout.h17
-rw-r--r--fs/nfs/nfs4filelayoutdev.c452
-rw-r--r--fs/nfs/nfs4proc.c215
-rw-r--r--fs/nfs/nfs4state.c9
-rw-r--r--fs/nfs/nfs4xdr.c247
-rw-r--r--fs/nfs/objlayout/objio_osd.c20
-rw-r--r--fs/nfs/pagelist.c69
-rw-r--r--fs/nfs/pnfs.c221
-rw-r--r--fs/nfs/pnfs.h74
-rw-r--r--fs/nfs/pnfs_dev.c64
-rw-r--r--fs/nfs/read.c166
-rw-r--r--fs/nfs/unlink.c37
-rw-r--r--fs/nfs/write.c156
-rw-r--r--fs/omfs/dir.c2
-rw-r--r--fs/open.c78
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/generic.c3
-rw-r--r--fs/proc/proc_net.c4
-rw-r--r--fs/proc/root.c2
-rw-r--r--fs/read_write.c12
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c6
-rw-r--r--fs/xfs/xfs_bmap.c7
-rw-r--r--fs/xfs/xfs_dir2.c16
-rw-r--r--fs/xfs/xfs_filestream.c14
-rw-r--r--fs/xfs/xfs_inode.c16
-rw-r--r--fs/xfs/xfs_inode.h2
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_rename.c4
-rw-r--r--fs/xfs/xfs_vnodeops.c10
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/input.h6
-rw-r--r--include/linux/input/kxtj9.h70
-rw-r--r--include/linux/kernel.h8
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nfs_fs_sb.h5
-rw-r--r--include/linux/nfs_page.h17
-rw-r--r--include/linux/nfs_xdr.h34
-rw-r--r--include/linux/pnfs_osd_xdr.h31
-rw-r--r--include/linux/proc_fs.h6
-rw-r--r--include/linux/sunrpc/bc_xprt.h6
-rw-r--r--include/linux/sunrpc/sched.h4
-rw-r--r--include/linux/sunrpc/svc.h4
-rw-r--r--include/linux/sunrpc/xprt.h34
-rw-r--r--include/linux/wm97xx.h13
-rw-r--r--include/scsi/iscsi_proto.h60
-rw-r--r--include/sound/pcm.h35
-rw-r--r--include/sound/pcm_params.h16
-rw-r--r--include/sound/soc-dapm.h6
-rw-r--r--kernel/compat.c5
-rw-r--r--kernel/signal.c17
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/Kconfig4
-rw-r--r--net/sunrpc/Makefile2
-rw-r--r--net/sunrpc/backchannel_rqst.c7
-rw-r--r--net/sunrpc/bc_svc.c3
-rw-r--r--net/sunrpc/clnt.c15
-rw-r--r--net/sunrpc/sched.c38
-rw-r--r--net/sunrpc/svc.c6
-rw-r--r--net/sunrpc/svcsock.c14
-rw-r--r--net/sunrpc/xdr.c2
-rw-r--r--net/sunrpc/xprt.c257
-rw-r--r--net/sunrpc/xprtrdma/transport.c6
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h2
-rw-r--r--net/sunrpc/xprtsock.c57
-rw-r--r--security/integrity/ima/ima_main.c2
-rw-r--r--sound/core/pcm_lib.c13
-rw-r--r--sound/isa/msnd/msnd.h2
-rw-r--r--sound/pci/asihpi/asihpi.c21
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/hda_codec.c114
-rw-r--r--sound/pci/hda/hda_codec.h15
-rw-r--r--sound/pci/hda/hda_local.h2
-rw-r--r--sound/pci/hda/patch_analog.c4
-rw-r--r--sound/pci/hda/patch_cirrus.c743
-rw-r--r--sound/pci/hda/patch_conexant.c14
-rw-r--r--sound/pci/hda/patch_realtek.c15
-rw-r--r--sound/pci/hda/patch_sigmatel.c122
-rw-r--r--sound/pci/hda/patch_via.c4
-rw-r--r--sound/soc/codecs/sgtl5000.c70
-rw-r--r--sound/soc/codecs/wm8962.c6
-rw-r--r--sound/soc/davinci/davinci-vcif.c9
-rw-r--r--sound/soc/samsung/i2s.c7
-rw-r--r--sound/soc/soc-core.c5
-rw-r--r--sound/soc/soc-dapm.c30
211 files changed, 27310 insertions(+), 1499 deletions(-)
diff --git a/Documentation/devicetree/bindings/gpio/gpio_keys.txt b/Documentation/devicetree/bindings/gpio/gpio_keys.txt
new file mode 100644
index 000000000000..7190c99d7611
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio_keys.txt
@@ -0,0 +1,36 @@
+Device-Tree bindings for input/gpio_keys.c keyboard driver
+
+Required properties:
+	- compatible = "gpio-keys";
+
+Optional properties:
+	- autorepeat: Boolean, enables the auto repeat feature of the Linux
+	  input subsystem.
+
+Each button (key) is represented as a sub-node of "gpio-keys":
+Subnode properties:
+
+	- gpios: OF device-tree GPIO specification.
+	- label: Descriptive name of the key.
+	- linux,code: Keycode to emit.
+
+Optional subnode-properties:
+	- linux,input-type: Specify event type this button/key generates.
+	  If not specified defaults to <1> == EV_KEY.
+	- debounce-interval: Debouncing interval time in milliseconds.
+	  If not specified defaults to 5.
+	- gpio-key,wakeup: Boolean, button can wake up the system.
+
+Example nodes:
+
+	gpio_keys {
+			compatible = "gpio-keys";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			autorepeat;
+			button@21 {
+				label = "GPIO Key UP";
+				linux,code = <103>;
+				gpios = <&gpio1 0 1>;
+			};
+			...
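For orientation, a minimal sketch of how a driver could translate one such button sub-node into a struct gpio_keys_button. This is illustrative only: example_parse_gpio_key() is not a function in this series, and the real parsing added to drivers/input/keyboard/gpio_keys.c appears further down in this diff.

#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

/* Illustrative only: fill one gpio_keys_button from a "gpio-keys" child node. */
static int example_parse_gpio_key(struct device_node *pp,
				  struct gpio_keys_button *button)
{
	enum of_gpio_flags flags;
	const __be32 *reg;
	int len;

	button->gpio = of_get_gpio_flags(pp, 0, &flags);
	button->active_low = flags & OF_GPIO_ACTIVE_LOW;

	reg = of_get_property(pp, "linux,code", &len);
	if (!reg)
		return -EINVAL;		/* every key must carry a keycode */
	button->code = be32_to_cpup(reg);

	button->desc = of_get_property(pp, "label", &len);

	reg = of_get_property(pp, "linux,input-type", &len);
	button->type = reg ? be32_to_cpup(reg) : EV_KEY;

	button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);

	reg = of_get_property(pp, "debounce-interval", &len);
	button->debounce_interval = reg ? be32_to_cpup(reg) : 5;

	return 0;
}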
diff --git a/Documentation/filesystems/nfs/Exporting b/Documentation/filesystems/nfs/Exporting
index 87019d2b5981..09994c247289 100644
--- a/Documentation/filesystems/nfs/Exporting
+++ b/Documentation/filesystems/nfs/Exporting
@@ -92,7 +92,14 @@ For a filesystem to be exportable it must:
  1/ provide the filehandle fragment routines described below.
  2/ make sure that d_splice_alias is used rather than d_add
     when ->lookup finds an inode for a given parent and name.
-    Typically the ->lookup routine will end with a:
+
+    If inode is NULL, d_splice_alias(inode, dentry) is equivalent to
+
+		d_add(dentry, inode), NULL
+
+    Similarly, d_splice_alias(ERR_PTR(err), dentry) = ERR_PTR(err)
+
+    Typically the ->lookup routine will simply end with a:
 
 		return d_splice_alias(inode, dentry);
 	}
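As a rough illustration of the pattern described above (the examplefs_* names are placeholders, not part of this patch, and the ->lookup signature is the three-argument form used at this point in the tree), such a routine typically reduces to:

#include <linux/fs.h>
#include <linux/namei.h>

/* Hypothetical helper: returns the inode for this name, NULL if the name
 * does not exist, or an ERR_PTR() on failure.
 */
static struct inode *examplefs_iget_by_name(struct inode *dir,
					    const struct qstr *name);

static struct dentry *examplefs_lookup(struct inode *dir, struct dentry *dentry,
				       struct nameidata *nd)
{
	struct inode *inode;

	inode = examplefs_iget_by_name(dir, &dentry->d_name);

	/*
	 * Per the text above, a NULL inode behaves like d_add(dentry, NULL)
	 * and an ERR_PTR() inode is passed straight through, so no
	 * special-casing is needed here.
	 */
	return d_splice_alias(inode, dentry);
}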
diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h
index d8f013347a9e..7d6831ac8a46 100644
--- a/arch/microblaze/include/asm/cpuinfo.h
+++ b/arch/microblaze/include/asm/cpuinfo.h
@@ -38,6 +38,7 @@ struct cpuinfo {
 	u32 use_exc;
 	u32 ver_code;
 	u32 mmu;
+	u32 mmu_privins;
 	u32 endian;
 
 	/* CPU caches */
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h
index c4532f032b3b..c9a6262832c4 100644
--- a/arch/microblaze/include/asm/irqflags.h
+++ b/arch/microblaze/include/asm/irqflags.h
@@ -14,7 +14,7 @@
 
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
 	asm volatile("	msrclr %0, %1	\n"
@@ -25,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	/* this uses r0 without declaring it - is that correct? */
 	asm volatile("	msrclr r0, %0	\n"
@@ -35,7 +35,7 @@ static inline void arch_local_irq_disable(void)
 			: "memory");
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	/* this uses r0 without declaring it - is that correct? */
 	asm volatile("	msrset	r0, %0	\n"
@@ -47,7 +47,7 @@ static inline void arch_local_irq_enable(void)
 
 #else /* !CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags, tmp;
 	asm volatile ("	mfs	%0, rmsr	\n"
@@ -61,7 +61,7 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	unsigned long tmp;
 	asm volatile("	mfs	%0, rmsr	\n"
@@ -74,7 +74,7 @@ static inline void arch_local_irq_disable(void)
 			: "memory");
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	unsigned long tmp;
 	asm volatile("	mfs	%0, rmsr	\n"
@@ -89,7 +89,7 @@ static inline void arch_local_irq_enable(void)
 
 #endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
 	asm volatile("	mfs	%0, rmsr	\n"
@@ -100,7 +100,7 @@ static inline unsigned long arch_local_save_flags(void)
 	return flags;
 }
 
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
 	asm volatile("	mts	rmsr, %0	\n"
 			"	nop		\n"
@@ -109,12 +109,12 @@ static inline void arch_local_irq_restore(unsigned long flags)
 			: "memory");
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
 	return (flags & MSR_IE) == 0;
 }
 
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
 {
 	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index aed2a6be8e27..7283bfb2f7e4 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -125,9 +125,6 @@ struct thread_struct {
 	.pgdir = swapper_pg_dir, \
 }
 
-/* Do necessary setup to start up a newly executed thread. */
-void start_thread(struct pt_regs *regs,
-		unsigned long pc, unsigned long usp);
 
 /* Free all resources held by a thread. */
 extern inline void release_thread(struct task_struct *dead_task)
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 9ad567e2d425..20c5e8e5121b 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -26,8 +26,12 @@
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
 /* Other Prototypes */
-extern int early_uartlite_console(void);
-extern int early_uart16550_console(void);
+enum early_consoles {
+	UARTLITE = 1,
+	UART16550 = 2,
+};
+
+extern int of_early_console(void *version);
 
 /*
  * OF address retreival & translation
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index a10bec62e857..4bbdb4c03b57 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -111,16 +111,16 @@ struct pvr_s {
 /* Target family PVR mask */
 #define PVR10_TARGET_FAMILY_MASK	0xFF000000
 
-/* MMU descrtiption */
+/* MMU description */
 #define PVR11_USE_MMU			0xC0000000
 #define PVR11_MMU_ITLB_SIZE		0x38000000
 #define PVR11_MMU_DTLB_SIZE		0x07000000
 #define PVR11_MMU_TLB_ACCESS		0x00C00000
 #define PVR11_MMU_ZONES			0x003C0000
+#define PVR11_MMU_PRIVINS		0x00010000
 /* MSR Reset value PVR mask */
 #define PVR11_MSR_RESET_VALUE_MASK	0x000007FF
 
-
 /* PVR access macros */
 #define PVR_IS_FULL(_pvr)	(_pvr.pvr[0] & PVR0_PVR_FULL_MASK)
 #define PVR_USE_BARREL(_pvr)	(_pvr.pvr[0] & PVR0_USE_BARREL_MASK)
@@ -216,6 +216,7 @@ struct pvr_s {
 #define PVR_MMU_DTLB_SIZE(_pvr)		(_pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
 #define PVR_MMU_TLB_ACCESS(_pvr)	(_pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
 #define PVR_MMU_ZONES(_pvr)		(_pvr.pvr[11] & PVR11_MMU_ZONES)
+#define PVR_MMU_PRIVINS(pvr)		(pvr.pvr[11] & PVR11_MMU_PRIVINS)
 
 /* endian */
 #define PVR_ENDIAN(_pvr)	(_pvr.pvr[0] & PVR0_ENDI)
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 8f3968971e4e..904e5ef6a11b 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -23,6 +23,7 @@ extern char cmd_line[COMMAND_LINE_SIZE];
 void early_printk(const char *fmt, ...);
 
 int setup_early_printk(char *opt);
+void remap_early_printk(void);
 void disable_early_printk(void);
 
 #if defined(CONFIG_EARLY_PRINTK)
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index f70a6047f08e..916aaedf1945 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -72,6 +72,7 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
 	CI(pvr_user2, USER2);
 
 	CI(mmu, USE_MMU);
+	CI(mmu_privins, MMU_PRIVINS);
 	CI(endian, ENDIAN);
 
 	CI(use_icache, USE_ICACHE);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index b16b994ca3d2..592bb2e838c4 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -119,6 +119,7 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
 	ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2");
 
 	ci->mmu = fcpu(cpu, "xlnx,use-mmu");
+	ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr");
 	ci->endian = fcpu(cpu, "xlnx,endianness");
 
 	ci->ver_code = 0;
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index c1640c52711f..44394d80a683 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -88,4 +88,8 @@ void __init setup_cpuinfo(void)
 		printk(KERN_WARNING "%s: Unsupported PVR setting\n", __func__);
 		set_cpuinfo_static(&cpuinfo, cpu);
 	}
+
+	if (cpuinfo.mmu_privins)
+		printk(KERN_WARNING "%s: Stream instructions enabled"
+			" - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
 }
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index b4048af02615..7b5dca7ed39d 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -97,6 +97,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		(cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "",
 		(cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : "");
 
+	count += seq_printf(m,
+			"Stream-insns:\t%sprivileged\n",
+			cpuinfo.mmu_privins ? "un" : "");
+
 	if (cpuinfo.use_icache)
 		count += seq_printf(m,
 				"Icache:\t\t%ukB\tline length:\t%dB\n",
@@ -110,10 +114,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 				"Dcache:\t\t%ukB\tline length:\t%dB\n",
 				cpuinfo.dcache_size >> 10,
 				cpuinfo.dcache_line_length);
+		seq_printf(m, "Dcache-Policy:\t");
 		if (cpuinfo.dcache_wb)
-			count += seq_printf(m, "\t\twrite-back\n");
+			count += seq_printf(m, "write-back\n");
 		else
-			count += seq_printf(m, "\t\twrite-through\n");
+			count += seq_printf(m, "write-through\n");
 	} else
 		count += seq_printf(m, "Dcache:\t\tno\n");
 
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index c3616a080ebf..d26d92d47754 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -35,7 +35,7 @@ static void early_printk_uartlite_putc(char c)
35 * we'll never timeout on a working UART. 35 * we'll never timeout on a working UART.
36 */ 36 */
37 37
38 unsigned retries = 10000; 38 unsigned retries = 1000000;
39 /* read status bit - 0x8 offset */ 39 /* read status bit - 0x8 offset */
40 while (--retries && (in_be32(base_addr + 8) & (1 << 3))) 40 while (--retries && (in_be32(base_addr + 8) & (1 << 3)))
41 ; 41 ;
@@ -60,7 +60,7 @@ static void early_printk_uartlite_write(struct console *unused,
60static struct console early_serial_uartlite_console = { 60static struct console early_serial_uartlite_console = {
61 .name = "earlyser", 61 .name = "earlyser",
62 .write = early_printk_uartlite_write, 62 .write = early_printk_uartlite_write,
63 .flags = CON_PRINTBUFFER, 63 .flags = CON_PRINTBUFFER | CON_BOOT,
64 .index = -1, 64 .index = -1,
65}; 65};
66#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */ 66#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
@@ -104,7 +104,7 @@ static void early_printk_uart16550_write(struct console *unused,
104static struct console early_serial_uart16550_console = { 104static struct console early_serial_uart16550_console = {
105 .name = "earlyser", 105 .name = "earlyser",
106 .write = early_printk_uart16550_write, 106 .write = early_printk_uart16550_write,
107 .flags = CON_PRINTBUFFER, 107 .flags = CON_PRINTBUFFER | CON_BOOT,
108 .index = -1, 108 .index = -1,
109}; 109};
110#endif /* CONFIG_SERIAL_8250_CONSOLE */ 110#endif /* CONFIG_SERIAL_8250_CONSOLE */
@@ -127,48 +127,56 @@ void early_printk(const char *fmt, ...)
127 127
128int __init setup_early_printk(char *opt) 128int __init setup_early_printk(char *opt)
129{ 129{
130 int version = 0;
131
130 if (early_console_initialized) 132 if (early_console_initialized)
131 return 1; 133 return 1;
132 134
133#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE 135 base_addr = of_early_console(&version);
134 base_addr = early_uartlite_console();
135 if (base_addr) { 136 if (base_addr) {
136 early_console_initialized = 1;
137#ifdef CONFIG_MMU 137#ifdef CONFIG_MMU
138 early_console_reg_tlb_alloc(base_addr); 138 early_console_reg_tlb_alloc(base_addr);
139#endif 139#endif
140 early_console = &early_serial_uartlite_console; 140 switch (version) {
141 early_printk("early_printk_console is enabled at 0x%08x\n", 141#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
142 base_addr); 142 case UARTLITE:
143 143 printk(KERN_INFO "Early console on uartlite "
144 /* register_console(early_console); */ 144 "at 0x%08x\n", base_addr);
145 145 early_console = &early_serial_uartlite_console;
146 return 0; 146 break;
147 } 147#endif
148#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
149
150#ifdef CONFIG_SERIAL_8250_CONSOLE 148#ifdef CONFIG_SERIAL_8250_CONSOLE
151 base_addr = early_uart16550_console(); 149 case UART16550:
152 base_addr &= ~3; /* clear register offset */ 150 printk(KERN_INFO "Early console on uart16650 "
153 if (base_addr) { 151 "at 0x%08x\n", base_addr);
154 early_console_initialized = 1; 152 early_console = &early_serial_uart16550_console;
155#ifdef CONFIG_MMU 153 break;
156 early_console_reg_tlb_alloc(base_addr);
157#endif 154#endif
158 early_console = &early_serial_uart16550_console; 155 default:
159 156 printk(KERN_INFO "Unsupported early console %d\n",
160 early_printk("early_printk_console is enabled at 0x%08x\n", 157 version);
161 base_addr); 158 return 1;
162 159 }
163 /* register_console(early_console); */
164 160
161 register_console(early_console);
162 early_console_initialized = 1;
165 return 0; 163 return 0;
166 } 164 }
167#endif /* CONFIG_SERIAL_8250_CONSOLE */
168
169 return 1; 165 return 1;
170} 166}
171 167
168/* Remap early console to virtual address and do not allocate one TLB
169 * only for early console because of performance degression */
170void __init remap_early_printk(void)
171{
172 if (!early_console_initialized || !early_console)
173 return;
174 printk(KERN_INFO "early_printk_console remaping from 0x%x to ",
175 base_addr);
176 base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
177 printk(KERN_CONT "0x%x\n", base_addr);
178}
179
172void __init disable_early_printk(void) 180void __init disable_early_printk(void)
173{ 181{
174 if (!early_console_initialized || !early_console) 182 if (!early_console_initialized || !early_console)
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 56572e923a83..e62be8379604 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -1113,23 +1113,23 @@ lw_r10_vm: R3_TO_LWREG_VM_V (10);
 lw_r11_vm:	R3_TO_LWREG_VM_V (11);
 lw_r12_vm:	R3_TO_LWREG_VM_V (12);
 lw_r13_vm:	R3_TO_LWREG_VM_V (13);
-lw_r14_vm:	R3_TO_LWREG_VM (14);
+lw_r14_vm:	R3_TO_LWREG_VM_V (14);
 lw_r15_vm:	R3_TO_LWREG_VM_V (15);
-lw_r16_vm:	R3_TO_LWREG_VM (16);
+lw_r16_vm:	R3_TO_LWREG_VM_V (16);
 lw_r17_vm:	R3_TO_LWREG_VM_V (17);
 lw_r18_vm:	R3_TO_LWREG_VM_V (18);
-lw_r19_vm:	R3_TO_LWREG_VM (19);
-lw_r20_vm:	R3_TO_LWREG_VM (20);
-lw_r21_vm:	R3_TO_LWREG_VM (21);
-lw_r22_vm:	R3_TO_LWREG_VM (22);
-lw_r23_vm:	R3_TO_LWREG_VM (23);
-lw_r24_vm:	R3_TO_LWREG_VM (24);
-lw_r25_vm:	R3_TO_LWREG_VM (25);
-lw_r26_vm:	R3_TO_LWREG_VM (26);
-lw_r27_vm:	R3_TO_LWREG_VM (27);
-lw_r28_vm:	R3_TO_LWREG_VM (28);
-lw_r29_vm:	R3_TO_LWREG_VM (29);
-lw_r30_vm:	R3_TO_LWREG_VM (30);
+lw_r19_vm:	R3_TO_LWREG_VM_V (19);
+lw_r20_vm:	R3_TO_LWREG_VM_V (20);
+lw_r21_vm:	R3_TO_LWREG_VM_V (21);
+lw_r22_vm:	R3_TO_LWREG_VM_V (22);
+lw_r23_vm:	R3_TO_LWREG_VM_V (23);
+lw_r24_vm:	R3_TO_LWREG_VM_V (24);
+lw_r25_vm:	R3_TO_LWREG_VM_V (25);
+lw_r26_vm:	R3_TO_LWREG_VM_V (26);
+lw_r27_vm:	R3_TO_LWREG_VM_V (27);
+lw_r28_vm:	R3_TO_LWREG_VM_V (28);
+lw_r29_vm:	R3_TO_LWREG_VM_V (29);
+lw_r30_vm:	R3_TO_LWREG_VM_V (30);
 lw_r31_vm:	R3_TO_LWREG_VM_V (31);
 
 sw_table_vm:
@@ -1147,23 +1147,23 @@ sw_r10_vm:	SWREG_TO_R3_VM_V (10);
 sw_r11_vm:	SWREG_TO_R3_VM_V (11);
 sw_r12_vm:	SWREG_TO_R3_VM_V (12);
 sw_r13_vm:	SWREG_TO_R3_VM_V (13);
-sw_r14_vm:	SWREG_TO_R3_VM (14);
+sw_r14_vm:	SWREG_TO_R3_VM_V (14);
 sw_r15_vm:	SWREG_TO_R3_VM_V (15);
-sw_r16_vm:	SWREG_TO_R3_VM (16);
+sw_r16_vm:	SWREG_TO_R3_VM_V (16);
 sw_r17_vm:	SWREG_TO_R3_VM_V (17);
 sw_r18_vm:	SWREG_TO_R3_VM_V (18);
-sw_r19_vm:	SWREG_TO_R3_VM (19);
-sw_r20_vm:	SWREG_TO_R3_VM (20);
-sw_r21_vm:	SWREG_TO_R3_VM (21);
-sw_r22_vm:	SWREG_TO_R3_VM (22);
-sw_r23_vm:	SWREG_TO_R3_VM (23);
-sw_r24_vm:	SWREG_TO_R3_VM (24);
-sw_r25_vm:	SWREG_TO_R3_VM (25);
-sw_r26_vm:	SWREG_TO_R3_VM (26);
-sw_r27_vm:	SWREG_TO_R3_VM (27);
-sw_r28_vm:	SWREG_TO_R3_VM (28);
-sw_r29_vm:	SWREG_TO_R3_VM (29);
-sw_r30_vm:	SWREG_TO_R3_VM (30);
+sw_r19_vm:	SWREG_TO_R3_VM_V (19);
+sw_r20_vm:	SWREG_TO_R3_VM_V (20);
+sw_r21_vm:	SWREG_TO_R3_VM_V (21);
+sw_r22_vm:	SWREG_TO_R3_VM_V (22);
+sw_r23_vm:	SWREG_TO_R3_VM_V (23);
+sw_r24_vm:	SWREG_TO_R3_VM_V (24);
+sw_r25_vm:	SWREG_TO_R3_VM_V (25);
+sw_r26_vm:	SWREG_TO_R3_VM_V (26);
+sw_r27_vm:	SWREG_TO_R3_VM_V (27);
+sw_r28_vm:	SWREG_TO_R3_VM_V (28);
+sw_r29_vm:	SWREG_TO_R3_VM_V (29);
+sw_r30_vm:	SWREG_TO_R3_VM_V (30);
 sw_r31_vm:	SWREG_TO_R3_VM_V (31);
 #endif /* CONFIG_MMU */
 
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index c88f066f41bd..eb41441c7fd0 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -134,7 +134,7 @@ void __init init_IRQ(void)
 	intr_type =
 		be32_to_cpup(of_get_property(intc,
 						"xlnx,kind-of-intr", NULL));
-	if (intr_type >= (1 << (nr_irq + 1)))
+	if (intr_type > (u32)((1ULL << nr_irq) - 1))
 		printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
 
 #ifdef CONFIG_SELFMOD_INTC
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 968648a81c1e..dbb812421d8a 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -237,7 +237,6 @@ unsigned long get_wchan(struct task_struct *p)
 /* Set up a thread for executing a new program */
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
 {
-	set_fs(USER_DS);
 	regs->pc = pc;
 	regs->r1 = usp;
 	regs->pt_mode = 0;
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index b15cc219b1d9..977484add216 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -53,69 +53,58 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
53} 53}
54 54
55#ifdef CONFIG_EARLY_PRINTK 55#ifdef CONFIG_EARLY_PRINTK
56/* MS this is Microblaze specifig function */ 56char *stdout;
57static int __init early_init_dt_scan_serial(unsigned long node,
58 const char *uname, int depth, void *data)
59{
60 unsigned long l;
61 char *p;
62 const __be32 *addr;
63
64 pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);
65
66/* find all serial nodes */
67 if (strncmp(uname, "serial", 6) != 0)
68 return 0;
69
70/* find compatible node with uartlite */
71 p = of_get_flat_dt_prop(node, "compatible", &l);
72 if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
73 (strncmp(p, "xlnx,opb-uartlite", 17) != 0) &&
74 (strncmp(p, "xlnx,axi-uartlite", 17) != 0))
75 return 0;
76
77 addr = of_get_flat_dt_prop(node, "reg", &l);
78 return be32_to_cpup(addr); /* return address */
79}
80 57
81/* this function is looking for early uartlite console - Microblaze specific */ 58int __init early_init_dt_scan_chosen_serial(unsigned long node,
82int __init early_uartlite_console(void)
83{
84 return of_scan_flat_dt(early_init_dt_scan_serial, NULL);
85}
86
87/* MS this is Microblaze specifig function */
88static int __init early_init_dt_scan_serial_full(unsigned long node,
89 const char *uname, int depth, void *data) 59 const char *uname, int depth, void *data)
90{ 60{
91 unsigned long l; 61 unsigned long l;
92 char *p; 62 char *p;
93 unsigned int addr;
94
95 pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
96
97/* find all serial nodes */
98 if (strncmp(uname, "serial", 6) != 0)
99 return 0;
100 63
101 early_init_dt_check_for_initrd(node); 64 pr_debug("%s: depth: %d, uname: %s\n", __func__, depth, uname);
102 65
103/* find compatible node with uartlite */ 66 if (depth == 1 && (strcmp(uname, "chosen") == 0 ||
104 p = of_get_flat_dt_prop(node, "compatible", &l); 67 strcmp(uname, "chosen@0") == 0)) {
105 68 p = of_get_flat_dt_prop(node, "linux,stdout-path", &l);
106 if ((strncmp(p, "xlnx,xps-uart16550", 18) != 0) && 69 if (p != NULL && l > 0)
107 (strncmp(p, "xlnx,axi-uart16550", 18) != 0)) 70 stdout = p; /* store pointer to stdout-path */
108 return 0; 71 }
109 72
110 addr = *(u32 *)of_get_flat_dt_prop(node, "reg", &l); 73 if (stdout && strstr(stdout, uname)) {
111 addr += *(u32 *)of_get_flat_dt_prop(node, "reg-offset", &l); 74 p = of_get_flat_dt_prop(node, "compatible", &l);
112 return be32_to_cpu(addr); /* return address */ 75 pr_debug("Compatible string: %s\n", p);
76
77 if ((strncmp(p, "xlnx,xps-uart16550", 18) == 0) ||
78 (strncmp(p, "xlnx,axi-uart16550", 18) == 0)) {
79 unsigned int addr;
80
81 *(u32 *)data = UART16550;
82
83 addr = *(u32 *)of_get_flat_dt_prop(node, "reg", &l);
84 addr += *(u32 *)of_get_flat_dt_prop(node,
85 "reg-offset", &l);
86 /* clear register offset */
87 return be32_to_cpu(addr) & ~3;
88 }
89 if ((strncmp(p, "xlnx,xps-uartlite", 17) == 0) ||
90 (strncmp(p, "xlnx,opb-uartlite", 17) == 0) ||
91 (strncmp(p, "xlnx,axi-uartlite", 17) == 0) ||
92 (strncmp(p, "xlnx,mdm", 8) == 0)) {
93 unsigned int *addrp;
94
95 *(u32 *)data = UARTLITE;
96
97 addrp = of_get_flat_dt_prop(node, "reg", &l);
98 return be32_to_cpup(addrp); /* return address */
99 }
100 }
101 return 0;
113} 102}
114 103
115/* this function is looking for early uartlite console - Microblaze specific */ 104/* this function is looking for early console - Microblaze specific */
116int __init early_uart16550_console(void) 105int __init of_early_console(void *version)
117{ 106{
118 return of_scan_flat_dt(early_init_dt_scan_serial_full, NULL); 107 return of_scan_flat_dt(early_init_dt_scan_chosen_serial, version);
119} 108}
120#endif 109#endif
121 110
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 8e2c09b7ff26..0e654a12d37e 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -59,6 +59,11 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_memory();
 
+#ifdef CONFIG_EARLY_PRINTK
+	/* remap early console to virtual address */
+	remap_early_printk();
+#endif
+
 	xilinx_pci_init();
 
 #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 1a371f8ae0b0..8600eb2461b5 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -55,7 +55,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
 
-int atomic_add_unless(atomic_t *v, int a, int u)
+int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	unsigned long flags;
@@ -67,7 +67,7 @@ int atomic_add_unless(atomic_t *v, int a, int u)
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret != u;
 }
-EXPORT_SYMBOL(atomic_add_unless);
+EXPORT_SYMBOL(__atomic_add_unless);
 
 /* Atomic operations are already serializing */
 void atomic_set(atomic_t *v, int i)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 95a08a8ca8aa..5745b7fe158c 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -271,7 +271,7 @@ int iser_send_command(struct iscsi_conn *conn,
 	unsigned long edtl;
 	int err;
 	struct iser_data_buf *data_buf;
-	struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
+	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
 	struct scsi_cmnd *sc  =  task->sc;
 	struct iser_tx_desc *tx_desc = &iser_task->desc;
 
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 56abf3d0e911..d72887585a14 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -154,10 +154,13 @@ static const struct xpad_device {
 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
 	{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+	{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -236,9 +239,10 @@ static struct usb_device_id xpad_table [] = {
 	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech X-Box 360 style controllers */
 	XPAD_XBOX360_VENDOR(0x0738),		/* Mad Catz X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x0e6f),		/* 0x0e6f X-Box 360 controllers */
+	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
-	XPAD_XBOX360_VENDOR(0x1bad),		/* Rock Band Drums */
+	XPAD_XBOX360_VENDOR(0x1bad),		/* Harminix Rock Band Guitar and Drums */
 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
 	{ }
 };
@@ -545,7 +549,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
 	struct usb_endpoint_descriptor *ep_irq_out;
 	int error;
 
-	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
+	if (xpad->xtype == XTYPE_UNKNOWN)
 		return 0;
 
 	xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
@@ -579,13 +583,13 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
 
 static void xpad_stop_output(struct usb_xpad *xpad)
 {
-	if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX)
+	if (xpad->xtype != XTYPE_UNKNOWN)
 		usb_kill_urb(xpad->irq_out);
 }
 
 static void xpad_deinit_output(struct usb_xpad *xpad)
 {
-	if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) {
+	if (xpad->xtype != XTYPE_UNKNOWN) {
 		usb_free_urb(xpad->irq_out);
 		usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
 				xpad->odata, xpad->odata_dma);
@@ -632,6 +636,23 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
 
 		return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
 
+	case XTYPE_XBOX360W:
+		xpad->odata[0] = 0x00;
+		xpad->odata[1] = 0x01;
+		xpad->odata[2] = 0x0F;
+		xpad->odata[3] = 0xC0;
+		xpad->odata[4] = 0x00;
+		xpad->odata[5] = strong / 256;
+		xpad->odata[6] = weak / 256;
+		xpad->odata[7] = 0x00;
+		xpad->odata[8] = 0x00;
+		xpad->odata[9] = 0x00;
+		xpad->odata[10] = 0x00;
+		xpad->odata[11] = 0x00;
+		xpad->irq_out->transfer_buffer_length = 12;
+
+		return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+
 	default:
 		dbg("%s - rumble command sent to unsupported xpad type: %d",
 			__func__, xpad->xtype);
@@ -644,7 +665,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
 
 static int xpad_init_ff(struct usb_xpad *xpad)
 {
-	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
+	if (xpad->xtype == XTYPE_UNKNOWN)
 		return 0;
 
 	input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index af45d275f686..7b404e5443ed 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 631598663aab..c7708263051b 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 11478eb2c27d..19cfc0cf558c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1578,14 +1578,14 @@ static int __init atkbd_setup_forced_release(const struct dmi_system_id *id)
 	atkbd_platform_fixup = atkbd_apply_forced_release_keylist;
 	atkbd_platform_fixup_data = id->driver_data;
 
-	return 0;
+	return 1;
 }
 
 static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
 {
 	atkbd_platform_scancode_fixup = id->driver_data;
 
-	return 0;
+	return 1;
 }
 
 static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6e6145b9a4c1..ce281d152275 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -2,6 +2,7 @@
2 * Driver for keys on GPIO lines capable of generating interrupts. 2 * Driver for keys on GPIO lines capable of generating interrupts.
3 * 3 *
4 * Copyright 2005 Phil Blundell 4 * Copyright 2005 Phil Blundell
5 * Copyright 2010, 2011 David Jander <david@protonic.nl>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -25,6 +26,8 @@
25#include <linux/gpio_keys.h> 26#include <linux/gpio_keys.h>
26#include <linux/workqueue.h> 27#include <linux/workqueue.h>
27#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_platform.h>
30#include <linux/of_gpio.h>
28 31
29struct gpio_button_data { 32struct gpio_button_data {
30 struct gpio_keys_button *button; 33 struct gpio_keys_button *button;
@@ -415,7 +418,7 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
415 if (!button->can_disable) 418 if (!button->can_disable)
416 irqflags |= IRQF_SHARED; 419 irqflags |= IRQF_SHARED;
417 420
418 error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata); 421 error = request_threaded_irq(irq, NULL, gpio_keys_isr, irqflags, desc, bdata);
419 if (error < 0) { 422 if (error < 0) {
420 dev_err(dev, "Unable to claim irq %d; error %d\n", 423 dev_err(dev, "Unable to claim irq %d; error %d\n",
421 irq, error); 424 irq, error);
@@ -445,15 +448,120 @@ static void gpio_keys_close(struct input_dev *input)
445 ddata->disable(input->dev.parent); 448 ddata->disable(input->dev.parent);
446} 449}
447 450
451/*
452 * Handlers for alternative sources of platform_data
453 */
454#ifdef CONFIG_OF
455/*
456 * Translate OpenFirmware node properties into platform_data
457 */
458static int gpio_keys_get_devtree_pdata(struct device *dev,
459 struct gpio_keys_platform_data *pdata)
460{
461 struct device_node *node, *pp;
462 int i;
463 struct gpio_keys_button *buttons;
464 const u32 *reg;
465 int len;
466
467 node = dev->of_node;
468 if (node == NULL)
469 return -ENODEV;
470
471 memset(pdata, 0, sizeof *pdata);
472
473 pdata->rep = !!of_get_property(node, "autorepeat", &len);
474
475 /* First count the subnodes */
476 pdata->nbuttons = 0;
477 pp = NULL;
478 while ((pp = of_get_next_child(node, pp)))
479 pdata->nbuttons++;
480
481 if (pdata->nbuttons == 0)
482 return -ENODEV;
483
484 buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL);
485 if (!buttons)
486 return -ENODEV;
487
488 pp = NULL;
489 i = 0;
490 while ((pp = of_get_next_child(node, pp))) {
491 enum of_gpio_flags flags;
492
493 if (!of_find_property(pp, "gpios", NULL)) {
494 pdata->nbuttons--;
495 dev_warn(dev, "Found button without gpios\n");
496 continue;
497 }
498 buttons[i].gpio = of_get_gpio_flags(pp, 0, &flags);
499 buttons[i].active_low = flags & OF_GPIO_ACTIVE_LOW;
500
501 reg = of_get_property(pp, "linux,code", &len);
502 if (!reg) {
503 dev_err(dev, "Button without keycode: 0x%x\n", buttons[i].gpio);
504 goto out_fail;
505 }
506 buttons[i].code = be32_to_cpup(reg);
507
508 buttons[i].desc = of_get_property(pp, "label", &len);
509
510 reg = of_get_property(pp, "linux,input-type", &len);
511 buttons[i].type = reg ? be32_to_cpup(reg) : EV_KEY;
512
513 buttons[i].wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
514
515 reg = of_get_property(pp, "debounce-interval", &len);
516 buttons[i].debounce_interval = reg ? be32_to_cpup(reg) : 5;
517
518 i++;
519 }
520
521 pdata->buttons = buttons;
522
523 return 0;
524
525out_fail:
526 kfree(buttons);
527 return -ENODEV;
528}
529
530static struct of_device_id gpio_keys_of_match[] = {
531 { .compatible = "gpio-keys", },
532 { },
533};
534MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
535
536#else
537
538static int gpio_keys_get_devtree_pdata(struct device *dev,
539 struct gpio_keys_platform_data *altp)
540{
541 return -ENODEV;
542}
543
544#define gpio_keys_of_match NULL
545
546#endif
547
448static int __devinit gpio_keys_probe(struct platform_device *pdev) 548static int __devinit gpio_keys_probe(struct platform_device *pdev)
449{ 549{
450 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; 550 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
451 struct gpio_keys_drvdata *ddata; 551 struct gpio_keys_drvdata *ddata;
452 struct device *dev = &pdev->dev; 552 struct device *dev = &pdev->dev;
553 struct gpio_keys_platform_data alt_pdata;
453 struct input_dev *input; 554 struct input_dev *input;
454 int i, error; 555 int i, error;
455 int wakeup = 0; 556 int wakeup = 0;
456 557
558 if (!pdata) {
559 error = gpio_keys_get_devtree_pdata(dev, &alt_pdata);
560 if (error)
561 return error;
562 pdata = &alt_pdata;
563 }
564
457 ddata = kzalloc(sizeof(struct gpio_keys_drvdata) + 565 ddata = kzalloc(sizeof(struct gpio_keys_drvdata) +
458 pdata->nbuttons * sizeof(struct gpio_button_data), 566 pdata->nbuttons * sizeof(struct gpio_button_data),
459 GFP_KERNEL); 567 GFP_KERNEL);
@@ -544,13 +652,15 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
544 fail1: 652 fail1:
545 input_free_device(input); 653 input_free_device(input);
546 kfree(ddata); 654 kfree(ddata);
655 /* If we have no platform_data, we allocated buttons dynamically. */
656 if (!pdev->dev.platform_data)
657 kfree(pdata->buttons);
547 658
548 return error; 659 return error;
549} 660}
550 661
551static int __devexit gpio_keys_remove(struct platform_device *pdev) 662static int __devexit gpio_keys_remove(struct platform_device *pdev)
552{ 663{
553 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
554 struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev); 664 struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
555 struct input_dev *input = ddata->input; 665 struct input_dev *input = ddata->input;
556 int i; 666 int i;
@@ -559,31 +669,39 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
559 669
560 device_init_wakeup(&pdev->dev, 0); 670 device_init_wakeup(&pdev->dev, 0);
561 671
562 for (i = 0; i < pdata->nbuttons; i++) { 672 for (i = 0; i < ddata->n_buttons; i++) {
563 int irq = gpio_to_irq(pdata->buttons[i].gpio); 673 int irq = gpio_to_irq(ddata->data[i].button->gpio);
564 free_irq(irq, &ddata->data[i]); 674 free_irq(irq, &ddata->data[i]);
565 if (ddata->data[i].timer_debounce) 675 if (ddata->data[i].timer_debounce)
566 del_timer_sync(&ddata->data[i].timer); 676 del_timer_sync(&ddata->data[i].timer);
567 cancel_work_sync(&ddata->data[i].work); 677 cancel_work_sync(&ddata->data[i].work);
568 gpio_free(pdata->buttons[i].gpio); 678 gpio_free(ddata->data[i].button->gpio);
569 } 679 }
570 680
571 input_unregister_device(input); 681 input_unregister_device(input);
572 682
683 /*
684 * If we had no platform_data, we allocated buttons dynamically, and
685 * must free them here. ddata->data[0].button is the pointer to the
686 * beginning of the allocated array.
687 */
688 if (!pdev->dev.platform_data)
689 kfree(ddata->data[0].button);
690
691 kfree(ddata);
692
573 return 0; 693 return 0;
574} 694}
575 695
576 696#ifdef CONFIG_PM_SLEEP
577#ifdef CONFIG_PM
578static int gpio_keys_suspend(struct device *dev) 697static int gpio_keys_suspend(struct device *dev)
579{ 698{
580 struct platform_device *pdev = to_platform_device(dev); 699 struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
581 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
582 int i; 700 int i;
583 701
584 if (device_may_wakeup(&pdev->dev)) { 702 if (device_may_wakeup(dev)) {
585 for (i = 0; i < pdata->nbuttons; i++) { 703 for (i = 0; i < ddata->n_buttons; i++) {
586 struct gpio_keys_button *button = &pdata->buttons[i]; 704 struct gpio_keys_button *button = ddata->data[i].button;
587 if (button->wakeup) { 705 if (button->wakeup) {
588 int irq = gpio_to_irq(button->gpio); 706 int irq = gpio_to_irq(button->gpio);
589 enable_irq_wake(irq); 707 enable_irq_wake(irq);
@@ -596,15 +714,13 @@ static int gpio_keys_suspend(struct device *dev)
596 714
597static int gpio_keys_resume(struct device *dev) 715static int gpio_keys_resume(struct device *dev)
598{ 716{
599 struct platform_device *pdev = to_platform_device(dev); 717 struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
600 struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
601 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
602 int i; 718 int i;
603 719
604 for (i = 0; i < pdata->nbuttons; i++) { 720 for (i = 0; i < ddata->n_buttons; i++) {
605 721
606 struct gpio_keys_button *button = &pdata->buttons[i]; 722 struct gpio_keys_button *button = ddata->data[i].button;
607 if (button->wakeup && device_may_wakeup(&pdev->dev)) { 723 if (button->wakeup && device_may_wakeup(dev)) {
608 int irq = gpio_to_irq(button->gpio); 724 int irq = gpio_to_irq(button->gpio);
609 disable_irq_wake(irq); 725 disable_irq_wake(irq);
610 } 726 }
@@ -615,22 +731,18 @@ static int gpio_keys_resume(struct device *dev)
615 731
616 return 0; 732 return 0;
617} 733}
618
619static const struct dev_pm_ops gpio_keys_pm_ops = {
620 .suspend = gpio_keys_suspend,
621 .resume = gpio_keys_resume,
622};
623#endif 734#endif
624 735
736static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
737
625static struct platform_driver gpio_keys_device_driver = { 738static struct platform_driver gpio_keys_device_driver = {
626 .probe = gpio_keys_probe, 739 .probe = gpio_keys_probe,
627 .remove = __devexit_p(gpio_keys_remove), 740 .remove = __devexit_p(gpio_keys_remove),
628 .driver = { 741 .driver = {
629 .name = "gpio-keys", 742 .name = "gpio-keys",
630 .owner = THIS_MODULE, 743 .owner = THIS_MODULE,
631#ifdef CONFIG_PM
632 .pm = &gpio_keys_pm_ops, 744 .pm = &gpio_keys_pm_ops,
633#endif 745 .of_match_table = gpio_keys_of_match,
634 } 746 }
635}; 747};
636 748
@@ -644,10 +756,10 @@ static void __exit gpio_keys_exit(void)
644 platform_driver_unregister(&gpio_keys_device_driver); 756 platform_driver_unregister(&gpio_keys_device_driver);
645} 757}
646 758
647module_init(gpio_keys_init); 759late_initcall(gpio_keys_init);
648module_exit(gpio_keys_exit); 760module_exit(gpio_keys_exit);
649 761
650MODULE_LICENSE("GPL"); 762MODULE_LICENSE("GPL");
651MODULE_AUTHOR("Phil Blundell <pb@handhelds.org>"); 763MODULE_AUTHOR("Phil Blundell <pb@handhelds.org>");
652MODULE_DESCRIPTION("Keyboard driver for CPU GPIOs"); 764MODULE_DESCRIPTION("Keyboard driver for GPIOs");
653MODULE_ALIAS("platform:gpio-keys"); 765MODULE_ALIAS("platform:gpio-keys");
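Note on the gpio-keys PM rework above: the open-coded, CONFIG_PM-guarded dev_pm_ops is replaced by SIMPLE_DEV_PM_OPS under CONFIG_PM_SLEEP. A minimal sketch of that pattern follows, with hypothetical foo_* names that are not part of this patch; the macro always defines the ops object, and the callbacks simply compile away when sleep support is disabled, so the .pm assignment needs no #ifdef.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
        /* quiesce the hardware; arm wakeup sources if device_may_wakeup(dev) */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* undo whatever foo_suspend() did */
        return 0;
}
#endif

/* Expands to a dev_pm_ops that is empty when CONFIG_PM_SLEEP is not set */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
        .driver = {
                .name  = "foo-keys",
                .owner = THIS_MODULE,
                .pm    = &foo_pm_ops,
        },
};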
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 71f744a8e686..ab0acaf7fe8f 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -146,7 +146,6 @@ struct lm8323_chip {
146 /* device lock */ 146 /* device lock */
147 struct mutex lock; 147 struct mutex lock;
148 struct i2c_client *client; 148 struct i2c_client *client;
149 struct work_struct work;
150 struct input_dev *idev; 149 struct input_dev *idev;
151 bool kp_enabled; 150 bool kp_enabled;
152 bool pm_suspend; 151 bool pm_suspend;
@@ -162,7 +161,6 @@ struct lm8323_chip {
162 161
163#define client_to_lm8323(c) container_of(c, struct lm8323_chip, client) 162#define client_to_lm8323(c) container_of(c, struct lm8323_chip, client)
164#define dev_to_lm8323(d) container_of(d, struct lm8323_chip, client->dev) 163#define dev_to_lm8323(d) container_of(d, struct lm8323_chip, client->dev)
165#define work_to_lm8323(w) container_of(w, struct lm8323_chip, work)
166#define cdev_to_pwm(c) container_of(c, struct lm8323_pwm, cdev) 164#define cdev_to_pwm(c) container_of(c, struct lm8323_pwm, cdev)
167#define work_to_pwm(w) container_of(w, struct lm8323_pwm, work) 165#define work_to_pwm(w) container_of(w, struct lm8323_pwm, work)
168 166
@@ -375,9 +373,9 @@ static void pwm_done(struct lm8323_pwm *pwm)
375 * Bottom half: handle the interrupt by posting key events, or dealing with 373 * Bottom half: handle the interrupt by posting key events, or dealing with
376 * errors appropriately. 374 * errors appropriately.
377 */ 375 */
378static void lm8323_work(struct work_struct *work) 376static irqreturn_t lm8323_irq(int irq, void *_lm)
379{ 377{
380 struct lm8323_chip *lm = work_to_lm8323(work); 378 struct lm8323_chip *lm = _lm;
381 u8 ints; 379 u8 ints;
382 int i; 380 int i;
383 381
@@ -409,16 +407,6 @@ static void lm8323_work(struct work_struct *work)
409 } 407 }
410 408
411 mutex_unlock(&lm->lock); 409 mutex_unlock(&lm->lock);
412}
413
414/*
415 * We cannot use I2C in interrupt context, so we just schedule work.
416 */
417static irqreturn_t lm8323_irq(int irq, void *data)
418{
419 struct lm8323_chip *lm = data;
420
421 schedule_work(&lm->work);
422 410
423 return IRQ_HANDLED; 411 return IRQ_HANDLED;
424} 412}
@@ -675,7 +663,6 @@ static int __devinit lm8323_probe(struct i2c_client *client,
675 lm->client = client; 663 lm->client = client;
676 lm->idev = idev; 664 lm->idev = idev;
677 mutex_init(&lm->lock); 665 mutex_init(&lm->lock);
678 INIT_WORK(&lm->work, lm8323_work);
679 666
680 lm->size_x = pdata->size_x; 667 lm->size_x = pdata->size_x;
681 lm->size_y = pdata->size_y; 668 lm->size_y = pdata->size_y;
@@ -746,9 +733,8 @@ static int __devinit lm8323_probe(struct i2c_client *client,
746 goto fail3; 733 goto fail3;
747 } 734 }
748 735
749 err = request_irq(client->irq, lm8323_irq, 736 err = request_threaded_irq(client->irq, NULL, lm8323_irq,
750 IRQF_TRIGGER_FALLING | IRQF_DISABLED, 737 IRQF_TRIGGER_LOW|IRQF_ONESHOT, "lm8323", lm);
751 "lm8323", lm);
752 if (err) { 738 if (err) {
753 dev_err(&client->dev, "could not get IRQ %d\n", client->irq); 739 dev_err(&client->dev, "could not get IRQ %d\n", client->irq);
754 goto fail4; 740 goto fail4;
@@ -783,7 +769,6 @@ static int __devexit lm8323_remove(struct i2c_client *client)
783 769
784 disable_irq_wake(client->irq); 770 disable_irq_wake(client->irq);
785 free_irq(client->irq, lm); 771 free_irq(client->irq, lm);
786 cancel_work_sync(&lm->work);
787 772
788 input_unregister_device(lm->idev); 773 input_unregister_device(lm->idev);
789 774
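The lm8323 conversion above drops the private work item and moves the I2C traffic into a threaded interrupt handler. A sketch of the same pattern with made-up foo_* names (assumptions, not code from this patch): the hard-IRQ handler is NULL, the thread function may sleep on the bus, and IRQF_ONESHOT keeps the level-triggered line masked until the thread returns.

#include <linux/i2c.h>
#include <linux/interrupt.h>

struct foo_chip {
        struct i2c_client *client;
};

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
        struct foo_chip *chip = dev_id;

        /* Runs in process context, so sleeping I2C transfers are fine here */
        i2c_smbus_read_byte_data(chip->client, 0x00 /* status register */);

        return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_chip *chip)
{
        return request_threaded_irq(chip->client->irq, NULL, foo_irq_thread,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "foo", chip);
}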
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 0a9e81194888..1c1615d9a7f9 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -43,14 +43,15 @@
43 * enabled capacitance sensing inputs and its run/suspend mode. 43 * enabled capacitance sensing inputs and its run/suspend mode.
44 */ 44 */
45#define ELECTRODE_CONF_ADDR 0x5e 45#define ELECTRODE_CONF_ADDR 0x5e
46#define ELECTRODE_CONF_QUICK_CHARGE 0x80
46#define AUTO_CONFIG_CTRL_ADDR 0x7b 47#define AUTO_CONFIG_CTRL_ADDR 0x7b
47#define AUTO_CONFIG_USL_ADDR 0x7d 48#define AUTO_CONFIG_USL_ADDR 0x7d
48#define AUTO_CONFIG_LSL_ADDR 0x7e 49#define AUTO_CONFIG_LSL_ADDR 0x7e
49#define AUTO_CONFIG_TL_ADDR 0x7f 50#define AUTO_CONFIG_TL_ADDR 0x7f
50 51
51/* Threshold of touch/release trigger */ 52/* Threshold of touch/release trigger */
52#define TOUCH_THRESHOLD 0x0f 53#define TOUCH_THRESHOLD 0x08
53#define RELEASE_THRESHOLD 0x0a 54#define RELEASE_THRESHOLD 0x05
54/* Masks for touch and release triggers */ 55/* Masks for touch and release triggers */
55#define TOUCH_STATUS_MASK 0xfff 56#define TOUCH_STATUS_MASK 0xfff
56/* MPR121 has 12 keys */ 57/* MPR121 has 12 keys */
@@ -127,7 +128,7 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
127 struct i2c_client *client) 128 struct i2c_client *client)
128{ 129{
129 const struct mpr121_init_register *reg; 130 const struct mpr121_init_register *reg;
130 unsigned char usl, lsl, tl; 131 unsigned char usl, lsl, tl, eleconf;
131 int i, t, vdd, ret; 132 int i, t, vdd, ret;
132 133
133 /* Set up touch/release threshold for ele0-ele11 */ 134 /* Set up touch/release threshold for ele0-ele11 */
@@ -163,8 +164,15 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
163 ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl); 164 ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl);
164 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl); 165 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl);
165 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl); 166 ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl);
167
168 /*
169 * The quick-charge bit lets the capacitance sensing inputs charge to
170 * their ready state quickly; without it the buttons may not function
171 * after system boot.
172 */
173 eleconf = mpr121->keycount | ELECTRODE_CONF_QUICK_CHARGE;
166 ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR, 174 ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR,
167 mpr121->keycount); 175 eleconf);
168 if (ret != 0) 176 if (ret != 0)
169 goto err_i2c_write; 177 goto err_i2c_write;
170 178
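With this change the ELECTRODE_CONF write packs two fields into one byte: the enabled-key count in the low bits and the quick-charge request in bit 7 (layout taken from the defines added by the patch). A tiny stand-alone illustration with a hypothetical helper:

#include <stdint.h>

#define ELECTRODE_CONF_QUICK_CHARGE 0x80

static uint8_t mpr121_eleconf(unsigned int keycount)
{
        /* keycount (1..12) enables ELE0..ELE(keycount-1); bit 7 asks the
         * part to charge the sensing inputs to their ready state quickly */
        return (uint8_t)(keycount | ELECTRODE_CONF_QUICK_CHARGE);
}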
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 6229c3e8e78b..e7cc51d0fb34 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -700,9 +700,9 @@ static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
700 return 0; 700 return 0;
701 701
702err_pmic_reg_read: 702err_pmic_reg_read:
703 free_irq(kp->key_stuck_irq, NULL); 703 free_irq(kp->key_stuck_irq, kp);
704err_req_stuck_irq: 704err_req_stuck_irq:
705 free_irq(kp->key_sense_irq, NULL); 705 free_irq(kp->key_sense_irq, kp);
706err_gpio_config: 706err_gpio_config:
707err_get_irq: 707err_get_irq:
708 input_free_device(kp->input); 708 input_free_device(kp->input);
@@ -717,8 +717,8 @@ static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
717 struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); 717 struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
718 718
719 device_init_wakeup(&pdev->dev, 0); 719 device_init_wakeup(&pdev->dev, 0);
720 free_irq(kp->key_stuck_irq, NULL); 720 free_irq(kp->key_stuck_irq, kp);
721 free_irq(kp->key_sense_irq, NULL); 721 free_irq(kp->key_sense_irq, kp);
722 input_unregister_device(kp->input); 722 input_unregister_device(kp->input);
723 kfree(kp); 723 kfree(kp);
724 724
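The pmic8xxx fix above corrects a general mistake: free_irq() must be given the same dev_id cookie that was passed to request_irq(), otherwise the kernel cannot find the matching handler (and, for shared lines, might remove the wrong one). A sketch with invented names:

#include <linux/interrupt.h>

struct foo_kp {
        int irq;
};

static irqreturn_t foo_kp_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_kp_setup(struct foo_kp *kp)
{
        /* dev_id is kp ... */
        return request_irq(kp->irq, foo_kp_handler, IRQF_TRIGGER_RISING,
                           "foo-kp", kp);
}

static void foo_kp_teardown(struct foo_kp *kp)
{
        /* ... so the same kp, not NULL, must be passed back here */
        free_irq(kp->irq, kp);
}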
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index ca7b89196ab7..b21bf5b876bb 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -239,8 +239,6 @@ static int __devexit qt1070_remove(struct i2c_client *client)
239 input_unregister_device(data->input); 239 input_unregister_device(data->input);
240 kfree(data); 240 kfree(data);
241 241
242 i2c_set_clientdata(client, NULL);
243
244 return 0; 242 return 0;
245} 243}
246 244
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 6876700a4469..934aeb583b30 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -291,7 +291,7 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev)
291 return 0; 291 return 0;
292} 292}
293 293
294#if CONFIG_PM_SLEEP 294#ifdef CONFIG_PM_SLEEP
295static int sh_keysc_suspend(struct device *dev) 295static int sh_keysc_suspend(struct device *dev)
296{ 296{
297 struct platform_device *pdev = to_platform_device(dev); 297 struct platform_device *pdev = to_platform_device(dev);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 2b3b73ec6689..da3828fc2c09 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -657,7 +657,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
657 657
658 input_set_drvdata(input_dev, kbc); 658 input_set_drvdata(input_dev, kbc);
659 659
660 input_dev->evbit[0] = BIT_MASK(EV_KEY); 660 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
661 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 661 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
662 662
663 input_dev->keycode = kbc->keycode; 663 input_dev->keycode = kbc->keycode;
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index c8f097a15d89..1c58681de81f 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -337,5 +337,5 @@ module_exit(keypad_exit);
337 337
338MODULE_AUTHOR("Cyril Chemparathy"); 338MODULE_AUTHOR("Cyril Chemparathy");
339MODULE_DESCRIPTION("TNETV107X Keypad Driver"); 339MODULE_DESCRIPTION("TNETV107X Keypad Driver");
340MODULE_ALIAS("platform: tnetv107x-keypad"); 340MODULE_ALIAS("platform:tnetv107x-keypad");
341MODULE_LICENSE("GPL"); 341MODULE_LICENSE("GPL");
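The tnetv107x change above only removes a stray space, but it matters: platform-device autoloading matches the uevent string "platform:<device name>" against MODULE_ALIAS() entries, so the alias must be exactly "platform:" plus the device name. Sketch with an invented device name:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver bar_keypad_driver = {
        .driver = {
                .name = "bar-keypad",
        },
};

/* No space after the colon; must match the platform device name exactly */
MODULE_ALIAS("platform:bar-keypad");
MODULE_LICENSE("GPL");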
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d1bf8724b58f..c9104bb4db06 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -100,6 +100,27 @@ config INPUT_MAX8925_ONKEY
100 To compile this driver as a module, choose M here: the module 100 To compile this driver as a module, choose M here: the module
101 will be called max8925_onkey. 101 will be called max8925_onkey.
102 102
103config INPUT_MMA8450
104 tristate "MMA8450 - Freescale's 3-Axis, 8/12-bit Digital Accelerometer"
105 depends on I2C
106 select INPUT_POLLDEV
107 help
108 Say Y here if you want to support Freescale's MMA8450 Accelerometer
109 through I2C interface.
110
111 To compile this driver as a module, choose M here: the
112 module will be called mma8450.
113
114config INPUT_MPU3050
115 tristate "MPU3050 Triaxial gyroscope sensor"
116 depends on I2C
117 help
118 Say Y here if you want to support InvenSense MPU3050
119 connected via an I2C bus.
120
121 To compile this driver as a module, choose M here: the
122 module will be called mpu3050.
123
103config INPUT_APANEL 124config INPUT_APANEL
104 tristate "Fujitsu Lifebook Application Panel buttons" 125 tristate "Fujitsu Lifebook Application Panel buttons"
105 depends on X86 && I2C && LEDS_CLASS 126 depends on X86 && I2C && LEDS_CLASS
@@ -209,6 +230,23 @@ config INPUT_KEYSPAN_REMOTE
209 To compile this driver as a module, choose M here: the module will 230 To compile this driver as a module, choose M here: the module will
210 be called keyspan_remote. 231 be called keyspan_remote.
211 232
233config INPUT_KXTJ9
234 tristate "Kionix KXTJ9 tri-axis digital accelerometer"
235 depends on I2C
236 help
237 Say Y here to enable support for the Kionix KXTJ9 digital tri-axis
238 accelerometer.
239
240 To compile this driver as a module, choose M here: the module will
241 be called kxtj9.
242
243config INPUT_KXTJ9_POLLED_MODE
244 bool "Enable polling mode support"
245 depends on INPUT_KXTJ9
246 select INPUT_POLLDEV
247 help
248 Say Y here if you need accelerometer to work in polling mode.
249
212config INPUT_POWERMATE 250config INPUT_POWERMATE
213 tristate "Griffin PowerMate and Contour Jog support" 251 tristate "Griffin PowerMate and Contour Jog support"
214 depends on USB_ARCH_HAS_HCD 252 depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 4da7c3a60e04..299ad5edba84 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -25,8 +25,11 @@ obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
25obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o 25obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
26obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o 26obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
27obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o 27obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
28obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
28obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o 29obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
29obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o 30obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
31obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
32obj-$(CONFIG_INPUT_MPU3050) += mpu3050.o
30obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o 33obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
31obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o 34obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
32obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o 35obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
@@ -46,4 +49,3 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
46obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o 49obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
47obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o 50obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
48obj-$(CONFIG_INPUT_YEALINK) += yealink.o 51obj-$(CONFIG_INPUT_YEALINK) += yealink.o
49
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 4f72bdd69410..d00edc9f39d1 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -6,7 +6,6 @@
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/version.h>
10#include <linux/init.h> 9#include <linux/init.h>
11#include <linux/interrupt.h> 10#include <linux/interrupt.h>
12#include <linux/irq.h> 11#include <linux/irq.h>
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
new file mode 100644
index 000000000000..c456f63b6bae
--- /dev/null
+++ b/drivers/input/misc/kxtj9.c
@@ -0,0 +1,671 @@
1/*
2 * Copyright (C) 2011 Kionix, Inc.
3 * Written by Chris Hudson <chudson@kionix.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
17 * 02111-1307, USA
18 */
19
20#include <linux/delay.h>
21#include <linux/i2c.h>
22#include <linux/input.h>
23#include <linux/interrupt.h>
24#include <linux/slab.h>
25#include <linux/input/kxtj9.h>
26#include <linux/input-polldev.h>
27
28#define NAME "kxtj9"
29#define G_MAX 8000
30/* OUTPUT REGISTERS */
31#define XOUT_L 0x06
32#define WHO_AM_I 0x0F
33/* CONTROL REGISTERS */
34#define INT_REL 0x1A
35#define CTRL_REG1 0x1B
36#define INT_CTRL1 0x1E
37#define DATA_CTRL 0x21
38/* CONTROL REGISTER 1 BITS */
39#define PC1_OFF 0x7F
40#define PC1_ON (1 << 7)
41/* Data ready function enable bit: set during probe if using irq mode */
42#define DRDYE (1 << 5)
43/* INTERRUPT CONTROL REGISTER 1 BITS */
44/* Set these during probe if using irq mode */
45#define KXTJ9_IEL (1 << 3)
46#define KXTJ9_IEA (1 << 4)
47#define KXTJ9_IEN (1 << 5)
48/* INPUT_ABS CONSTANTS */
49#define FUZZ 3
50#define FLAT 3
51/* RESUME STATE INDICES */
52#define RES_DATA_CTRL 0
53#define RES_CTRL_REG1 1
54#define RES_INT_CTRL1 2
55#define RESUME_ENTRIES 3
56
57/*
58 * The following table lists the maximum appropriate poll interval for each
59 * available output data rate.
60 */
61static const struct {
62 unsigned int cutoff;
63 u8 mask;
64} kxtj9_odr_table[] = {
65 { 3, ODR800F },
66 { 5, ODR400F },
67 { 10, ODR200F },
68 { 20, ODR100F },
69 { 40, ODR50F },
70 { 80, ODR25F },
71 { 0, ODR12_5F},
72};
73
74struct kxtj9_data {
75 struct i2c_client *client;
76 struct kxtj9_platform_data pdata;
77 struct input_dev *input_dev;
78#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
79 struct input_polled_dev *poll_dev;
80#endif
81 unsigned int last_poll_interval;
82 u8 shift;
83 u8 ctrl_reg1;
84 u8 data_ctrl;
85 u8 int_ctrl;
86};
87
88static int kxtj9_i2c_read(struct kxtj9_data *tj9, u8 addr, u8 *data, int len)
89{
90 struct i2c_msg msgs[] = {
91 {
92 .addr = tj9->client->addr,
93 .flags = tj9->client->flags,
94 .len = 1,
95 .buf = &addr,
96 },
97 {
98 .addr = tj9->client->addr,
99 .flags = tj9->client->flags | I2C_M_RD,
100 .len = len,
101 .buf = data,
102 },
103 };
104
105 return i2c_transfer(tj9->client->adapter, msgs, 2);
106}
107
108static void kxtj9_report_acceleration_data(struct kxtj9_data *tj9)
109{
110 s16 acc_data[3]; /* Data bytes from hardware xL, xH, yL, yH, zL, zH */
111 s16 x, y, z;
112 int err;
113
114 err = kxtj9_i2c_read(tj9, XOUT_L, (u8 *)acc_data, 6);
115 if (err < 0)
116 dev_err(&tj9->client->dev, "accelerometer data read failed\n");
117
118 x = le16_to_cpu(acc_data[tj9->pdata.axis_map_x]) >> tj9->shift;
119 y = le16_to_cpu(acc_data[tj9->pdata.axis_map_y]) >> tj9->shift;
120 z = le16_to_cpu(acc_data[tj9->pdata.axis_map_z]) >> tj9->shift;
121
122 input_report_abs(tj9->input_dev, ABS_X, tj9->pdata.negate_x ? -x : x);
123 input_report_abs(tj9->input_dev, ABS_Y, tj9->pdata.negate_y ? -y : y);
124 input_report_abs(tj9->input_dev, ABS_Z, tj9->pdata.negate_z ? -z : z);
125 input_sync(tj9->input_dev);
126}
127
128static irqreturn_t kxtj9_isr(int irq, void *dev)
129{
130 struct kxtj9_data *tj9 = dev;
131 int err;
132
133 /* data ready is the only possible interrupt type */
134 kxtj9_report_acceleration_data(tj9);
135
136 err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
137 if (err < 0)
138 dev_err(&tj9->client->dev,
139 "error clearing interrupt status: %d\n", err);
140
141 return IRQ_HANDLED;
142}
143
144static int kxtj9_update_g_range(struct kxtj9_data *tj9, u8 new_g_range)
145{
146 switch (new_g_range) {
147 case KXTJ9_G_2G:
148 tj9->shift = 4;
149 break;
150 case KXTJ9_G_4G:
151 tj9->shift = 3;
152 break;
153 case KXTJ9_G_8G:
154 tj9->shift = 2;
155 break;
156 default:
157 return -EINVAL;
158 }
159
160 tj9->ctrl_reg1 &= 0xe7;
161 tj9->ctrl_reg1 |= new_g_range;
162
163 return 0;
164}
165
166static int kxtj9_update_odr(struct kxtj9_data *tj9, unsigned int poll_interval)
167{
168 int err;
169 int i;
170
171 /* Use the lowest ODR that can support the requested poll interval */
172 for (i = 0; i < ARRAY_SIZE(kxtj9_odr_table); i++) {
173 tj9->data_ctrl = kxtj9_odr_table[i].mask;
174 if (poll_interval < kxtj9_odr_table[i].cutoff)
175 break;
176 }
177
178 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
179 if (err < 0)
180 return err;
181
182 err = i2c_smbus_write_byte_data(tj9->client, DATA_CTRL, tj9->data_ctrl);
183 if (err < 0)
184 return err;
185
186 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
187 if (err < 0)
188 return err;
189
190 return 0;
191}
192
193static int kxtj9_device_power_on(struct kxtj9_data *tj9)
194{
195 if (tj9->pdata.power_on)
196 return tj9->pdata.power_on();
197
198 return 0;
199}
200
201static void kxtj9_device_power_off(struct kxtj9_data *tj9)
202{
203 int err;
204
205 tj9->ctrl_reg1 &= PC1_OFF;
206 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
207 if (err < 0)
208 dev_err(&tj9->client->dev, "soft power off failed\n");
209
210 if (tj9->pdata.power_off)
211 tj9->pdata.power_off();
212}
213
214static int kxtj9_enable(struct kxtj9_data *tj9)
215{
216 int err;
217
218 err = kxtj9_device_power_on(tj9);
219 if (err < 0)
220 return err;
221
222 /* ensure that PC1 is cleared before updating control registers */
223 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
224 if (err < 0)
225 return err;
226
227 /* only write INT_CTRL_REG1 if in irq mode */
228 if (tj9->client->irq) {
229 err = i2c_smbus_write_byte_data(tj9->client,
230 INT_CTRL1, tj9->int_ctrl);
231 if (err < 0)
232 return err;
233 }
234
235 err = kxtj9_update_g_range(tj9, tj9->pdata.g_range);
236 if (err < 0)
237 return err;
238
239 /* turn on outputs */
240 tj9->ctrl_reg1 |= PC1_ON;
241 err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
242 if (err < 0)
243 return err;
244
245 err = kxtj9_update_odr(tj9, tj9->last_poll_interval);
246 if (err < 0)
247 return err;
248
249 /* clear initial interrupt if in irq mode */
250 if (tj9->client->irq) {
251 err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
252 if (err < 0) {
253 dev_err(&tj9->client->dev,
254 "error clearing interrupt: %d\n", err);
255 goto fail;
256 }
257 }
258
259 return 0;
260
261fail:
262 kxtj9_device_power_off(tj9);
263 return err;
264}
265
266static void kxtj9_disable(struct kxtj9_data *tj9)
267{
268 kxtj9_device_power_off(tj9);
269}
270
271static int kxtj9_input_open(struct input_dev *input)
272{
273 struct kxtj9_data *tj9 = input_get_drvdata(input);
274
275 return kxtj9_enable(tj9);
276}
277
278static void kxtj9_input_close(struct input_dev *dev)
279{
280 struct kxtj9_data *tj9 = input_get_drvdata(dev);
281
282 kxtj9_disable(tj9);
283}
284
285static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
286 struct input_dev *input_dev)
287{
288 __set_bit(EV_ABS, input_dev->evbit);
289 input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
290 input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
291 input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
292
293 input_dev->name = "kxtj9_accel";
294 input_dev->id.bustype = BUS_I2C;
295 input_dev->dev.parent = &tj9->client->dev;
296}
297
298static int __devinit kxtj9_setup_input_device(struct kxtj9_data *tj9)
299{
300 struct input_dev *input_dev;
301 int err;
302
303 input_dev = input_allocate_device();
304 if (!input_dev) {
305 dev_err(&tj9->client->dev, "input device allocate failed\n");
306 return -ENOMEM;
307 }
308
309 tj9->input_dev = input_dev;
310
311 input_dev->open = kxtj9_input_open;
312 input_dev->close = kxtj9_input_close;
313 input_set_drvdata(input_dev, tj9);
314
315 kxtj9_init_input_device(tj9, input_dev);
316
317 err = input_register_device(tj9->input_dev);
318 if (err) {
319 dev_err(&tj9->client->dev,
320 "unable to register input polled device %s: %d\n",
321 tj9->input_dev->name, err);
322 input_free_device(tj9->input_dev);
323 return err;
324 }
325
326 return 0;
327}
328
329/*
330 * When IRQ mode is selected, we need to provide an interface to allow the user
331 * to change the output data rate of the part. For consistency, we are using
332 * the set_poll method, which accepts a poll interval in milliseconds, and then
333 * calls update_odr() while passing this value as an argument. In IRQ mode, the
334 * data outputs will not be read at the requested poll interval; instead, the
335 * part runs at the lowest ODR that can support it. The client application
336 * will be responsible for retrieving data from the input node at the desired
337 * interval.
338 */
339
340/* Returns currently selected poll interval (in ms) */
341static ssize_t kxtj9_get_poll(struct device *dev,
342 struct device_attribute *attr, char *buf)
343{
344 struct i2c_client *client = to_i2c_client(dev);
345 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
346
347 return sprintf(buf, "%d\n", tj9->last_poll_interval);
348}
349
350/* Allow users to select a new poll interval (in ms) */
351static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr,
352 const char *buf, size_t count)
353{
354 struct i2c_client *client = to_i2c_client(dev);
355 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
356 struct input_dev *input_dev = tj9->input_dev;
357 unsigned int interval;
358 int error;
359
360 error = kstrtouint(buf, 10, &interval);
361 if (error < 0)
362 return error;
363
364 /* Lock the device to prevent races with open/close (and itself) */
365 mutex_lock(&input_dev->mutex);
366
367 disable_irq(client->irq);
368
369 /*
370 * Set current interval to the greater of the minimum interval or
371 * the requested interval
372 */
373 tj9->last_poll_interval = max(interval, tj9->pdata.min_interval);
374
375 kxtj9_update_odr(tj9, tj9->last_poll_interval);
376
377 enable_irq(client->irq);
378 mutex_unlock(&input_dev->mutex);
379
380 return count;
381}
382
383static DEVICE_ATTR(poll, S_IRUGO|S_IWUSR, kxtj9_get_poll, kxtj9_set_poll);
384
385static struct attribute *kxtj9_attributes[] = {
386 &dev_attr_poll.attr,
387 NULL
388};
389
390static struct attribute_group kxtj9_attribute_group = {
391 .attrs = kxtj9_attributes
392};
393
394
395#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
396static void kxtj9_poll(struct input_polled_dev *dev)
397{
398 struct kxtj9_data *tj9 = dev->private;
399 unsigned int poll_interval = dev->poll_interval;
400
401 kxtj9_report_acceleration_data(tj9);
402
403 if (poll_interval != tj9->last_poll_interval) {
404 kxtj9_update_odr(tj9, poll_interval);
405 tj9->last_poll_interval = poll_interval;
406 }
407}
408
409static void kxtj9_polled_input_open(struct input_polled_dev *dev)
410{
411 struct kxtj9_data *tj9 = dev->private;
412
413 kxtj9_enable(tj9);
414}
415
416static void kxtj9_polled_input_close(struct input_polled_dev *dev)
417{
418 struct kxtj9_data *tj9 = dev->private;
419
420 kxtj9_disable(tj9);
421}
422
423static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
424{
425 int err;
426 struct input_polled_dev *poll_dev;
427 poll_dev = input_allocate_polled_device();
428
429 if (!poll_dev) {
430 dev_err(&tj9->client->dev,
431 "Failed to allocate polled device\n");
432 return -ENOMEM;
433 }
434
435 tj9->poll_dev = poll_dev;
436 tj9->input_dev = poll_dev->input;
437
438 poll_dev->private = tj9;
439 poll_dev->poll = kxtj9_poll;
440 poll_dev->open = kxtj9_polled_input_open;
441 poll_dev->close = kxtj9_polled_input_close;
442
443 kxtj9_init_input_device(tj9, poll_dev->input);
444
445 err = input_register_polled_device(poll_dev);
446 if (err) {
447 dev_err(&tj9->client->dev,
448 "Unable to register polled device, err=%d\n", err);
449 input_free_polled_device(poll_dev);
450 return err;
451 }
452
453 return 0;
454}
455
456static void __devexit kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
457{
458 input_unregister_polled_device(tj9->poll_dev);
459 input_free_polled_device(tj9->poll_dev);
460}
461
462#else
463
464static inline int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
465{
466 return -ENOSYS;
467}
468
469static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
470{
471}
472
473#endif
474
475static int __devinit kxtj9_verify(struct kxtj9_data *tj9)
476{
477 int retval;
478
479 retval = kxtj9_device_power_on(tj9);
480 if (retval < 0)
481 return retval;
482
483 retval = i2c_smbus_read_byte_data(tj9->client, WHO_AM_I);
484 if (retval < 0) {
485 dev_err(&tj9->client->dev, "read err int source\n");
486 goto out;
487 }
488
489 retval = retval != 0x06 ? -EIO : 0;
490
491out:
492 kxtj9_device_power_off(tj9);
493 return retval;
494}
495
496static int __devinit kxtj9_probe(struct i2c_client *client,
497 const struct i2c_device_id *id)
498{
499 const struct kxtj9_platform_data *pdata = client->dev.platform_data;
500 struct kxtj9_data *tj9;
501 int err;
502
503 if (!i2c_check_functionality(client->adapter,
504 I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA)) {
505 dev_err(&client->dev, "client is not i2c capable\n");
506 return -ENXIO;
507 }
508
509 if (!pdata) {
510 dev_err(&client->dev, "platform data is NULL; exiting\n");
511 return -EINVAL;
512 }
513
514 tj9 = kzalloc(sizeof(*tj9), GFP_KERNEL);
515 if (!tj9) {
516 dev_err(&client->dev,
517 "failed to allocate memory for module data\n");
518 return -ENOMEM;
519 }
520
521 tj9->client = client;
522 tj9->pdata = *pdata;
523
524 if (pdata->init) {
525 err = pdata->init();
526 if (err < 0)
527 goto err_free_mem;
528 }
529
530 err = kxtj9_verify(tj9);
531 if (err < 0) {
532 dev_err(&client->dev, "device not recognized\n");
533 goto err_pdata_exit;
534 }
535
536 i2c_set_clientdata(client, tj9);
537
538 tj9->ctrl_reg1 = tj9->pdata.res_12bit | tj9->pdata.g_range;
539 tj9->data_ctrl = tj9->pdata.data_odr_init;
540
541 if (client->irq) {
542 /* If in irq mode, populate INT_CTRL_REG1 and enable DRDY. */
543 tj9->int_ctrl |= KXTJ9_IEN | KXTJ9_IEA | KXTJ9_IEL;
544 tj9->ctrl_reg1 |= DRDYE;
545
546 err = kxtj9_setup_input_device(tj9);
547 if (err)
548 goto err_pdata_exit;
549
550 err = request_threaded_irq(client->irq, NULL, kxtj9_isr,
551 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
552 "kxtj9-irq", tj9);
553 if (err) {
554 dev_err(&client->dev, "request irq failed: %d\n", err);
555 goto err_destroy_input;
556 }
557
558 err = sysfs_create_group(&client->dev.kobj, &kxtj9_attribute_group);
559 if (err) {
560 dev_err(&client->dev, "sysfs create failed: %d\n", err);
561 goto err_free_irq;
562 }
563
564 } else {
565 err = kxtj9_setup_polled_device(tj9);
566 if (err)
567 goto err_pdata_exit;
568 }
569
570 return 0;
571
572err_free_irq:
573 free_irq(client->irq, tj9);
574err_destroy_input:
575 input_unregister_device(tj9->input_dev);
576err_pdata_exit:
577 if (tj9->pdata.exit)
578 tj9->pdata.exit();
579err_free_mem:
580 kfree(tj9);
581 return err;
582}
583
584static int __devexit kxtj9_remove(struct i2c_client *client)
585{
586 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
587
588 if (client->irq) {
589 sysfs_remove_group(&client->dev.kobj, &kxtj9_attribute_group);
590 free_irq(client->irq, tj9);
591 input_unregister_device(tj9->input_dev);
592 } else {
593 kxtj9_teardown_polled_device(tj9);
594 }
595
596 if (tj9->pdata.exit)
597 tj9->pdata.exit();
598
599 kfree(tj9);
600
601 return 0;
602}
603
604#ifdef CONFIG_PM_SLEEP
605static int kxtj9_suspend(struct device *dev)
606{
607 struct i2c_client *client = to_i2c_client(dev);
608 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
609 struct input_dev *input_dev = tj9->input_dev;
610
611 mutex_lock(&input_dev->mutex);
612
613 if (input_dev->users)
614 kxtj9_disable(tj9);
615
616 mutex_unlock(&input_dev->mutex);
617 return 0;
618}
619
620static int kxtj9_resume(struct device *dev)
621{
622 struct i2c_client *client = to_i2c_client(dev);
623 struct kxtj9_data *tj9 = i2c_get_clientdata(client);
624 struct input_dev *input_dev = tj9->input_dev;
625 int retval = 0;
626
627 mutex_lock(&input_dev->mutex);
628
629 if (input_dev->users)
630 kxtj9_enable(tj9);
631
632 mutex_unlock(&input_dev->mutex);
633 return retval;
634}
635#endif
636
637static SIMPLE_DEV_PM_OPS(kxtj9_pm_ops, kxtj9_suspend, kxtj9_resume);
638
639static const struct i2c_device_id kxtj9_id[] = {
640 { NAME, 0 },
641 { },
642};
643
644MODULE_DEVICE_TABLE(i2c, kxtj9_id);
645
646static struct i2c_driver kxtj9_driver = {
647 .driver = {
648 .name = NAME,
649 .owner = THIS_MODULE,
650 .pm = &kxtj9_pm_ops,
651 },
652 .probe = kxtj9_probe,
653 .remove = __devexit_p(kxtj9_remove),
654 .id_table = kxtj9_id,
655};
656
657static int __init kxtj9_init(void)
658{
659 return i2c_add_driver(&kxtj9_driver);
660}
661module_init(kxtj9_init);
662
663static void __exit kxtj9_exit(void)
664{
665 i2c_del_driver(&kxtj9_driver);
666}
667module_exit(kxtj9_exit);
668
669MODULE_DESCRIPTION("KXTJ9 accelerometer driver");
670MODULE_AUTHOR("Chris Hudson <chudson@kionix.com>");
671MODULE_LICENSE("GPL");
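To see what kxtj9_update_odr() above ends up selecting, here is a stand-alone C rendition of the table walk, with the register masks replaced by plain sample rates (the cutoffs are the ones added by this patch) so it can be compiled and run outside the kernel.

#include <stdio.h>

static const struct {
        unsigned int cutoff_ms;
        unsigned int rate_hz;
} odr_table[] = {
        {  3, 800 }, {  5, 400 }, { 10, 200 }, { 20, 100 },
        { 40,  50 }, { 80,  25 }, {  0,  12 },  /* last entry: 12.5 Hz */
};

static unsigned int pick_rate(unsigned int poll_ms)
{
        unsigned int i, rate = 0;

        for (i = 0; i < sizeof(odr_table) / sizeof(odr_table[0]); i++) {
                rate = odr_table[i].rate_hz;
                if (poll_ms < odr_table[i].cutoff_ms)
                        break;
        }
        return rate;
}

int main(void)
{
        /* A 15 ms poll interval selects 100 Hz, the slowest rate that can
         * still deliver a fresh sample within the requested interval. */
        printf("%u Hz\n", pick_rate(15));
        return 0;
}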
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
new file mode 100644
index 000000000000..20f8f9284f02
--- /dev/null
+++ b/drivers/input/misc/mma8450.c
@@ -0,0 +1,256 @@
1/*
2 * Driver for Freescale's 3-Axis Accelerometer MMA8450
3 *
4 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include <linux/i2c.h>
26#include <linux/input-polldev.h>
27
28#define MMA8450_DRV_NAME "mma8450"
29
30#define MODE_CHANGE_DELAY_MS 100
31#define POLL_INTERVAL 100
32#define POLL_INTERVAL_MAX 500
33
34/* register definitions */
35#define MMA8450_STATUS 0x00
36#define MMA8450_STATUS_ZXYDR 0x08
37
38#define MMA8450_OUT_X8 0x01
39#define MMA8450_OUT_Y8 0x02
40#define MMA8450_OUT_Z8 0x03
41
42#define MMA8450_OUT_X_LSB 0x05
43#define MMA8450_OUT_X_MSB 0x06
44#define MMA8450_OUT_Y_LSB 0x07
45#define MMA8450_OUT_Y_MSB 0x08
46#define MMA8450_OUT_Z_LSB 0x09
47#define MMA8450_OUT_Z_MSB 0x0a
48
49#define MMA8450_XYZ_DATA_CFG 0x16
50
51#define MMA8450_CTRL_REG1 0x38
52#define MMA8450_CTRL_REG2 0x39
53
54/* mma8450 status */
55struct mma8450 {
56 struct i2c_client *client;
57 struct input_polled_dev *idev;
58};
59
60static int mma8450_read(struct mma8450 *m, unsigned off)
61{
62 struct i2c_client *c = m->client;
63 int ret;
64
65 ret = i2c_smbus_read_byte_data(c, off);
66 if (ret < 0)
67 dev_err(&c->dev,
68 "failed to read register 0x%02x, error %d\n",
69 off, ret);
70
71 return ret;
72}
73
74static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
75{
76 struct i2c_client *c = m->client;
77 int error;
78
79 error = i2c_smbus_write_byte_data(c, off, v);
80 if (error < 0) {
81 dev_err(&c->dev,
82 "failed to write to register 0x%02x, error %d\n",
83 off, error);
84 return error;
85 }
86
87 return 0;
88}
89
90static int mma8450_read_xyz(struct mma8450 *m, int *x, int *y, int *z)
91{
92 struct i2c_client *c = m->client;
93 u8 buff[6];
94 int err;
95
96 err = i2c_smbus_read_i2c_block_data(c, MMA8450_OUT_X_LSB, 6, buff);
97 if (err < 0) {
98 dev_err(&c->dev,
99 "failed to read block data at 0x%02x, error %d\n",
100 MMA8450_OUT_X_LSB, err);
101 return err;
102 }
103
104 *x = ((buff[1] << 4) & 0xff0) | (buff[0] & 0xf);
105 *y = ((buff[3] << 4) & 0xff0) | (buff[2] & 0xf);
106 *z = ((buff[5] << 4) & 0xff0) | (buff[4] & 0xf);
107
108 return 0;
109}
110
111static void mma8450_poll(struct input_polled_dev *dev)
112{
113 struct mma8450 *m = dev->private;
114 int x, y, z;
115 int ret;
116 int err;
117
118 ret = mma8450_read(m, MMA8450_STATUS);
119 if (ret < 0)
120 return;
121
122 if (!(ret & MMA8450_STATUS_ZXYDR))
123 return;
124
125 err = mma8450_read_xyz(m, &x, &y, &z);
126 if (err)
127 return;
128
129 input_report_abs(dev->input, ABS_X, x);
130 input_report_abs(dev->input, ABS_Y, y);
131 input_report_abs(dev->input, ABS_Z, z);
132 input_sync(dev->input);
133}
134
135/* Initialize the MMA8450 chip */
136static void mma8450_open(struct input_polled_dev *dev)
137{
138 struct mma8450 *m = dev->private;
139 int err;
140
141 /* enable all events from X/Y/Z, no FIFO */
142 err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07);
143 if (err)
144 return;
145
146 /*
147 * Sleep mode poll rate - 50Hz
148 * System output data rate - 400Hz
149 * Full scale selection - Active, +/- 2G
150 */
151 err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01);
152 if (err < 0)
153 return;
154
155 msleep(MODE_CHANGE_DELAY_MS);
156}
157
158static void mma8450_close(struct input_polled_dev *dev)
159{
160 struct mma8450 *m = dev->private;
161
162 mma8450_write(m, MMA8450_CTRL_REG1, 0x00);
163 mma8450_write(m, MMA8450_CTRL_REG2, 0x01);
164}
165
166/*
167 * I2C init/probing/exit functions
168 */
169static int __devinit mma8450_probe(struct i2c_client *c,
170 const struct i2c_device_id *id)
171{
172 struct input_polled_dev *idev;
173 struct mma8450 *m;
174 int err;
175
176 m = kzalloc(sizeof(struct mma8450), GFP_KERNEL);
177 idev = input_allocate_polled_device();
178 if (!m || !idev) {
179 err = -ENOMEM;
180 goto err_free_mem;
181 }
182
183 m->client = c;
184 m->idev = idev;
185
186 idev->private = m;
187 idev->input->name = MMA8450_DRV_NAME;
188 idev->input->id.bustype = BUS_I2C;
189 idev->poll = mma8450_poll;
190 idev->poll_interval = POLL_INTERVAL;
191 idev->poll_interval_max = POLL_INTERVAL_MAX;
192 idev->open = mma8450_open;
193 idev->close = mma8450_close;
194
195 __set_bit(EV_ABS, idev->input->evbit);
196 input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32);
197 input_set_abs_params(idev->input, ABS_Y, -2048, 2047, 32, 32);
198 input_set_abs_params(idev->input, ABS_Z, -2048, 2047, 32, 32);
199
200 err = input_register_polled_device(idev);
201 if (err) {
202 dev_err(&c->dev, "failed to register polled input device\n");
203 goto err_free_mem;
204 }
205
206 return 0;
207
208err_free_mem:
209 input_free_polled_device(idev);
210 kfree(m);
211 return err;
212}
213
214static int __devexit mma8450_remove(struct i2c_client *c)
215{
216 struct mma8450 *m = i2c_get_clientdata(c);
217 struct input_polled_dev *idev = m->idev;
218
219 input_unregister_polled_device(idev);
220 input_free_polled_device(idev);
221 kfree(m);
222
223 return 0;
224}
225
226static const struct i2c_device_id mma8450_id[] = {
227 { MMA8450_DRV_NAME, 0 },
228 { },
229};
230MODULE_DEVICE_TABLE(i2c, mma8450_id);
231
232static struct i2c_driver mma8450_driver = {
233 .driver = {
234 .name = MMA8450_DRV_NAME,
235 .owner = THIS_MODULE,
236 },
237 .probe = mma8450_probe,
238 .remove = __devexit_p(mma8450_remove),
239 .id_table = mma8450_id,
240};
241
242static int __init mma8450_init(void)
243{
244 return i2c_add_driver(&mma8450_driver);
245}
246module_init(mma8450_init);
247
248static void __exit mma8450_exit(void)
249{
250 i2c_del_driver(&mma8450_driver);
251}
252module_exit(mma8450_exit);
253
254MODULE_AUTHOR("Freescale Semiconductor, Inc.");
255MODULE_DESCRIPTION("MMA8450 3-Axis Accelerometer Driver");
256MODULE_LICENSE("GPL");
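The mma8450 poll path above assembles each 12-bit sample from an LSB/MSB byte pair. A small stand-alone sketch of that packing; the sign-extension step shown is an assumption about the part's two's-complement output format and is not performed by the code in this patch.

#include <stdint.h>

static int mma8450_assemble(uint8_t lsb, uint8_t msb)
{
        /* low nibble of the LSB byte plus the full MSB byte, as above */
        int v = ((msb << 4) & 0xff0) | (lsb & 0x0f);

        /* assumed: interpret as a signed 12-bit two's-complement value */
        if (v & 0x800)
                v -= 0x1000;
        return v;
}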
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
new file mode 100644
index 000000000000..b95fac15b2ea
--- /dev/null
+++ b/drivers/input/misc/mpu3050.c
@@ -0,0 +1,376 @@
1/*
2 * MPU3050 Tri-axis gyroscope driver
3 *
4 * Copyright (C) 2011 Wistron Co.Ltd
5 * Joseph Lai <joseph_lai@wistron.com>
6 *
7 * Trimmed down by Alan Cox <alan@linux.intel.com> to produce this version
8 *
9 * This is a 'lite' version of the driver, while we consider the right way
10 * to present the other features to user space. In particular it requires that
11 * the device have an IRQ, and it only provides an input interface, so is not much
12 * use for device orientation. A fuller version is available from the Meego
13 * tree.
14 *
15 * This program is based on bma023.c.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; version 2 of the License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
29 *
30 */
31
32#include <linux/module.h>
33#include <linux/init.h>
34#include <linux/interrupt.h>
35#include <linux/platform_device.h>
36#include <linux/mutex.h>
37#include <linux/err.h>
38#include <linux/i2c.h>
39#include <linux/input.h>
40#include <linux/delay.h>
41#include <linux/slab.h>
42#include <linux/pm_runtime.h>
43
44#define MPU3050_CHIP_ID_REG 0x00
45#define MPU3050_CHIP_ID 0x69
46#define MPU3050_XOUT_H 0x1D
47#define MPU3050_PWR_MGM 0x3E
48#define MPU3050_PWR_MGM_POS 6
49#define MPU3050_PWR_MGM_MASK 0x40
50
51#define MPU3050_AUTO_DELAY 1000
52
53#define MPU3050_MIN_VALUE -32768
54#define MPU3050_MAX_VALUE 32767
55
56struct axis_data {
57 s16 x;
58 s16 y;
59 s16 z;
60};
61
62struct mpu3050_sensor {
63 struct i2c_client *client;
64 struct device *dev;
65 struct input_dev *idev;
66};
67
68/**
69 * mpu3050_xyz_read_reg - read the axes values
70 * @buffer: buffer that receives the raw register data
71 * @length: number of bytes to read
72 *
73 * Reads the register values in one transaction or returns a negative
74 * error code on failure.
75 */
76static int mpu3050_xyz_read_reg(struct i2c_client *client,
77 u8 *buffer, int length)
78{
79 /*
80 * Annoyingly, we can't make this const because the i2c layer doesn't
81 * declare input buffers const.
82 */
83 char cmd = MPU3050_XOUT_H;
84 struct i2c_msg msg[] = {
85 {
86 .addr = client->addr,
87 .flags = 0,
88 .len = 1,
89 .buf = &cmd,
90 },
91 {
92 .addr = client->addr,
93 .flags = I2C_M_RD,
94 .len = length,
95 .buf = buffer,
96 },
97 };
98
99 return i2c_transfer(client->adapter, msg, 2);
100}
101
102/**
103 * mpu3050_read_xyz - get co-ordinates from device
104 * @client: i2c address of sensor
105 * @coords: co-ordinates to update
106 *
107 * Return the converted X Y and Z co-ordinates from the sensor device
108 */
109static void mpu3050_read_xyz(struct i2c_client *client,
110 struct axis_data *coords)
111{
112 u16 buffer[3];
113
114 mpu3050_xyz_read_reg(client, (u8 *)buffer, 6);
115 coords->x = be16_to_cpu(buffer[0]);
116 coords->y = be16_to_cpu(buffer[1]);
117 coords->z = be16_to_cpu(buffer[2]);
118 dev_dbg(&client->dev, "%s: x %d, y %d, z %d\n", __func__,
119 coords->x, coords->y, coords->z);
120}
121
122/**
123 * mpu3050_set_power_mode - set the power mode
124 * @client: i2c client for the sensor
125 * @val: power state to select; 1: normal power, 0: low power
126 *
127 * Put device to normal-power mode or low-power mode.
128 */
129static void mpu3050_set_power_mode(struct i2c_client *client, u8 val)
130{
131 u8 value;
132
133 value = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM);
134 value = (value & ~MPU3050_PWR_MGM_MASK) |
135 (((val << MPU3050_PWR_MGM_POS) & MPU3050_PWR_MGM_MASK) ^
136 MPU3050_PWR_MGM_MASK);
137 i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, value);
138}
139
140/**
141 * mpu3050_input_open - called on input event open
142 * @input: input dev of opened device
143 *
144 * The input layer calls this function when the input device is opened. The
145 * function takes a runtime-PM reference to resume the device, so it is ready
146 * to provide data.
147 */
148static int mpu3050_input_open(struct input_dev *input)
149{
150 struct mpu3050_sensor *sensor = input_get_drvdata(input);
151
152 pm_runtime_get(sensor->dev);
153
154 return 0;
155}
156
157/**
158 * mpu3050_input_close - called on input event close
159 * @input: input dev of closed device
160 *
161 * The input layer calls this function when the input device is closed. The
162 * function drops its runtime-PM reference so the device may suspend.
163 */
164static void mpu3050_input_close(struct input_dev *input)
165{
166 struct mpu3050_sensor *sensor = input_get_drvdata(input);
167
168 pm_runtime_put(sensor->dev);
169}
170
171/**
172 * mpu3050_interrupt_thread - handle an IRQ
173 * @irq: interrupt number
174 * @data: the sensor
175 *
176 * Runs in the threaded IRQ context after an interrupt occurs. Reads
177 * the sensor data and generates an input event for it.
178 */
179static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
180{
181 struct mpu3050_sensor *sensor = data;
182 struct axis_data axis;
183
184 mpu3050_read_xyz(sensor->client, &axis);
185
186 input_report_abs(sensor->idev, ABS_X, axis.x);
187 input_report_abs(sensor->idev, ABS_Y, axis.y);
188 input_report_abs(sensor->idev, ABS_Z, axis.z);
189 input_sync(sensor->idev);
190
191 return IRQ_HANDLED;
192}
193
194/**
195 * mpu3050_probe - device detection callback
196 * @client: i2c client of found device
197 * @id: id match information
198 *
199 * The I2C layer calls us when it believes a sensor is present at this
200 * address. Probe to see if this is correct and to validate the device.
201 *
202 * If present install the relevant sysfs interfaces and input device.
203 */
204static int __devinit mpu3050_probe(struct i2c_client *client,
205 const struct i2c_device_id *id)
206{
207 struct mpu3050_sensor *sensor;
208 struct input_dev *idev;
209 int ret;
210 int error;
211
212 sensor = kzalloc(sizeof(struct mpu3050_sensor), GFP_KERNEL);
213 idev = input_allocate_device();
214 if (!sensor || !idev) {
215 dev_err(&client->dev, "failed to allocate driver data\n");
216 error = -ENOMEM;
217 goto err_free_mem;
218 }
219
220 sensor->client = client;
221 sensor->dev = &client->dev;
222 sensor->idev = idev;
223
224 mpu3050_set_power_mode(client, 1);
225 msleep(10);
226
227 ret = i2c_smbus_read_byte_data(client, MPU3050_CHIP_ID_REG);
228 if (ret < 0) {
229 dev_err(&client->dev, "failed to detect device\n");
230 error = -ENXIO;
231 goto err_free_mem;
232 }
233
234 if (ret != MPU3050_CHIP_ID) {
235 dev_err(&client->dev, "unsupported chip id\n");
236 error = -ENXIO;
237 goto err_free_mem;
238 }
239
240 idev->name = "MPU3050";
241 idev->id.bustype = BUS_I2C;
242 idev->dev.parent = &client->dev;
243
244 idev->open = mpu3050_input_open;
245 idev->close = mpu3050_input_close;
246
247 __set_bit(EV_ABS, idev->evbit);
248 input_set_abs_params(idev, ABS_X,
249 MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
250 input_set_abs_params(idev, ABS_Y,
251 MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
252 input_set_abs_params(idev, ABS_Z,
253 MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
254
255 input_set_drvdata(idev, sensor);
256
257 pm_runtime_set_active(&client->dev);
258
259 error = request_threaded_irq(client->irq,
260 NULL, mpu3050_interrupt_thread,
261 IRQF_TRIGGER_RISING,
262 "mpu_int", sensor);
263 if (error) {
264 dev_err(&client->dev,
265 "can't get IRQ %d, error %d\n", client->irq, error);
266 goto err_pm_set_suspended;
267 }
268
269 error = input_register_device(idev);
270 if (error) {
271 dev_err(&client->dev, "failed to register input device\n");
272 goto err_free_irq;
273 }
274
275 pm_runtime_enable(&client->dev);
276 pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY);
277
278 return 0;
279
280err_free_irq:
281 free_irq(client->irq, sensor);
282err_pm_set_suspended:
283 pm_runtime_set_suspended(&client->dev);
284err_free_mem:
285 input_free_device(idev);
286 kfree(sensor);
287 return error;
288}
289
290/**
291 * mpu3050_remove - remove a sensor
292 * @client: i2c client of sensor being removed
293 *
294 * Our sensor is going away, clean up the resources.
295 */
296static int __devexit mpu3050_remove(struct i2c_client *client)
297{
298 struct mpu3050_sensor *sensor = i2c_get_clientdata(client);
299
300 pm_runtime_disable(&client->dev);
301 pm_runtime_set_suspended(&client->dev);
302
303 free_irq(client->irq, sensor);
304 input_unregister_device(sensor->idev);
305 kfree(sensor);
306
307 return 0;
308}
309
310#ifdef CONFIG_PM
311/**
312 * mpu3050_suspend - called on device suspend
313 * @dev: device being suspended
314 *
315 * Put the device into sleep mode before we suspend the machine.
316 */
317static int mpu3050_suspend(struct device *dev)
318{
319 struct i2c_client *client = to_i2c_client(dev);
320
321 mpu3050_set_power_mode(client, 0);
322
323 return 0;
324}
325
326/**
327 * mpu3050_resume - called on device resume
328 * @dev: device being resumed
329 *
330 * Put the device into powered mode on resume.
331 */
332static int mpu3050_resume(struct device *dev)
333{
334 struct i2c_client *client = to_i2c_client(dev);
335
336 mpu3050_set_power_mode(client, 1);
337 msleep(100); /* wait for gyro chip resume */
338
339 return 0;
340}
341#endif
342
343static UNIVERSAL_DEV_PM_OPS(mpu3050_pm, mpu3050_suspend, mpu3050_resume, NULL);
344
345static const struct i2c_device_id mpu3050_ids[] = {
346 { "mpu3050", 0 },
347 { }
348};
349MODULE_DEVICE_TABLE(i2c, mpu3050_ids);
350
351static struct i2c_driver mpu3050_i2c_driver = {
352 .driver = {
353 .name = "mpu3050",
354 .owner = THIS_MODULE,
355 .pm = &mpu3050_pm,
356 },
357 .probe = mpu3050_probe,
358 .remove = __devexit_p(mpu3050_remove),
359 .id_table = mpu3050_ids,
360};
361
362static int __init mpu3050_init(void)
363{
364 return i2c_add_driver(&mpu3050_i2c_driver);
365}
366module_init(mpu3050_init);
367
368static void __exit mpu3050_exit(void)
369{
370 i2c_del_driver(&mpu3050_i2c_driver);
371}
372module_exit(mpu3050_exit);
373
374MODULE_AUTHOR("Wistron Corp.");
375MODULE_DESCRIPTION("MPU3050 Tri-axis gyroscope driver");
376MODULE_LICENSE("GPL");
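mpu3050_read_xyz() above burst-reads six bytes starting at MPU3050_XOUT_H and converts them from big-endian. A minimal stand-alone equivalent of that unpacking, for reference only:

#include <stdint.h>

struct axis {
        int16_t x, y, z;
};

static struct axis mpu3050_unpack(const uint8_t buf[6])
{
        struct axis a;

        /* each axis is a big-endian signed 16-bit word: high byte first */
        a.x = (int16_t)((buf[0] << 8) | buf[1]);
        a.y = (int16_t)((buf[2] << 8) | buf[3]);
        a.z = (int16_t)((buf[4] << 8) | buf[5]);
        return a;
}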
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 62bae99424e6..ad2e51c04db8 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -373,7 +373,7 @@ static struct xenbus_driver xenkbd_driver = {
373 373
374static int __init xenkbd_init(void) 374static int __init xenkbd_init(void)
375{ 375{
376 if (!xen_pv_domain()) 376 if (!xen_domain())
377 return -ENODEV; 377 return -ENODEV;
378 378
379 /* Nothing to do if running in dom0. */ 379 /* Nothing to do if running in dom0. */
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 7b6ce178f1b6..58902fbb9896 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -191,7 +191,7 @@ static void __exit gpio_mouse_exit(void)
191} 191}
192module_exit(gpio_mouse_exit); 192module_exit(gpio_mouse_exit);
193 193
194MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); 194MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
195MODULE_DESCRIPTION("GPIO mouse driver"); 195MODULE_DESCRIPTION("GPIO mouse driver");
196MODULE_LICENSE("GPL"); 196MODULE_LICENSE("GPL");
197MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */ 197MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index c31ad11df6bb..83bcaba96b89 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -33,7 +33,7 @@ static const char *desired_serio_phys;
33static int lifebook_limit_serio3(const struct dmi_system_id *d) 33static int lifebook_limit_serio3(const struct dmi_system_id *d)
34{ 34{
35 desired_serio_phys = "isa0060/serio3"; 35 desired_serio_phys = "isa0060/serio3";
36 return 0; 36 return 1;
37} 37}
38 38
39static bool lifebook_use_6byte_proto; 39static bool lifebook_use_6byte_proto;
@@ -41,7 +41,7 @@ static bool lifebook_use_6byte_proto;
41static int lifebook_set_6byte_proto(const struct dmi_system_id *d) 41static int lifebook_set_6byte_proto(const struct dmi_system_id *d)
42{ 42{
43 lifebook_use_6byte_proto = true; 43 lifebook_use_6byte_proto = true;
44 return 0; 44 return 1;
45} 45}
46 46
47static const struct dmi_system_id __initconst lifebook_dmi_table[] = { 47static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 943cfec15665..6c5d84fcdea1 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/input.h> 14#include <linux/input.h>
15#include <linux/version.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/platform_device.h> 17#include <linux/platform_device.h>
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 1242775fee19..2fc887a51066 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/version.h>
24#include <linux/input.h> 23#include <linux/input.h>
25#include <linux/ctype.h> 24#include <linux/ctype.h>
26#include <linux/libps2.h> 25#include <linux/libps2.h>
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index e06e045bf907..5538fc657af1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -207,27 +207,37 @@ static int synaptics_identify(struct psmouse *psmouse)
207static int synaptics_resolution(struct psmouse *psmouse) 207static int synaptics_resolution(struct psmouse *psmouse)
208{ 208{
209 struct synaptics_data *priv = psmouse->private; 209 struct synaptics_data *priv = psmouse->private;
210 unsigned char res[3]; 210 unsigned char resp[3];
211 unsigned char max[3];
212 211
213 if (SYN_ID_MAJOR(priv->identity) < 4) 212 if (SYN_ID_MAJOR(priv->identity) < 4)
214 return 0; 213 return 0;
215 214
216 if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) { 215 if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, resp) == 0) {
217 if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) { 216 if (resp[0] != 0 && (resp[1] & 0x80) && resp[2] != 0) {
218 priv->x_res = res[0]; /* x resolution in units/mm */ 217 priv->x_res = resp[0]; /* x resolution in units/mm */
219 priv->y_res = res[2]; /* y resolution in units/mm */ 218 priv->y_res = resp[2]; /* y resolution in units/mm */
220 } 219 }
221 } 220 }
222 221
223 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && 222 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
224 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { 223 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
225 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_DIMENSIONS, max)) { 224 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
226 printk(KERN_ERR "Synaptics claims to have dimensions query," 225 printk(KERN_ERR "Synaptics claims to have max coordinates"
227 " but I'm not able to read it.\n"); 226 " query, but I'm not able to read it.\n");
227 } else {
228 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
229 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
230 }
231 }
232
233 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 &&
234 SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) {
235 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
236 printk(KERN_ERR "Synaptics claims to have min coordinates"
237 " query, but I'm not able to read it.\n");
228 } else { 238 } else {
229 priv->x_max = (max[0] << 5) | ((max[1] & 0x0f) << 1); 239 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
230 priv->y_max = (max[2] << 5) | ((max[1] & 0xf0) >> 3); 240 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
231 } 241 }
232 } 242 }
233 243
@@ -406,26 +416,10 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
406 memset(hw, 0, sizeof(struct synaptics_hw_state)); 416 memset(hw, 0, sizeof(struct synaptics_hw_state));
407 417
408 if (SYN_MODEL_NEWABS(priv->model_id)) { 418 if (SYN_MODEL_NEWABS(priv->model_id)) {
409 hw->x = (((buf[3] & 0x10) << 8) |
410 ((buf[1] & 0x0f) << 8) |
411 buf[4]);
412 hw->y = (((buf[3] & 0x20) << 7) |
413 ((buf[1] & 0xf0) << 4) |
414 buf[5]);
415
416 hw->z = buf[2];
417 hw->w = (((buf[0] & 0x30) >> 2) | 419 hw->w = (((buf[0] & 0x30) >> 2) |
418 ((buf[0] & 0x04) >> 1) | 420 ((buf[0] & 0x04) >> 1) |
419 ((buf[3] & 0x04) >> 2)); 421 ((buf[3] & 0x04) >> 2));
420 422
421 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
422 /* Gesture packet: (x, y, z) at half resolution */
423 priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
424 priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
425 priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
426 return 1;
427 }
428
429 hw->left = (buf[0] & 0x01) ? 1 : 0; 423 hw->left = (buf[0] & 0x01) ? 1 : 0;
430 hw->right = (buf[0] & 0x02) ? 1 : 0; 424 hw->right = (buf[0] & 0x02) ? 1 : 0;
431 425
@@ -448,6 +442,22 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
448 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; 442 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
449 } 443 }
450 444
445 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
446 /* Gesture packet: (x, y, z) at half resolution */
447 priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
448 priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
449 priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
450 return 1;
451 }
452
453 hw->x = (((buf[3] & 0x10) << 8) |
454 ((buf[1] & 0x0f) << 8) |
455 buf[4]);
456 hw->y = (((buf[3] & 0x20) << 7) |
457 ((buf[1] & 0xf0) << 4) |
458 buf[5]);
459 hw->z = buf[2];
460
451 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && 461 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
452 ((buf[0] ^ buf[3]) & 0x02)) { 462 ((buf[0] ^ buf[3]) & 0x02)) {
453 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { 463 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
@@ -485,7 +495,8 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
485 return 0; 495 return 0;
486} 496}
487 497
488static void set_slot(struct input_dev *dev, int slot, bool active, int x, int y) 498static void synaptics_report_semi_mt_slot(struct input_dev *dev, int slot,
499 bool active, int x, int y)
489{ 500{
490 input_mt_slot(dev, slot); 501 input_mt_slot(dev, slot);
491 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); 502 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
@@ -502,14 +513,16 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
502 int num_fingers) 513 int num_fingers)
503{ 514{
504 if (num_fingers >= 2) { 515 if (num_fingers >= 2) {
505 set_slot(dev, 0, true, min(a->x, b->x), min(a->y, b->y)); 516 synaptics_report_semi_mt_slot(dev, 0, true, min(a->x, b->x),
506 set_slot(dev, 1, true, max(a->x, b->x), max(a->y, b->y)); 517 min(a->y, b->y));
518 synaptics_report_semi_mt_slot(dev, 1, true, max(a->x, b->x),
519 max(a->y, b->y));
507 } else if (num_fingers == 1) { 520 } else if (num_fingers == 1) {
508 set_slot(dev, 0, true, a->x, a->y); 521 synaptics_report_semi_mt_slot(dev, 0, true, a->x, a->y);
509 set_slot(dev, 1, false, 0, 0); 522 synaptics_report_semi_mt_slot(dev, 1, false, 0, 0);
510 } else { 523 } else {
511 set_slot(dev, 0, false, 0, 0); 524 synaptics_report_semi_mt_slot(dev, 0, false, 0, 0);
512 set_slot(dev, 1, false, 0, 0); 525 synaptics_report_semi_mt_slot(dev, 1, false, 0, 0);
513 } 526 }
514} 527}
515 528
@@ -684,23 +697,36 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
684static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) 697static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
685{ 698{
686 int i; 699 int i;
700 int fuzz = SYN_CAP_REDUCED_FILTERING(priv->ext_cap_0c) ?
701 SYN_REDUCED_FILTER_FUZZ : 0;
687 702
688 __set_bit(INPUT_PROP_POINTER, dev->propbit); 703 __set_bit(INPUT_PROP_POINTER, dev->propbit);
689 704
690 __set_bit(EV_ABS, dev->evbit); 705 __set_bit(EV_ABS, dev->evbit);
691 input_set_abs_params(dev, ABS_X, 706 input_set_abs_params(dev, ABS_X,
692 XMIN_NOMINAL, priv->x_max ?: XMAX_NOMINAL, 0, 0); 707 priv->x_min ?: XMIN_NOMINAL,
708 priv->x_max ?: XMAX_NOMINAL,
709 fuzz, 0);
693 input_set_abs_params(dev, ABS_Y, 710 input_set_abs_params(dev, ABS_Y,
694 YMIN_NOMINAL, priv->y_max ?: YMAX_NOMINAL, 0, 0); 711 priv->y_min ?: YMIN_NOMINAL,
712 priv->y_max ?: YMAX_NOMINAL,
713 fuzz, 0);
695 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); 714 input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
696 715
697 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) { 716 if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) {
698 __set_bit(INPUT_PROP_SEMI_MT, dev->propbit); 717 __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
699 input_mt_init_slots(dev, 2); 718 input_mt_init_slots(dev, 2);
700 input_set_abs_params(dev, ABS_MT_POSITION_X, XMIN_NOMINAL, 719 input_set_abs_params(dev, ABS_MT_POSITION_X,
701 priv->x_max ?: XMAX_NOMINAL, 0, 0); 720 priv->x_min ?: XMIN_NOMINAL,
702 input_set_abs_params(dev, ABS_MT_POSITION_Y, YMIN_NOMINAL, 721 priv->x_max ?: XMAX_NOMINAL,
703 priv->y_max ?: YMAX_NOMINAL, 0, 0); 722 fuzz, 0);
723 input_set_abs_params(dev, ABS_MT_POSITION_Y,
724 priv->y_min ?: YMIN_NOMINAL,
725 priv->y_max ?: YMAX_NOMINAL,
726 fuzz, 0);
727
728 input_abs_set_res(dev, ABS_MT_POSITION_X, priv->x_res);
729 input_abs_set_res(dev, ABS_MT_POSITION_Y, priv->y_res);
704 } 730 }
705 731
706 if (SYN_CAP_PALMDETECT(priv->capabilities)) 732 if (SYN_CAP_PALMDETECT(priv->capabilities))
@@ -971,4 +997,3 @@ bool synaptics_supported(void)
971} 997}
972 998
973#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */ 999#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
974
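
The hunk above shows only the first two statements of the renamed synaptics_report_semi_mt_slot() helper. A minimal sketch of how a semi-MT slot is typically reported with the kernel MT API follows; the position-reporting branch and the example_* name are assumptions, not taken from the patch.

#include <linux/input.h>
#include <linux/input/mt.h>

/*
 * Sketch of reporting one semi-MT slot: select the slot, flag whether a
 * contact is present, and (the part not visible in the hunk, assumed here)
 * emit the bounding-box corner for active slots.
 */
static void example_report_semi_mt_slot(struct input_dev *dev, int slot,
                                        bool active, int x, int y)
{
        input_mt_slot(dev, slot);
        input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
        if (active) {
                input_report_abs(dev, ABS_MT_POSITION_X, x);
                input_report_abs(dev, ABS_MT_POSITION_Y, y);
        }
}

With input_mt_init_slots(dev, 2), as set_input_params() does further down, the two slots carry the bounding-box corners rather than true per-finger positions.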
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 7453938bf5ef..ca040aa80fa7 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -19,7 +19,8 @@
19#define SYN_QUE_RESOLUTION 0x08 19#define SYN_QUE_RESOLUTION 0x08
20#define SYN_QUE_EXT_CAPAB 0x09 20#define SYN_QUE_EXT_CAPAB 0x09
21#define SYN_QUE_EXT_CAPAB_0C 0x0c 21#define SYN_QUE_EXT_CAPAB_0C 0x0c
22#define SYN_QUE_EXT_DIMENSIONS 0x0d 22#define SYN_QUE_EXT_MAX_COORDS 0x0d
23#define SYN_QUE_EXT_MIN_COORDS 0x0f
23 24
24/* synatics modes */ 25/* synatics modes */
25#define SYN_BIT_ABSOLUTE_MODE (1 << 7) 26#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -66,18 +67,21 @@
66 * 1 0x60 multifinger mode identifies firmware finger counting 67 * 1 0x60 multifinger mode identifies firmware finger counting
67 * (not reporting!) algorithm. 68 * (not reporting!) algorithm.
68 * Not particularly meaningful 69 * Not particularly meaningful
69 * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered 70 * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
70 * 2 0x01 clickpad bit 1 2-button ClickPad 71 * 2 0x01 clickpad bit 1 2-button ClickPad
71 * 2 0x02 deluxe LED controls touchpad support LED commands 72 * 2 0x02 deluxe LED controls touchpad support LED commands
72 * ala multimedia control bar 73 * ala multimedia control bar
73 * 2 0x04 reduced filtering firmware does less filtering on 74 * 2 0x04 reduced filtering firmware does less filtering on
74 * position data, driver should watch 75 * position data, driver should watch
75 * for noise. 76 * for noise.
77 * 2 0x20 report min query 0x0f gives min coord reported
76 */ 78 */
77#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ 79#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
78#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ 80#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
79#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) 81#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
82#define SYN_CAP_MIN_DIMENSIONS(ex0c) ((ex0c) & 0x002000)
80#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000) 83#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
84#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
81 85
82/* synaptics modes query bits */ 86/* synaptics modes query bits */
83#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) 87#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -104,6 +108,9 @@
104#define SYN_NEWABS_RELAXED 2 108#define SYN_NEWABS_RELAXED 2
105#define SYN_OLDABS 3 109#define SYN_OLDABS 3
106 110
111/* amount to fuzz position data when touchpad reports reduced filtering */
112#define SYN_REDUCED_FILTER_FUZZ 8
113
107/* 114/*
108 * A structure to describe the state of the touchpad hardware (buttons and pad) 115 * A structure to describe the state of the touchpad hardware (buttons and pad)
109 */ 116 */
@@ -130,7 +137,8 @@ struct synaptics_data {
130 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ 137 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
131 unsigned long int identity; /* Identification */ 138 unsigned long int identity; /* Identification */
132 unsigned int x_res, y_res; /* X/Y resolution in units/mm */ 139 unsigned int x_res, y_res; /* X/Y resolution in units/mm */
133 unsigned int x_max, y_max; /* Max dimensions (from FW) */ 140 unsigned int x_max, y_max; /* Max coordinates (from FW) */
141 unsigned int x_min, y_min; /* Min coordinates (from FW) */
134 142
135 unsigned char pkt_type; /* packet type - old, new, etc */ 143 unsigned char pkt_type; /* packet type - old, new, etc */
136 unsigned char mode; /* current mode byte */ 144 unsigned char mode; /* current mode byte */
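
Both new queries, SYN_QUE_EXT_MAX_COORDS (0x0d) and SYN_QUE_EXT_MIN_COORDS (0x0f), return three bytes that synaptics_resolution() unpacks with identical shifts. For reference, a stand-alone sketch of that unpacking; decode_coords() and the sample bytes are illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

/*
 * Unpack a three-byte Synaptics extended query response (0x0d for the
 * maximum, 0x0f for the minimum) into X/Y coordinates, using the same
 * shifts as the driver: bits 7-0 of resp[0]/resp[2] land in bits 12-5,
 * and the low/high nibble of resp[1] supplies bits 4-1.
 */
static void decode_coords(const uint8_t resp[3], unsigned int *x, unsigned int *y)
{
        *x = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
        *y = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
}

int main(void)
{
        const uint8_t resp[3] = { 0xb5, 0x4c, 0x9a };   /* made-up query bytes */
        unsigned int x, y;

        decode_coords(resp, &x, &y);
        printf("x = %u, y = %u\n", x, y);
        return 0;
}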
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index 6ee8f0ddad51..95280f9207e1 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -372,6 +372,6 @@ static void __exit psif_exit(void)
372module_init(psif_init); 372module_init(psif_init);
373module_exit(psif_exit); 373module_exit(psif_exit);
374 374
375MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>"); 375MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
376MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver"); 376MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver");
377MODULE_LICENSE("GPL"); 377MODULE_LICENSE("GPL");
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 42206205e4f5..979c443bf1ef 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -795,7 +795,7 @@ int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback)
795 795
796/************************* Keepalive timer task *********************/ 796/************************* Keepalive timer task *********************/
797 797
798void hp_sdc_kicker (unsigned long data) 798static void hp_sdc_kicker(unsigned long data)
799{ 799{
800 tasklet_schedule(&hp_sdc.task); 800 tasklet_schedule(&hp_sdc.task);
801 /* Re-insert the periodic task. */ 801 /* Re-insert the periodic task. */
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 0a619c558bfb..6d89fd1842c3 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -225,7 +225,6 @@
225 /* toolMode codes 225 /* toolMode codes
226 */ 226 */
227#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN 227#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN
228#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN
229#define AIPTEK_TOOL_BUTTON_PENCIL_MODE BTN_TOOL_PENCIL 228#define AIPTEK_TOOL_BUTTON_PENCIL_MODE BTN_TOOL_PENCIL
230#define AIPTEK_TOOL_BUTTON_BRUSH_MODE BTN_TOOL_BRUSH 229#define AIPTEK_TOOL_BUTTON_BRUSH_MODE BTN_TOOL_BRUSH
231#define AIPTEK_TOOL_BUTTON_AIRBRUSH_MODE BTN_TOOL_AIRBRUSH 230#define AIPTEK_TOOL_BUTTON_AIRBRUSH_MODE BTN_TOOL_AIRBRUSH
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 08ba5ad9c9be..03ebcc8b24b5 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -15,6 +15,7 @@
15#include "wacom_wac.h" 15#include "wacom_wac.h"
16#include "wacom.h" 16#include "wacom.h"
17#include <linux/input/mt.h> 17#include <linux/input/mt.h>
18#include <linux/hid.h>
18 19
19/* resolution for penabled devices */ 20/* resolution for penabled devices */
20#define WACOM_PL_RES 20 21#define WACOM_PL_RES 20
@@ -264,6 +265,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
264 wacom->id[0] = 0; 265 wacom->id[0] = 0;
265 input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */ 266 input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
266 input_report_key(input, wacom->tool[0], prox); 267 input_report_key(input, wacom->tool[0], prox);
268 input_event(input, EV_MSC, MSC_SERIAL, 1);
267 input_sync(input); /* sync last event */ 269 input_sync(input); /* sync last event */
268 } 270 }
269 271
@@ -273,11 +275,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
273 prox = data[7] & 0xf8; 275 prox = data[7] & 0xf8;
274 if (prox || wacom->id[1]) { 276 if (prox || wacom->id[1]) {
275 wacom->id[1] = PAD_DEVICE_ID; 277 wacom->id[1] = PAD_DEVICE_ID;
276 input_report_key(input, BTN_0, (data[7] & 0x40)); 278 input_report_key(input, BTN_BACK, (data[7] & 0x40));
277 input_report_key(input, BTN_4, (data[7] & 0x80)); 279 input_report_key(input, BTN_FORWARD, (data[7] & 0x80));
278 rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3); 280 rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
279 input_report_rel(input, REL_WHEEL, rw); 281 input_report_rel(input, REL_WHEEL, rw);
280 input_report_key(input, BTN_TOOL_FINGER, 0xf0);
281 if (!prox) 282 if (!prox)
282 wacom->id[1] = 0; 283 wacom->id[1] = 0;
283 input_report_abs(input, ABS_MISC, wacom->id[1]); 284 input_report_abs(input, ABS_MISC, wacom->id[1]);
@@ -290,18 +291,17 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
290 prox = (data[7] & 0xf8) || data[8]; 291 prox = (data[7] & 0xf8) || data[8];
291 if (prox || wacom->id[1]) { 292 if (prox || wacom->id[1]) {
292 wacom->id[1] = PAD_DEVICE_ID; 293 wacom->id[1] = PAD_DEVICE_ID;
293 input_report_key(input, BTN_0, (data[7] & 0x08)); 294 input_report_key(input, BTN_BACK, (data[7] & 0x08));
294 input_report_key(input, BTN_1, (data[7] & 0x20)); 295 input_report_key(input, BTN_LEFT, (data[7] & 0x20));
295 input_report_key(input, BTN_4, (data[7] & 0x10)); 296 input_report_key(input, BTN_FORWARD, (data[7] & 0x10));
296 input_report_key(input, BTN_5, (data[7] & 0x40)); 297 input_report_key(input, BTN_RIGHT, (data[7] & 0x40));
297 input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f)); 298 input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
298 input_report_key(input, BTN_TOOL_FINGER, 0xf0);
299 if (!prox) 299 if (!prox)
300 wacom->id[1] = 0; 300 wacom->id[1] = 0;
301 input_report_abs(input, ABS_MISC, wacom->id[1]); 301 input_report_abs(input, ABS_MISC, wacom->id[1]);
302 input_event(input, EV_MSC, MSC_SERIAL, 0xf0); 302 input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
303 retval = 1;
303 } 304 }
304 retval = 1;
305 break; 305 break;
306 } 306 }
307exit: 307exit:
@@ -494,10 +494,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
494 494
495 /* pad packets. Works as a second tool and is always in prox */ 495 /* pad packets. Works as a second tool and is always in prox */
496 if (data[0] == WACOM_REPORT_INTUOSPAD) { 496 if (data[0] == WACOM_REPORT_INTUOSPAD) {
497 /* initiate the pad as a device */
498 if (wacom->tool[1] != BTN_TOOL_FINGER)
499 wacom->tool[1] = BTN_TOOL_FINGER;
500
501 if (features->type >= INTUOS4S && features->type <= INTUOS4L) { 497 if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
502 input_report_key(input, BTN_0, (data[2] & 0x01)); 498 input_report_key(input, BTN_0, (data[2] & 0x01));
503 input_report_key(input, BTN_1, (data[3] & 0x01)); 499 input_report_key(input, BTN_1, (data[3] & 0x01));
@@ -1080,18 +1076,14 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1080 1076
1081 switch (wacom_wac->features.type) { 1077 switch (wacom_wac->features.type) {
1082 case WACOM_MO: 1078 case WACOM_MO:
1083 __set_bit(BTN_1, input_dev->keybit);
1084 __set_bit(BTN_5, input_dev->keybit);
1085
1086 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); 1079 input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
1087 /* fall through */ 1080 /* fall through */
1088 1081
1089 case WACOM_G4: 1082 case WACOM_G4:
1090 input_set_capability(input_dev, EV_MSC, MSC_SERIAL); 1083 input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
1091 1084
1092 __set_bit(BTN_TOOL_FINGER, input_dev->keybit); 1085 __set_bit(BTN_BACK, input_dev->keybit);
1093 __set_bit(BTN_0, input_dev->keybit); 1086 __set_bit(BTN_FORWARD, input_dev->keybit);
1094 __set_bit(BTN_4, input_dev->keybit);
1095 /* fall through */ 1087 /* fall through */
1096 1088
1097 case GRAPHIRE: 1089 case GRAPHIRE:
@@ -1127,10 +1119,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1127 case CINTIQ: 1119 case CINTIQ:
1128 for (i = 0; i < 8; i++) 1120 for (i = 0; i < 8; i++)
1129 __set_bit(BTN_0 + i, input_dev->keybit); 1121 __set_bit(BTN_0 + i, input_dev->keybit);
1130 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1131 1122
1132 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); 1123 if (wacom_wac->features.type != WACOM_21UX2) {
1133 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); 1124 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
1125 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
1126 }
1127
1134 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1128 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1135 wacom_setup_cintiq(wacom_wac); 1129 wacom_setup_cintiq(wacom_wac);
1136 break; 1130 break;
@@ -1151,8 +1145,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1151 __set_bit(BTN_2, input_dev->keybit); 1145 __set_bit(BTN_2, input_dev->keybit);
1152 __set_bit(BTN_3, input_dev->keybit); 1146 __set_bit(BTN_3, input_dev->keybit);
1153 1147
1154 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1155
1156 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); 1148 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
1157 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1149 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1158 /* fall through */ 1150 /* fall through */
@@ -1170,7 +1162,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1170 case INTUOS4S: 1162 case INTUOS4S:
1171 for (i = 0; i < 7; i++) 1163 for (i = 0; i < 7; i++)
1172 __set_bit(BTN_0 + i, input_dev->keybit); 1164 __set_bit(BTN_0 + i, input_dev->keybit);
1173 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1174 1165
1175 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1166 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1176 wacom_setup_intuos(wacom_wac); 1167 wacom_setup_intuos(wacom_wac);
@@ -1295,6 +1286,12 @@ static const struct wacom_features wacom_features_0x65 =
1295static const struct wacom_features wacom_features_0x69 = 1286static const struct wacom_features wacom_features_0x69 =
1296 { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 1287 { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511,
1297 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; 1288 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES };
1289static const struct wacom_features wacom_features_0x6A =
1290 { "Wacom Bamboo1 4x6", WACOM_PKGLEN_GRAPHIRE, 14760, 9225, 1023,
1291 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1292static const struct wacom_features wacom_features_0x6B =
1293 { "Wacom Bamboo1 5x8", WACOM_PKGLEN_GRAPHIRE, 21648, 13530, 1023,
1294 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1298static const struct wacom_features wacom_features_0x20 = 1295static const struct wacom_features wacom_features_0x20 =
1299 { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 1296 { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023,
1300 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1297 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1427,6 +1424,9 @@ static const struct wacom_features wacom_features_0x90 =
1427static const struct wacom_features wacom_features_0x93 = 1424static const struct wacom_features wacom_features_0x93 =
1428 { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 1425 { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
1429 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1426 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1427static const struct wacom_features wacom_features_0x97 =
1428 { "Wacom ISDv4 97", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 511,
1429 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1430static const struct wacom_features wacom_features_0x9A = 1430static const struct wacom_features wacom_features_0x9A =
1431 { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 1431 { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
1432 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1432 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1458,7 +1458,7 @@ static const struct wacom_features wacom_features_0xD3 =
1458 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 1458 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
1459 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1459 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1460static const struct wacom_features wacom_features_0xD4 = 1460static const struct wacom_features wacom_features_0xD4 =
1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1463static const struct wacom_features wacom_features_0xD6 = 1463static const struct wacom_features wacom_features_0xD6 =
1464 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1464 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
@@ -1483,6 +1483,11 @@ static const struct wacom_features wacom_features_0x6004 =
1483 USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ 1483 USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
1484 .driver_info = (kernel_ulong_t)&wacom_features_##prod 1484 .driver_info = (kernel_ulong_t)&wacom_features_##prod
1485 1485
1486#define USB_DEVICE_DETAILED(prod, class, sub, proto) \
1487 USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_WACOM, prod, class, \
1488 sub, proto), \
1489 .driver_info = (kernel_ulong_t)&wacom_features_##prod
1490
1486#define USB_DEVICE_LENOVO(prod) \ 1491#define USB_DEVICE_LENOVO(prod) \
1487 USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ 1492 USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
1488 .driver_info = (kernel_ulong_t)&wacom_features_##prod 1493 .driver_info = (kernel_ulong_t)&wacom_features_##prod
@@ -1506,6 +1511,8 @@ const struct usb_device_id wacom_ids[] = {
1506 { USB_DEVICE_WACOM(0x64) }, 1511 { USB_DEVICE_WACOM(0x64) },
1507 { USB_DEVICE_WACOM(0x65) }, 1512 { USB_DEVICE_WACOM(0x65) },
1508 { USB_DEVICE_WACOM(0x69) }, 1513 { USB_DEVICE_WACOM(0x69) },
1514 { USB_DEVICE_WACOM(0x6A) },
1515 { USB_DEVICE_WACOM(0x6B) },
1509 { USB_DEVICE_WACOM(0x20) }, 1516 { USB_DEVICE_WACOM(0x20) },
1510 { USB_DEVICE_WACOM(0x21) }, 1517 { USB_DEVICE_WACOM(0x21) },
1511 { USB_DEVICE_WACOM(0x22) }, 1518 { USB_DEVICE_WACOM(0x22) },
@@ -1545,7 +1552,13 @@ const struct usb_device_id wacom_ids[] = {
1545 { USB_DEVICE_WACOM(0xC5) }, 1552 { USB_DEVICE_WACOM(0xC5) },
1546 { USB_DEVICE_WACOM(0xC6) }, 1553 { USB_DEVICE_WACOM(0xC6) },
1547 { USB_DEVICE_WACOM(0xC7) }, 1554 { USB_DEVICE_WACOM(0xC7) },
1548 { USB_DEVICE_WACOM(0xCE) }, 1555 /*
1556 * DTU-2231 has two interfaces on the same configuration,
1557 * only one is used.
1558 */
1559 { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID,
1560 USB_INTERFACE_SUBCLASS_BOOT,
1561 USB_INTERFACE_PROTOCOL_MOUSE) },
1549 { USB_DEVICE_WACOM(0xD0) }, 1562 { USB_DEVICE_WACOM(0xD0) },
1550 { USB_DEVICE_WACOM(0xD1) }, 1563 { USB_DEVICE_WACOM(0xD1) },
1551 { USB_DEVICE_WACOM(0xD2) }, 1564 { USB_DEVICE_WACOM(0xD2) },
@@ -1560,6 +1573,7 @@ const struct usb_device_id wacom_ids[] = {
1560 { USB_DEVICE_WACOM(0xCC) }, 1573 { USB_DEVICE_WACOM(0xCC) },
1561 { USB_DEVICE_WACOM(0x90) }, 1574 { USB_DEVICE_WACOM(0x90) },
1562 { USB_DEVICE_WACOM(0x93) }, 1575 { USB_DEVICE_WACOM(0x93) },
1576 { USB_DEVICE_WACOM(0x97) },
1563 { USB_DEVICE_WACOM(0x9A) }, 1577 { USB_DEVICE_WACOM(0x9A) },
1564 { USB_DEVICE_WACOM(0x9F) }, 1578 { USB_DEVICE_WACOM(0x9F) },
1565 { USB_DEVICE_WACOM(0xE2) }, 1579 { USB_DEVICE_WACOM(0xE2) },
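
USB_DEVICE_AND_INTERFACE_INFO(), wrapped above as USB_DEVICE_DETAILED, matches on interface class, subclass and protocol in addition to VID:PID, which is how the DTU-2231 entry binds only one of the device's two interfaces. A sketch of the pattern with made-up IDs; USB_INTERFACE_SUBCLASS_BOOT and USB_INTERFACE_PROTOCOL_MOUSE come from <linux/hid.h>, the header the patch now includes.

#include <linux/hid.h>
#include <linux/module.h>
#include <linux/usb.h>

/* Hypothetical IDs, for illustration only. */
#define EXAMPLE_VID     0x1234
#define EXAMPLE_PID     0x5678

static const struct usb_device_id example_ids[] = {
        /*
         * Bind only the HID boot-protocol mouse interface; any other
         * interface exposed by the same configuration is left alone.
         */
        { USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VID, EXAMPLE_PID,
                                        USB_CLASS_HID,
                                        USB_INTERFACE_SUBCLASS_BOOT,
                                        USB_INTERFACE_PROTOCOL_MOUSE) },
        { }
};
MODULE_DEVICE_TABLE(usb, example_ids);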
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 5196861b86ef..d507b9b67806 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -967,17 +967,12 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
967 ts->get_pendown_state = pdata->get_pendown_state; 967 ts->get_pendown_state = pdata->get_pendown_state;
968 } else if (gpio_is_valid(pdata->gpio_pendown)) { 968 } else if (gpio_is_valid(pdata->gpio_pendown)) {
969 969
970 err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); 970 err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN,
971 "ads7846_pendown");
971 if (err) { 972 if (err) {
972 dev_err(&spi->dev, "failed to request pendown GPIO%d\n", 973 dev_err(&spi->dev,
973 pdata->gpio_pendown); 974 "failed to request/setup pendown GPIO%d: %d\n",
974 return err; 975 pdata->gpio_pendown, err);
975 }
976 err = gpio_direction_input(pdata->gpio_pendown);
977 if (err) {
978 dev_err(&spi->dev, "failed to setup pendown GPIO%d\n",
979 pdata->gpio_pendown);
980 gpio_free(pdata->gpio_pendown);
981 return err; 976 return err;
982 } 977 }
983 978
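
gpio_request_one() folds the request and set-direction steps into one call and releases the GPIO itself if configuration fails, which is why the error path above no longer needs gpio_free(). A minimal sketch; example_setup_pendown() is a hypothetical wrapper, not part of the driver.

#include <linux/device.h>
#include <linux/gpio.h>

/*
 * gpio_request_one() requests the line, configures it as an input
 * (GPIOF_IN) and releases it again if the configuration step fails,
 * so the caller needs no gpio_free() in its error path.
 */
static int example_setup_pendown(struct device *dev, unsigned int gpio)
{
        int err;

        err = gpio_request_one(gpio, GPIOF_IN, "example_pendown");
        if (err)
                dev_err(dev, "failed to request/setup GPIO%u: %d\n", gpio, err);

        return err;
}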
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index fa8e56bd9094..8034cbb20f74 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -164,7 +164,7 @@ static irqreturn_t atmel_wm97xx_channel_b_interrupt(int irq, void *dev_id)
164 164
165 data = ac97c_readl(atmel_wm97xx, CBRHR); 165 data = ac97c_readl(atmel_wm97xx, CBRHR);
166 value = data & 0x0fff; 166 value = data & 0x0fff;
167 source = data & WM97XX_ADCSRC_MASK; 167 source = data & WM97XX_ADCSEL_MASK;
168 pen_down = (data & WM97XX_PEN_DOWN) >> 8; 168 pen_down = (data & WM97XX_PEN_DOWN) >> 8;
169 169
170 if (source == WM97XX_ADCSEL_X) 170 if (source == WM97XX_ADCSEL_X)
@@ -442,6 +442,6 @@ static void __exit atmel_wm97xx_exit(void)
442} 442}
443module_exit(atmel_wm97xx_exit); 443module_exit(atmel_wm97xx_exit);
444 444
445MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>"); 445MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
446MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32"); 446MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32");
447MODULE_LICENSE("GPL"); 447MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 1e61387c73ca..ae00604a6a81 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -48,41 +48,47 @@
48#define MXT_OBJECT_SIZE 6 48#define MXT_OBJECT_SIZE 6
49 49
50/* Object types */ 50/* Object types */
51#define MXT_DEBUG_DIAGNOSTIC 37 51#define MXT_DEBUG_DIAGNOSTIC_T37 37
52#define MXT_GEN_MESSAGE 5 52#define MXT_GEN_MESSAGE_T5 5
53#define MXT_GEN_COMMAND 6 53#define MXT_GEN_COMMAND_T6 6
54#define MXT_GEN_POWER 7 54#define MXT_GEN_POWER_T7 7
55#define MXT_GEN_ACQUIRE 8 55#define MXT_GEN_ACQUIRE_T8 8
56#define MXT_TOUCH_MULTI 9 56#define MXT_GEN_DATASOURCE_T53 53
57#define MXT_TOUCH_KEYARRAY 15 57#define MXT_TOUCH_MULTI_T9 9
58#define MXT_TOUCH_PROXIMITY 23 58#define MXT_TOUCH_KEYARRAY_T15 15
59#define MXT_PROCI_GRIPFACE 20 59#define MXT_TOUCH_PROXIMITY_T23 23
60#define MXT_PROCG_NOISE 22 60#define MXT_TOUCH_PROXKEY_T52 52
61#define MXT_PROCI_ONETOUCH 24 61#define MXT_PROCI_GRIPFACE_T20 20
62#define MXT_PROCI_TWOTOUCH 27 62#define MXT_PROCG_NOISE_T22 22
63#define MXT_PROCI_GRIP 40 63#define MXT_PROCI_ONETOUCH_T24 24
64#define MXT_PROCI_PALM 41 64#define MXT_PROCI_TWOTOUCH_T27 27
65#define MXT_SPT_COMMSCONFIG 18 65#define MXT_PROCI_GRIP_T40 40
66#define MXT_SPT_GPIOPWM 19 66#define MXT_PROCI_PALM_T41 41
67#define MXT_SPT_SELFTEST 25 67#define MXT_PROCI_TOUCHSUPPRESSION_T42 42
68#define MXT_SPT_CTECONFIG 28 68#define MXT_PROCI_STYLUS_T47 47
69#define MXT_SPT_USERDATA 38 69#define MXT_PROCG_NOISESUPPRESSION_T48 48
70#define MXT_SPT_DIGITIZER 43 70#define MXT_SPT_COMMSCONFIG_T18 18
71#define MXT_SPT_MESSAGECOUNT 44 71#define MXT_SPT_GPIOPWM_T19 19
72 72#define MXT_SPT_SELFTEST_T25 25
73/* MXT_GEN_COMMAND field */ 73#define MXT_SPT_CTECONFIG_T28 28
74#define MXT_SPT_USERDATA_T38 38
75#define MXT_SPT_DIGITIZER_T43 43
76#define MXT_SPT_MESSAGECOUNT_T44 44
77#define MXT_SPT_CTECONFIG_T46 46
78
79/* MXT_GEN_COMMAND_T6 field */
74#define MXT_COMMAND_RESET 0 80#define MXT_COMMAND_RESET 0
75#define MXT_COMMAND_BACKUPNV 1 81#define MXT_COMMAND_BACKUPNV 1
76#define MXT_COMMAND_CALIBRATE 2 82#define MXT_COMMAND_CALIBRATE 2
77#define MXT_COMMAND_REPORTALL 3 83#define MXT_COMMAND_REPORTALL 3
78#define MXT_COMMAND_DIAGNOSTIC 5 84#define MXT_COMMAND_DIAGNOSTIC 5
79 85
80/* MXT_GEN_POWER field */ 86/* MXT_GEN_POWER_T7 field */
81#define MXT_POWER_IDLEACQINT 0 87#define MXT_POWER_IDLEACQINT 0
82#define MXT_POWER_ACTVACQINT 1 88#define MXT_POWER_ACTVACQINT 1
83#define MXT_POWER_ACTV2IDLETO 2 89#define MXT_POWER_ACTV2IDLETO 2
84 90
85/* MXT_GEN_ACQUIRE field */ 91/* MXT_GEN_ACQUIRE_T8 field */
86#define MXT_ACQUIRE_CHRGTIME 0 92#define MXT_ACQUIRE_CHRGTIME 0
87#define MXT_ACQUIRE_TCHDRIFT 2 93#define MXT_ACQUIRE_TCHDRIFT 2
88#define MXT_ACQUIRE_DRIFTST 3 94#define MXT_ACQUIRE_DRIFTST 3
@@ -91,7 +97,7 @@
91#define MXT_ACQUIRE_ATCHCALST 6 97#define MXT_ACQUIRE_ATCHCALST 6
92#define MXT_ACQUIRE_ATCHCALSTHR 7 98#define MXT_ACQUIRE_ATCHCALSTHR 7
93 99
94/* MXT_TOUCH_MULTI field */ 100/* MXT_TOUCH_MULTI_T9 field */
95#define MXT_TOUCH_CTRL 0 101#define MXT_TOUCH_CTRL 0
96#define MXT_TOUCH_XORIGIN 1 102#define MXT_TOUCH_XORIGIN 1
97#define MXT_TOUCH_YORIGIN 2 103#define MXT_TOUCH_YORIGIN 2
@@ -121,7 +127,7 @@
121#define MXT_TOUCH_YEDGEDIST 29 127#define MXT_TOUCH_YEDGEDIST 29
122#define MXT_TOUCH_JUMPLIMIT 30 128#define MXT_TOUCH_JUMPLIMIT 30
123 129
124/* MXT_PROCI_GRIPFACE field */ 130/* MXT_PROCI_GRIPFACE_T20 field */
125#define MXT_GRIPFACE_CTRL 0 131#define MXT_GRIPFACE_CTRL 0
126#define MXT_GRIPFACE_XLOGRIP 1 132#define MXT_GRIPFACE_XLOGRIP 1
127#define MXT_GRIPFACE_XHIGRIP 2 133#define MXT_GRIPFACE_XHIGRIP 2
@@ -151,11 +157,11 @@
151#define MXT_NOISE_FREQ4 15 157#define MXT_NOISE_FREQ4 15
152#define MXT_NOISE_IDLEGCAFVALID 16 158#define MXT_NOISE_IDLEGCAFVALID 16
153 159
154/* MXT_SPT_COMMSCONFIG */ 160/* MXT_SPT_COMMSCONFIG_T18 */
155#define MXT_COMMS_CTRL 0 161#define MXT_COMMS_CTRL 0
156#define MXT_COMMS_CMD 1 162#define MXT_COMMS_CMD 1
157 163
158/* MXT_SPT_CTECONFIG field */ 164/* MXT_SPT_CTECONFIG_T28 field */
159#define MXT_CTE_CTRL 0 165#define MXT_CTE_CTRL 0
160#define MXT_CTE_CMD 1 166#define MXT_CTE_CMD 1
161#define MXT_CTE_MODE 2 167#define MXT_CTE_MODE 2
@@ -166,7 +172,7 @@
166#define MXT_VOLTAGE_DEFAULT 2700000 172#define MXT_VOLTAGE_DEFAULT 2700000
167#define MXT_VOLTAGE_STEP 10000 173#define MXT_VOLTAGE_STEP 10000
168 174
169/* Define for MXT_GEN_COMMAND */ 175/* Define for MXT_GEN_COMMAND_T6 */
170#define MXT_BOOT_VALUE 0xa5 176#define MXT_BOOT_VALUE 0xa5
171#define MXT_BACKUP_VALUE 0x55 177#define MXT_BACKUP_VALUE 0x55
172#define MXT_BACKUP_TIME 25 /* msec */ 178#define MXT_BACKUP_TIME 25 /* msec */
@@ -256,24 +262,31 @@ struct mxt_data {
256static bool mxt_object_readable(unsigned int type) 262static bool mxt_object_readable(unsigned int type)
257{ 263{
258 switch (type) { 264 switch (type) {
259 case MXT_GEN_MESSAGE: 265 case MXT_GEN_MESSAGE_T5:
260 case MXT_GEN_COMMAND: 266 case MXT_GEN_COMMAND_T6:
261 case MXT_GEN_POWER: 267 case MXT_GEN_POWER_T7:
262 case MXT_GEN_ACQUIRE: 268 case MXT_GEN_ACQUIRE_T8:
263 case MXT_TOUCH_MULTI: 269 case MXT_GEN_DATASOURCE_T53:
264 case MXT_TOUCH_KEYARRAY: 270 case MXT_TOUCH_MULTI_T9:
265 case MXT_TOUCH_PROXIMITY: 271 case MXT_TOUCH_KEYARRAY_T15:
266 case MXT_PROCI_GRIPFACE: 272 case MXT_TOUCH_PROXIMITY_T23:
267 case MXT_PROCG_NOISE: 273 case MXT_TOUCH_PROXKEY_T52:
268 case MXT_PROCI_ONETOUCH: 274 case MXT_PROCI_GRIPFACE_T20:
269 case MXT_PROCI_TWOTOUCH: 275 case MXT_PROCG_NOISE_T22:
270 case MXT_PROCI_GRIP: 276 case MXT_PROCI_ONETOUCH_T24:
271 case MXT_PROCI_PALM: 277 case MXT_PROCI_TWOTOUCH_T27:
272 case MXT_SPT_COMMSCONFIG: 278 case MXT_PROCI_GRIP_T40:
273 case MXT_SPT_GPIOPWM: 279 case MXT_PROCI_PALM_T41:
274 case MXT_SPT_SELFTEST: 280 case MXT_PROCI_TOUCHSUPPRESSION_T42:
275 case MXT_SPT_CTECONFIG: 281 case MXT_PROCI_STYLUS_T47:
276 case MXT_SPT_USERDATA: 282 case MXT_PROCG_NOISESUPPRESSION_T48:
283 case MXT_SPT_COMMSCONFIG_T18:
284 case MXT_SPT_GPIOPWM_T19:
285 case MXT_SPT_SELFTEST_T25:
286 case MXT_SPT_CTECONFIG_T28:
287 case MXT_SPT_USERDATA_T38:
288 case MXT_SPT_DIGITIZER_T43:
289 case MXT_SPT_CTECONFIG_T46:
277 return true; 290 return true;
278 default: 291 default:
279 return false; 292 return false;
@@ -283,21 +296,28 @@ static bool mxt_object_readable(unsigned int type)
283static bool mxt_object_writable(unsigned int type) 296static bool mxt_object_writable(unsigned int type)
284{ 297{
285 switch (type) { 298 switch (type) {
286 case MXT_GEN_COMMAND: 299 case MXT_GEN_COMMAND_T6:
287 case MXT_GEN_POWER: 300 case MXT_GEN_POWER_T7:
288 case MXT_GEN_ACQUIRE: 301 case MXT_GEN_ACQUIRE_T8:
289 case MXT_TOUCH_MULTI: 302 case MXT_TOUCH_MULTI_T9:
290 case MXT_TOUCH_KEYARRAY: 303 case MXT_TOUCH_KEYARRAY_T15:
291 case MXT_TOUCH_PROXIMITY: 304 case MXT_TOUCH_PROXIMITY_T23:
292 case MXT_PROCI_GRIPFACE: 305 case MXT_TOUCH_PROXKEY_T52:
293 case MXT_PROCG_NOISE: 306 case MXT_PROCI_GRIPFACE_T20:
294 case MXT_PROCI_ONETOUCH: 307 case MXT_PROCG_NOISE_T22:
295 case MXT_PROCI_TWOTOUCH: 308 case MXT_PROCI_ONETOUCH_T24:
296 case MXT_PROCI_GRIP: 309 case MXT_PROCI_TWOTOUCH_T27:
297 case MXT_PROCI_PALM: 310 case MXT_PROCI_GRIP_T40:
298 case MXT_SPT_GPIOPWM: 311 case MXT_PROCI_PALM_T41:
299 case MXT_SPT_SELFTEST: 312 case MXT_PROCI_TOUCHSUPPRESSION_T42:
300 case MXT_SPT_CTECONFIG: 313 case MXT_PROCI_STYLUS_T47:
314 case MXT_PROCG_NOISESUPPRESSION_T48:
315 case MXT_SPT_COMMSCONFIG_T18:
316 case MXT_SPT_GPIOPWM_T19:
317 case MXT_SPT_SELFTEST_T25:
318 case MXT_SPT_CTECONFIG_T28:
319 case MXT_SPT_DIGITIZER_T43:
320 case MXT_SPT_CTECONFIG_T46:
301 return true; 321 return true;
302 default: 322 default:
303 return false; 323 return false;
@@ -455,7 +475,7 @@ static int mxt_read_message(struct mxt_data *data,
455 struct mxt_object *object; 475 struct mxt_object *object;
456 u16 reg; 476 u16 reg;
457 477
458 object = mxt_get_object(data, MXT_GEN_MESSAGE); 478 object = mxt_get_object(data, MXT_GEN_MESSAGE_T5);
459 if (!object) 479 if (!object)
460 return -EINVAL; 480 return -EINVAL;
461 481
@@ -597,8 +617,8 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
597 617
598 reportid = message.reportid; 618 reportid = message.reportid;
599 619
600 /* whether reportid is thing of MXT_TOUCH_MULTI */ 620 /* whether reportid is thing of MXT_TOUCH_MULTI_T9 */
601 object = mxt_get_object(data, MXT_TOUCH_MULTI); 621 object = mxt_get_object(data, MXT_TOUCH_MULTI_T9);
602 if (!object) 622 if (!object)
603 goto end; 623 goto end;
604 624
@@ -635,7 +655,9 @@ static int mxt_check_reg_init(struct mxt_data *data)
635 if (!mxt_object_writable(object->type)) 655 if (!mxt_object_writable(object->type))
636 continue; 656 continue;
637 657
638 for (j = 0; j < object->size + 1; j++) { 658 for (j = 0;
659 j < (object->size + 1) * (object->instances + 1);
660 j++) {
639 config_offset = index + j; 661 config_offset = index + j;
640 if (config_offset > pdata->config_length) { 662 if (config_offset > pdata->config_length) {
641 dev_err(dev, "Not enough config data!\n"); 663 dev_err(dev, "Not enough config data!\n");
@@ -644,7 +666,7 @@ static int mxt_check_reg_init(struct mxt_data *data)
644 mxt_write_object(data, object->type, j, 666 mxt_write_object(data, object->type, j,
645 pdata->config[config_offset]); 667 pdata->config[config_offset]);
646 } 668 }
647 index += object->size + 1; 669 index += (object->size + 1) * (object->instances + 1);
648 } 670 }
649 671
650 return 0; 672 return 0;
@@ -678,31 +700,31 @@ static void mxt_handle_pdata(struct mxt_data *data)
678 u8 voltage; 700 u8 voltage;
679 701
680 /* Set touchscreen lines */ 702 /* Set touchscreen lines */
681 mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_XSIZE, 703 mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_XSIZE,
682 pdata->x_line); 704 pdata->x_line);
683 mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_YSIZE, 705 mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_YSIZE,
684 pdata->y_line); 706 pdata->y_line);
685 707
686 /* Set touchscreen orient */ 708 /* Set touchscreen orient */
687 mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_ORIENT, 709 mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_ORIENT,
688 pdata->orient); 710 pdata->orient);
689 711
690 /* Set touchscreen burst length */ 712 /* Set touchscreen burst length */
691 mxt_write_object(data, MXT_TOUCH_MULTI, 713 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
692 MXT_TOUCH_BLEN, pdata->blen); 714 MXT_TOUCH_BLEN, pdata->blen);
693 715
694 /* Set touchscreen threshold */ 716 /* Set touchscreen threshold */
695 mxt_write_object(data, MXT_TOUCH_MULTI, 717 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
696 MXT_TOUCH_TCHTHR, pdata->threshold); 718 MXT_TOUCH_TCHTHR, pdata->threshold);
697 719
698 /* Set touchscreen resolution */ 720 /* Set touchscreen resolution */
699 mxt_write_object(data, MXT_TOUCH_MULTI, 721 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
700 MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff); 722 MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
701 mxt_write_object(data, MXT_TOUCH_MULTI, 723 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
702 MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8); 724 MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
703 mxt_write_object(data, MXT_TOUCH_MULTI, 725 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
704 MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff); 726 MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
705 mxt_write_object(data, MXT_TOUCH_MULTI, 727 mxt_write_object(data, MXT_TOUCH_MULTI_T9,
706 MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8); 728 MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
707 729
708 /* Set touchscreen voltage */ 730 /* Set touchscreen voltage */
@@ -715,7 +737,7 @@ static void mxt_handle_pdata(struct mxt_data *data)
715 voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) / 737 voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) /
716 MXT_VOLTAGE_STEP; 738 MXT_VOLTAGE_STEP;
717 739
718 mxt_write_object(data, MXT_SPT_CTECONFIG, 740 mxt_write_object(data, MXT_SPT_CTECONFIG_T28,
719 MXT_CTE_VOLTAGE, voltage); 741 MXT_CTE_VOLTAGE, voltage);
720 } 742 }
721} 743}
@@ -819,13 +841,13 @@ static int mxt_initialize(struct mxt_data *data)
819 mxt_handle_pdata(data); 841 mxt_handle_pdata(data);
820 842
821 /* Backup to memory */ 843 /* Backup to memory */
822 mxt_write_object(data, MXT_GEN_COMMAND, 844 mxt_write_object(data, MXT_GEN_COMMAND_T6,
823 MXT_COMMAND_BACKUPNV, 845 MXT_COMMAND_BACKUPNV,
824 MXT_BACKUP_VALUE); 846 MXT_BACKUP_VALUE);
825 msleep(MXT_BACKUP_TIME); 847 msleep(MXT_BACKUP_TIME);
826 848
827 /* Soft reset */ 849 /* Soft reset */
828 mxt_write_object(data, MXT_GEN_COMMAND, 850 mxt_write_object(data, MXT_GEN_COMMAND_T6,
829 MXT_COMMAND_RESET, 1); 851 MXT_COMMAND_RESET, 1);
830 msleep(MXT_RESET_TIME); 852 msleep(MXT_RESET_TIME);
831 853
@@ -921,7 +943,7 @@ static int mxt_load_fw(struct device *dev, const char *fn)
921 } 943 }
922 944
923 /* Change to the bootloader mode */ 945 /* Change to the bootloader mode */
924 mxt_write_object(data, MXT_GEN_COMMAND, 946 mxt_write_object(data, MXT_GEN_COMMAND_T6,
925 MXT_COMMAND_RESET, MXT_BOOT_VALUE); 947 MXT_COMMAND_RESET, MXT_BOOT_VALUE);
926 msleep(MXT_RESET_TIME); 948 msleep(MXT_RESET_TIME);
927 949
@@ -1027,14 +1049,14 @@ static void mxt_start(struct mxt_data *data)
1027{ 1049{
1028 /* Touch enable */ 1050 /* Touch enable */
1029 mxt_write_object(data, 1051 mxt_write_object(data,
1030 MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0x83); 1052 MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
1031} 1053}
1032 1054
1033static void mxt_stop(struct mxt_data *data) 1055static void mxt_stop(struct mxt_data *data)
1034{ 1056{
1035 /* Touch disable */ 1057 /* Touch disable */
1036 mxt_write_object(data, 1058 mxt_write_object(data,
1037 MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0); 1059 MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
1038} 1060}
1039 1061
1040static int mxt_input_open(struct input_dev *dev) 1062static int mxt_input_open(struct input_dev *dev)
@@ -1182,7 +1204,7 @@ static int mxt_resume(struct device *dev)
1182 struct input_dev *input_dev = data->input_dev; 1204 struct input_dev *input_dev = data->input_dev;
1183 1205
1184 /* Soft reset */ 1206 /* Soft reset */
1185 mxt_write_object(data, MXT_GEN_COMMAND, 1207 mxt_write_object(data, MXT_GEN_COMMAND_T6,
1186 MXT_COMMAND_RESET, 1); 1208 MXT_COMMAND_RESET, 1);
1187 1209
1188 msleep(MXT_RESET_TIME); 1210 msleep(MXT_RESET_TIME);
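
The config-loading change advances through the object table by (size + 1) * (instances + 1) bytes because both fields are stored minus one. A stand-alone sketch of the arithmetic; example_object and the T15 figures are illustrative only.

#include <stdio.h>

/*
 * The maXTouch object table stores size and instance counts minus one,
 * so the configuration bytes consumed by one object are
 * (size + 1) * (instances + 1), the quantity the loop now advances by.
 */
struct example_object {
        unsigned char type;
        unsigned char size;             /* real size - 1 */
        unsigned char instances;        /* real instance count - 1 */
};

static unsigned int object_config_bytes(const struct example_object *obj)
{
        return (obj->size + 1) * (obj->instances + 1);
}

int main(void)
{
        /* e.g. a key array object: 11 bytes per instance, 2 instances */
        struct example_object obj = { 15, 10, 1 };

        printf("T%u spans %u config bytes\n", obj.type, object_config_bytes(&obj));
        return 0;
}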
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index a93c5c26ab3f..d8815c5d54ad 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -84,9 +84,9 @@ static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg,
84 memcpy(i2c_data + 1, value, len); 84 memcpy(i2c_data + 1, value, len);
85 85
86 ret = i2c_master_send(client, i2c_data, len + 1); 86 ret = i2c_master_send(client, i2c_data, len + 1);
87 if (ret != 1) { 87 if (ret != len + 1) {
88 dev_err(&client->dev, "i2c write data cmd failed\n"); 88 dev_err(&client->dev, "i2c write data cmd failed\n");
89 return ret ? ret : -EIO; 89 return ret < 0 ? ret : -EIO;
90 } 90 }
91 91
92 return 0; 92 return 0;
@@ -193,6 +193,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
193 193
194 ts->client = client; 194 ts->client = client;
195 ts->input = input_dev; 195 ts->input = input_dev;
196 ts->reset_pin = pdata->reset_pin;
197 ts->irq_pin = pdata->irq_pin;
196 198
197 snprintf(ts->phys, sizeof(ts->phys), 199 snprintf(ts->phys, sizeof(ts->phys),
198 "%s/input0", dev_name(&client->dev)); 200 "%s/input0", dev_name(&client->dev));
@@ -328,7 +330,7 @@ static int __devexit cy8ctmg110_remove(struct i2c_client *client)
328 return 0; 330 return 0;
329} 331}
330 332
331static struct i2c_device_id cy8ctmg110_idtable[] = { 333static const struct i2c_device_id cy8ctmg110_idtable[] = {
332 { CY8CTMG110_DRIVER_NAME, 1 }, 334 { CY8CTMG110_DRIVER_NAME, 1 },
333 { } 335 { }
334}; 336};
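
i2c_master_send() returns the number of bytes transferred or a negative errno, so the reliable success test for an N-byte message is ret == N, with negative values propagated as-is, which is what the fix above switches to. A minimal sketch; example_write_block() is a hypothetical helper.

#include <linux/device.h>
#include <linux/i2c.h>

/*
 * i2c_master_send() returns the number of bytes transferred or a negative
 * errno: success means exactly len bytes went out, a negative return is
 * passed on unchanged, and a short transfer becomes -EIO.
 */
static int example_write_block(struct i2c_client *client, const u8 *buf, int len)
{
        int ret = i2c_master_send(client, buf, len);

        if (ret != len) {
                dev_err(&client->dev, "i2c write failed: %d\n", ret);
                return ret < 0 ? ret : -EIO;
        }

        return 0;
}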
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 66c96bfc5522..327695268e06 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -448,15 +448,11 @@ static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
448 */ 448 */
449static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev) 449static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
450{ 450{
451 int err, i, found; 451 int found = 0;
452 int err, i;
452 u8 r8; 453 u8 r8;
453 454
454 found = -1;
455
456 for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) { 455 for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
457 if (found >= 0)
458 break;
459
460 err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8); 456 err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8);
461 if (err) 457 if (err)
462 return err; 458 return err;
@@ -466,16 +462,15 @@ static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
466 break; 462 break;
467 } 463 }
468 } 464 }
469 if (found < 0)
470 return 0;
471 465
472 if (tsdev->vendor == PMIC_VENDOR_FS) { 466 if (tsdev->vendor == PMIC_VENDOR_FS) {
473 if (found && found > (MRSTOUCH_MAX_CHANNELS - 18)) 467 if (found > MRSTOUCH_MAX_CHANNELS - 18)
474 return -ENOSPC; 468 return -ENOSPC;
475 } else { 469 } else {
476 if (found && found > (MRSTOUCH_MAX_CHANNELS - 4)) 470 if (found > MRSTOUCH_MAX_CHANNELS - 4)
477 return -ENOSPC; 471 return -ENOSPC;
478 } 472 }
473
479 return found; 474 return found;
480} 475}
481 476
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 3242e7076258..e966c29ff1bb 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -157,9 +157,9 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
157 x, y, p); 157 x, y, p);
158 158
159 /* are samples valid */ 159 /* are samples valid */
160 if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X || 160 if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
161 (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y || 161 (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
162 (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES) 162 (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
163 goto up; 163 goto up;
164 164
165 /* coordinate is good */ 165 /* coordinate is good */
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 22a3411e93c5..089b0a0f3d8c 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -393,5 +393,5 @@ module_exit(tsc_exit);
393 393
394MODULE_AUTHOR("Cyril Chemparathy"); 394MODULE_AUTHOR("Cyril Chemparathy");
395MODULE_DESCRIPTION("TNETV107X Touchscreen Driver"); 395MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
396MODULE_ALIAS("platform: tnetv107x-ts"); 396MODULE_ALIAS("platform:tnetv107x-ts");
397MODULE_LICENSE("GPL"); 397MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c
index 98e61175d3f5..adc13a523ab5 100644
--- a/drivers/input/touchscreen/wm9705.c
+++ b/drivers/input/touchscreen/wm9705.c
@@ -215,8 +215,9 @@ static inline int is_pden(struct wm97xx *wm)
215static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample) 215static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
216{ 216{
217 int timeout = 5 * delay; 217 int timeout = 5 * delay;
218 bool wants_pen = adcsel & WM97XX_PEN_DOWN;
218 219
219 if (!wm->pen_probably_down) { 220 if (wants_pen && !wm->pen_probably_down) {
220 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); 221 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
221 if (!(data & WM97XX_PEN_DOWN)) 222 if (!(data & WM97XX_PEN_DOWN))
222 return RC_PENUP; 223 return RC_PENUP;
@@ -224,13 +225,10 @@ static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
224 } 225 }
225 226
226 /* set up digitiser */ 227 /* set up digitiser */
227 if (adcsel & 0x8000)
228 adcsel = ((adcsel & 0x7fff) + 3) << 12;
229
230 if (wm->mach_ops && wm->mach_ops->pre_sample) 228 if (wm->mach_ops && wm->mach_ops->pre_sample)
231 wm->mach_ops->pre_sample(adcsel); 229 wm->mach_ops->pre_sample(adcsel);
232 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, 230 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, (adcsel & WM97XX_ADCSEL_MASK)
233 adcsel | WM97XX_POLL | WM97XX_DELAY(delay)); 231 | WM97XX_POLL | WM97XX_DELAY(delay));
234 232
235 /* wait 3 AC97 time slots + delay for conversion */ 233 /* wait 3 AC97 time slots + delay for conversion */
236 poll_delay(delay); 234 poll_delay(delay);
@@ -256,13 +254,14 @@ static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
256 wm->mach_ops->post_sample(adcsel); 254 wm->mach_ops->post_sample(adcsel);
257 255
258 /* check we have correct sample */ 256 /* check we have correct sample */
259 if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) { 257 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
260 dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel, 258 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
261 *sample & WM97XX_ADCSEL_MASK); 259 adcsel & WM97XX_ADCSEL_MASK,
260 *sample & WM97XX_ADCSEL_MASK);
262 return RC_PENUP; 261 return RC_PENUP;
263 } 262 }
264 263
265 if (!(*sample & WM97XX_PEN_DOWN)) { 264 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
266 wm->pen_probably_down = 0; 265 wm->pen_probably_down = 0;
267 return RC_PENUP; 266 return RC_PENUP;
268 } 267 }
@@ -277,14 +276,14 @@ static int wm9705_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
277{ 276{
278 int rc; 277 int rc;
279 278
280 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_X, &data->x); 279 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
281 if (rc != RC_VALID) 280 if (rc != RC_VALID)
282 return rc; 281 return rc;
283 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y); 282 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
284 if (rc != RC_VALID) 283 if (rc != RC_VALID)
285 return rc; 284 return rc;
286 if (pil) { 285 if (pil) {
287 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES, &data->p); 286 rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN, &data->p);
288 if (rc != RC_VALID) 287 if (rc != RC_VALID)
289 return rc; 288 return rc;
290 } else 289 } else
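
Once callers pass WM97XX_PEN_DOWN together with the channel select, a plain equality test against adcsel can no longer succeed; XOR-ing the two values and masking compares only the channel bits. A stand-alone sketch with placeholder constants (the EX_* values stand in for the real <linux/wm97xx.h> definitions).

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real constants live in <linux/wm97xx.h>. */
#define EX_ADCSEL_MASK  0x7000
#define EX_ADCSEL_X     0x1000
#define EX_PEN_DOWN     0x8000

/*
 * The requested adcsel may carry EX_PEN_DOWN on top of the channel bits,
 * so XOR then mask compares only the channel-select field of the sample.
 */
static int wrong_channel(uint16_t sample, uint16_t adcsel)
{
        return (sample ^ adcsel) & EX_ADCSEL_MASK;
}

int main(void)
{
        uint16_t wanted = EX_ADCSEL_X | EX_PEN_DOWN;    /* X channel, pen expected down */
        uint16_t sample = EX_ADCSEL_X | 0x0123;         /* converter result */

        printf("wrong channel? %s\n", wrong_channel(sample, wanted) ? "yes" : "no");
        return 0;
}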
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
index 2bc2fb801009..6e743e3dfda4 100644
--- a/drivers/input/touchscreen/wm9712.c
+++ b/drivers/input/touchscreen/wm9712.c
@@ -255,8 +255,9 @@ static inline int is_pden(struct wm97xx *wm)
255static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample) 255static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
256{ 256{
257 int timeout = 5 * delay; 257 int timeout = 5 * delay;
258 bool wants_pen = adcsel & WM97XX_PEN_DOWN;
258 259
259 if (!wm->pen_probably_down) { 260 if (wants_pen && !wm->pen_probably_down) {
260 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); 261 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
261 if (!(data & WM97XX_PEN_DOWN)) 262 if (!(data & WM97XX_PEN_DOWN))
262 return RC_PENUP; 263 return RC_PENUP;
@@ -264,13 +265,10 @@ static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
264 } 265 }
265 266
266 /* set up digitiser */ 267 /* set up digitiser */
267 if (adcsel & 0x8000)
268 adcsel = ((adcsel & 0x7fff) + 3) << 12;
269
270 if (wm->mach_ops && wm->mach_ops->pre_sample) 268 if (wm->mach_ops && wm->mach_ops->pre_sample)
271 wm->mach_ops->pre_sample(adcsel); 269 wm->mach_ops->pre_sample(adcsel);
272 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, 270 wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, (adcsel & WM97XX_ADCSEL_MASK)
273 adcsel | WM97XX_POLL | WM97XX_DELAY(delay)); 271 | WM97XX_POLL | WM97XX_DELAY(delay));
274 272
275 /* wait 3 AC97 time slots + delay for conversion */ 273 /* wait 3 AC97 time slots + delay for conversion */
276 poll_delay(delay); 274 poll_delay(delay);
@@ -296,13 +294,14 @@ static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
296 wm->mach_ops->post_sample(adcsel); 294 wm->mach_ops->post_sample(adcsel);
297 295
298 /* check we have correct sample */ 296 /* check we have correct sample */
299 if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) { 297 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
300 dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel, 298 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
301 *sample & WM97XX_ADCSEL_MASK); 299 adcsel & WM97XX_ADCSEL_MASK,
300 *sample & WM97XX_ADCSEL_MASK);
302 return RC_PENUP; 301 return RC_PENUP;
303 } 302 }
304 303
305 if (!(*sample & WM97XX_PEN_DOWN)) { 304 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
306 wm->pen_probably_down = 0; 305 wm->pen_probably_down = 0;
307 return RC_PENUP; 306 return RC_PENUP;
308 } 307 }
@@ -387,16 +386,18 @@ static int wm9712_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
387 if (rc != RC_VALID) 386 if (rc != RC_VALID)
388 return rc; 387 return rc;
389 } else { 388 } else {
390 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X, &data->x); 389 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN,
390 &data->x);
391 if (rc != RC_VALID) 391 if (rc != RC_VALID)
392 return rc; 392 return rc;
393 393
394 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y); 394 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN,
395 &data->y);
395 if (rc != RC_VALID) 396 if (rc != RC_VALID)
396 return rc; 397 return rc;
397 398
398 if (pil && !five_wire) { 399 if (pil && !five_wire) {
399 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES, 400 rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
400 &data->p); 401 &data->p);
401 if (rc != RC_VALID) 402 if (rc != RC_VALID)
402 return rc; 403 return rc;
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index 73ec99568f12..7405353199d7 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -261,8 +261,9 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
261{ 261{
262 u16 dig1; 262 u16 dig1;
263 int timeout = 5 * delay; 263 int timeout = 5 * delay;
264 bool wants_pen = adcsel & WM97XX_PEN_DOWN;
264 265
265 if (!wm->pen_probably_down) { 266 if (wants_pen && !wm->pen_probably_down) {
266 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); 267 u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
267 if (!(data & WM97XX_PEN_DOWN)) 268 if (!(data & WM97XX_PEN_DOWN))
268 return RC_PENUP; 269 return RC_PENUP;
@@ -270,15 +271,14 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
270 } 271 }
271 272
272 /* set up digitiser */ 273 /* set up digitiser */
273 if (adcsel & 0x8000)
274 adcsel = 1 << ((adcsel & 0x7fff) + 3);
275
276 dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1); 274 dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
277 dig1 &= ~WM9713_ADCSEL_MASK; 275 dig1 &= ~WM9713_ADCSEL_MASK;
276 /* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */
277 dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12);
278 278
279 if (wm->mach_ops && wm->mach_ops->pre_sample) 279 if (wm->mach_ops && wm->mach_ops->pre_sample)
280 wm->mach_ops->pre_sample(adcsel); 280 wm->mach_ops->pre_sample(adcsel);
281 wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | adcsel | WM9713_POLL); 281 wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL);
282 282
283 /* wait 3 AC97 time slots + delay for conversion */ 283 /* wait 3 AC97 time slots + delay for conversion */
284 poll_delay(delay); 284 poll_delay(delay);
@@ -304,13 +304,14 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
304 wm->mach_ops->post_sample(adcsel); 304 wm->mach_ops->post_sample(adcsel);
305 305
306 /* check we have correct sample */ 306 /* check we have correct sample */
307 if ((*sample & WM97XX_ADCSRC_MASK) != ffs(adcsel >> 1) << 12) { 307 if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
308 dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel, 308 dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
309 *sample & WM97XX_ADCSRC_MASK); 309 adcsel & WM97XX_ADCSEL_MASK,
310 *sample & WM97XX_ADCSEL_MASK);
310 return RC_PENUP; 311 return RC_PENUP;
311 } 312 }
312 313
313 if (!(*sample & WM97XX_PEN_DOWN)) { 314 if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
314 wm->pen_probably_down = 0; 315 wm->pen_probably_down = 0;
315 return RC_PENUP; 316 return RC_PENUP;
316 } 317 }
@@ -400,14 +401,14 @@ static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
400 if (rc != RC_VALID) 401 if (rc != RC_VALID)
401 return rc; 402 return rc;
402 } else { 403 } else {
403 rc = wm9713_poll_sample(wm, WM9713_ADCSEL_X, &data->x); 404 rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
404 if (rc != RC_VALID) 405 if (rc != RC_VALID)
405 return rc; 406 return rc;
406 rc = wm9713_poll_sample(wm, WM9713_ADCSEL_Y, &data->y); 407 rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
407 if (rc != RC_VALID) 408 if (rc != RC_VALID)
408 return rc; 409 return rc;
409 if (pil) { 410 if (pil) {
410 rc = wm9713_poll_sample(wm, WM9713_ADCSEL_PRES, 411 rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
411 &data->p); 412 &data->p);
412 if (rc != RC_VALID) 413 if (rc != RC_VALID)
413 return rc; 414 return rc;
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index 5b0f15ec874a..f6328c0cded6 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -122,9 +122,9 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
122 x, y, p); 122 x, y, p);
123 123
124 /* are samples valid */ 124 /* are samples valid */
125 if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X || 125 if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
126 (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y || 126 (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
127 (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES) 127 (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
128 goto up; 128 goto up;
129 129
130 /* coordinate is good */ 130 /* coordinate is good */
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 77cb2a14c896..81525ae5d869 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -55,7 +55,7 @@ enum smbios_attr_enum {
55 SMBIOS_ATTR_INSTANCE_SHOW, 55 SMBIOS_ATTR_INSTANCE_SHOW,
56}; 56};
57 57
58static mode_t 58static size_t
59find_smbios_instance_string(struct pci_dev *pdev, char *buf, 59find_smbios_instance_string(struct pci_dev *pdev, char *buf,
60 enum smbios_attr_enum attribute) 60 enum smbios_attr_enum attribute)
61{ 61{
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 081c171a1ed6..5ce5170254ca 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -397,7 +397,7 @@ struct amap_pdu_data_out {
 };
 
 struct be_cmd_bhs {
-	struct iscsi_cmd iscsi_hdr;
+	struct iscsi_scsi_req iscsi_hdr;
 	unsigned char pad1[16];
 	struct pdu_data_out iscsi_data_pdu;
 	unsigned char pad2[BE_SENSE_INFO_SIZE -
@@ -428,7 +428,7 @@ struct be_nonio_bhs {
 };
 
 struct be_status_bhs {
-	struct iscsi_cmd iscsi_hdr;
+	struct iscsi_scsi_req iscsi_hdr;
 	unsigned char pad1[16];
 	/**
 	 * The plus 2 below is to hold the sense info length that gets
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 030a96c646c3..9ae80cd5953b 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -332,11 +332,11 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
 {
 	struct bnx2i_cmd *bnx2i_cmd;
 	struct bnx2i_login_request *login_wqe;
-	struct iscsi_login *login_hdr;
+	struct iscsi_login_req *login_hdr;
 	u32 dword;
 
 	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
-	login_hdr = (struct iscsi_login *)task->hdr;
+	login_hdr = (struct iscsi_login_req *)task->hdr;
 	login_wqe = (struct bnx2i_login_request *)
 		bnx2i_conn->ep->qp.sq_prod_qe;
 
@@ -1349,7 +1349,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
 	struct bnx2i_cmd_response *resp_cqe;
 	struct bnx2i_cmd *bnx2i_cmd;
 	struct iscsi_task *task;
-	struct iscsi_cmd_rsp *hdr;
+	struct iscsi_scsi_rsp *hdr;
 	u32 datalen = 0;
 
 	resp_cqe = (struct bnx2i_cmd_response *)cqe;
@@ -1376,7 +1376,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
 	}
 	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
 
-	hdr = (struct iscsi_cmd_rsp *)task->hdr;
+	hdr = (struct iscsi_scsi_rsp *)task->hdr;
 	resp_cqe = (struct bnx2i_cmd_response *)cqe;
 	hdr->opcode = resp_cqe->op_code;
 	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 5c55a75ae597..cffd4d75df56 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1213,7 +1213,7 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
 	struct scsi_cmnd *sc = task->sc;
 	struct bnx2i_cmd *cmd = task->dd_data;
-	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
 
 	if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
 	    hba->max_sqes)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d7a4120034a2..256a999d010b 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -84,22 +84,6 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
 			__func__, ##arg);				\
 	} while (0);
 
-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
-#define SNA32_CHECK 2147483648UL
-
-static int iscsi_sna_lt(u32 n1, u32 n2)
-{
-	return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
-			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
-}
-
-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
-static int iscsi_sna_lte(u32 n1, u32 n2)
-{
-	return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
-			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
-}
-
 inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
 {
 	struct Scsi_Host *shost = conn->session->host;
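The two helpers removed in the hunk above implement RFC 1982 serial number arithmetic for the 32-bit iSCSI sequence numbers (presumably relocated to a shared header by this series, though that is not visible in this hunk). A minimal standalone sketch of how the wrap-around comparison behaves; sna_lt() here is my own name for an illustrative copy of the removed iscsi_sna_lt():

#include <stdio.h>
#include <stdint.h>

#define SNA32_CHECK 2147483648UL

/* Same comparison as the removed iscsi_sna_lt() helper. */
static int sna_lt(uint32_t n1, uint32_t n2)
{
	return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
}

int main(void)
{
	/* 0xFFFFFFF0 is "less than" 0x10 once the 32-bit counter wraps... */
	printf("%d\n", sna_lt(0xFFFFFFF0u, 0x10u));	/* prints 1 */
	/* ...while the reverse comparison, half a window apart, is false. */
	printf("%d\n", sna_lt(0x10u, 0xFFFFFFF0u));	/* prints 0 */
	return 0;
}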
@@ -360,7 +344,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 	struct iscsi_conn *conn = task->conn;
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
-	struct iscsi_cmd *hdr;
+	struct iscsi_scsi_req *hdr;
 	unsigned hdrlength, cmd_len;
 	itt_t itt;
 	int rc;
@@ -374,7 +358,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 		if (rc)
 			return rc;
 	}
-	hdr = (struct iscsi_cmd *) task->hdr;
+	hdr = (struct iscsi_scsi_req *)task->hdr;
 	itt = hdr->itt;
 	memset(hdr, 0, sizeof(*hdr));
 
@@ -830,7 +814,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			       struct iscsi_task *task, char *data,
 			       int datalen)
 {
-	struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+	struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
 
diff --git a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
index 5711e7c16b50..40e3d375ea99 100644
--- a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
@@ -24,8 +24,6 @@
 #define BRCMS_SET_SHORTSLOT_OVERRIDE		146
 
 
-#include <linux/interrupt.h>
-
 /* BMAC Note: High-only driver is no longer working in softirq context as it needs to block and
  * sleep so perimeter lock has to be a semaphore instead of spinlock. This requires timers to be
  * submitted to workqueue instead of being on kernel timer
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 5cb0f0ef6af0..b28794b72125 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,5 +31,6 @@ config TCM_PSCSI
 
 source "drivers/target/loopback/Kconfig"
 source "drivers/target/tcm_fc/Kconfig"
+source "drivers/target/iscsi/Kconfig"
 
 endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 21df808a992c..1060c7b7f803 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
 
 # Fabric modules
 obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
-
 obj-$(CONFIG_TCM_FC) += tcm_fc/
+obj-$(CONFIG_ISCSI_TARGET) += iscsi/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
new file mode 100644
index 000000000000..564ff4e0dbc4
--- /dev/null
+++ b/drivers/target/iscsi/Kconfig
@@ -0,0 +1,8 @@
1config ISCSI_TARGET
2 tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
3 select CRYPTO
4 select CRYPTO_CRC32C
5 select CRYPTO_CRC32C_INTEL if X86
6 help
7 Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
8 Target Mode Stack.
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
new file mode 100644
index 000000000000..5b9a2cf7f0a9
--- /dev/null
+++ b/drivers/target/iscsi/Makefile
@@ -0,0 +1,20 @@
1iscsi_target_mod-y += iscsi_target_parameters.o \
2 iscsi_target_seq_pdu_list.o \
3 iscsi_target_tq.o \
4 iscsi_target_auth.o \
5 iscsi_target_datain_values.o \
6 iscsi_target_device.o \
7 iscsi_target_erl0.o \
8 iscsi_target_erl1.o \
9 iscsi_target_erl2.o \
10 iscsi_target_login.o \
11 iscsi_target_nego.o \
12 iscsi_target_nodeattrib.o \
13 iscsi_target_tmr.o \
14 iscsi_target_tpg.o \
15 iscsi_target_util.o \
16 iscsi_target.o \
17 iscsi_target_configfs.o \
18 iscsi_target_stat.o
19
20obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
new file mode 100644
index 000000000000..14c81c4265bd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -0,0 +1,4559 @@
1/*******************************************************************************
2 * This file contains main functions related to the iSCSI Target Core Driver.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/string.h>
22#include <linux/kthread.h>
23#include <linux/crypto.h>
24#include <linux/completion.h>
25#include <asm/unaligned.h>
26#include <scsi/scsi_device.h>
27#include <scsi/iscsi_proto.h>
28#include <target/target_core_base.h>
29#include <target/target_core_tmr.h>
30#include <target/target_core_transport.h>
31
32#include "iscsi_target_core.h"
33#include "iscsi_target_parameters.h"
34#include "iscsi_target_seq_pdu_list.h"
35#include "iscsi_target_tq.h"
36#include "iscsi_target_configfs.h"
37#include "iscsi_target_datain_values.h"
38#include "iscsi_target_erl0.h"
39#include "iscsi_target_erl1.h"
40#include "iscsi_target_erl2.h"
41#include "iscsi_target_login.h"
42#include "iscsi_target_tmr.h"
43#include "iscsi_target_tpg.h"
44#include "iscsi_target_util.h"
45#include "iscsi_target.h"
46#include "iscsi_target_device.h"
47#include "iscsi_target_stat.h"
48
49static LIST_HEAD(g_tiqn_list);
50static LIST_HEAD(g_np_list);
51static DEFINE_SPINLOCK(tiqn_lock);
52static DEFINE_SPINLOCK(np_lock);
53
54static struct idr tiqn_idr;
55struct idr sess_idr;
56struct mutex auth_id_lock;
57spinlock_t sess_idr_lock;
58
59struct iscsit_global *iscsit_global;
60
61struct kmem_cache *lio_cmd_cache;
62struct kmem_cache *lio_qr_cache;
63struct kmem_cache *lio_dr_cache;
64struct kmem_cache *lio_ooo_cache;
65struct kmem_cache *lio_r2t_cache;
66
67static int iscsit_handle_immediate_data(struct iscsi_cmd *,
68 unsigned char *buf, u32);
69static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
70
71struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
72{
73 struct iscsi_tiqn *tiqn = NULL;
74
75 spin_lock(&tiqn_lock);
76 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
77 if (!strcmp(tiqn->tiqn, buf)) {
78
79 spin_lock(&tiqn->tiqn_state_lock);
80 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
81 tiqn->tiqn_access_count++;
82 spin_unlock(&tiqn->tiqn_state_lock);
83 spin_unlock(&tiqn_lock);
84 return tiqn;
85 }
86 spin_unlock(&tiqn->tiqn_state_lock);
87 }
88 }
89 spin_unlock(&tiqn_lock);
90
91 return NULL;
92}
93
94static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
95{
96 spin_lock(&tiqn->tiqn_state_lock);
97 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
98 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
99 spin_unlock(&tiqn->tiqn_state_lock);
100 return 0;
101 }
102 spin_unlock(&tiqn->tiqn_state_lock);
103
104 return -1;
105}
106
107void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
108{
109 spin_lock(&tiqn->tiqn_state_lock);
110 tiqn->tiqn_access_count--;
111 spin_unlock(&tiqn->tiqn_state_lock);
112}
113
114/*
115 * Note that IQN formatting is expected to be done in userspace, and
116 * no explicit IQN format checks are done here.
117 */
118struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
119{
120 struct iscsi_tiqn *tiqn = NULL;
121 int ret;
122
123 if (strlen(buf) > ISCSI_IQN_LEN) {
124 pr_err("Target IQN exceeds %d bytes\n",
125 ISCSI_IQN_LEN);
126 return ERR_PTR(-EINVAL);
127 }
128
129 tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
130 if (!tiqn) {
131 pr_err("Unable to allocate struct iscsi_tiqn\n");
132 return ERR_PTR(-ENOMEM);
133 }
134
135 sprintf(tiqn->tiqn, "%s", buf);
136 INIT_LIST_HEAD(&tiqn->tiqn_list);
137 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
138 spin_lock_init(&tiqn->tiqn_state_lock);
139 spin_lock_init(&tiqn->tiqn_tpg_lock);
140 spin_lock_init(&tiqn->sess_err_stats.lock);
141 spin_lock_init(&tiqn->login_stats.lock);
142 spin_lock_init(&tiqn->logout_stats.lock);
143
144 if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
145 pr_err("idr_pre_get() for tiqn_idr failed\n");
146 kfree(tiqn);
147 return ERR_PTR(-ENOMEM);
148 }
149 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
150
151 spin_lock(&tiqn_lock);
152 ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
153 if (ret < 0) {
154 pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
155 spin_unlock(&tiqn_lock);
156 kfree(tiqn);
157 return ERR_PTR(ret);
158 }
159 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
160 spin_unlock(&tiqn_lock);
161
162 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
163
164 return tiqn;
165
166}
167
168static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
169{
170 /*
171 * Wait for accesses to said struct iscsi_tiqn to end.
172 */
173 spin_lock(&tiqn->tiqn_state_lock);
174 while (tiqn->tiqn_access_count != 0) {
175 spin_unlock(&tiqn->tiqn_state_lock);
176 msleep(10);
177 spin_lock(&tiqn->tiqn_state_lock);
178 }
179 spin_unlock(&tiqn->tiqn_state_lock);
180}
181
182void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
183{
184 /*
185 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
186 * while holding tiqn->tiqn_state_lock. This means that all subsequent
187 * attempts to access this struct iscsi_tiqn will fail from both transport
188 * fabric and control code paths.
189 */
190 if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
191 pr_err("iscsit_set_tiqn_shutdown() failed\n");
192 return;
193 }
194
195 iscsit_wait_for_tiqn(tiqn);
196
197 spin_lock(&tiqn_lock);
198 list_del(&tiqn->tiqn_list);
199 idr_remove(&tiqn_idr, tiqn->tiqn_index);
200 spin_unlock(&tiqn_lock);
201
202 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
203 tiqn->tiqn);
204 kfree(tiqn);
205}
206
207int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
208{
209 int ret;
210 /*
211 * Determine if the network portal is accepting storage traffic.
212 */
213 spin_lock_bh(&np->np_thread_lock);
214 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
215 spin_unlock_bh(&np->np_thread_lock);
216 return -1;
217 }
218 if (np->np_login_tpg) {
219 pr_err("np->np_login_tpg() is not NULL!\n");
220 spin_unlock_bh(&np->np_thread_lock);
221 return -1;
222 }
223 spin_unlock_bh(&np->np_thread_lock);
224 /*
225 * Determine if the portal group is accepting storage traffic.
226 */
227 spin_lock_bh(&tpg->tpg_state_lock);
228 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
229 spin_unlock_bh(&tpg->tpg_state_lock);
230 return -1;
231 }
232 spin_unlock_bh(&tpg->tpg_state_lock);
233
234 /*
235 * Here we serialize access across the TIQN+TPG Tuple.
236 */
237 ret = mutex_lock_interruptible(&tpg->np_login_lock);
238 if ((ret != 0) || signal_pending(current))
239 return -1;
240
241 spin_lock_bh(&np->np_thread_lock);
242 np->np_login_tpg = tpg;
243 spin_unlock_bh(&np->np_thread_lock);
244
245 return 0;
246}
247
248int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
249{
250 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
251
252 spin_lock_bh(&np->np_thread_lock);
253 np->np_login_tpg = NULL;
254 spin_unlock_bh(&np->np_thread_lock);
255
256 mutex_unlock(&tpg->np_login_lock);
257
258 if (tiqn)
259 iscsit_put_tiqn_for_login(tiqn);
260
261 return 0;
262}
263
264static struct iscsi_np *iscsit_get_np(
265 struct __kernel_sockaddr_storage *sockaddr,
266 int network_transport)
267{
268 struct sockaddr_in *sock_in, *sock_in_e;
269 struct sockaddr_in6 *sock_in6, *sock_in6_e;
270 struct iscsi_np *np;
271 int ip_match = 0;
272 u16 port;
273
274 spin_lock_bh(&np_lock);
275 list_for_each_entry(np, &g_np_list, np_list) {
276 spin_lock(&np->np_thread_lock);
277 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
278 spin_unlock(&np->np_thread_lock);
279 continue;
280 }
281
282 if (sockaddr->ss_family == AF_INET6) {
283 sock_in6 = (struct sockaddr_in6 *)sockaddr;
284 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
285
286 if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
287 (void *)&sock_in6_e->sin6_addr.in6_u,
288 sizeof(struct in6_addr)))
289 ip_match = 1;
290
291 port = ntohs(sock_in6->sin6_port);
292 } else {
293 sock_in = (struct sockaddr_in *)sockaddr;
294 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
295
296 if (sock_in->sin_addr.s_addr ==
297 sock_in_e->sin_addr.s_addr)
298 ip_match = 1;
299
300 port = ntohs(sock_in->sin_port);
301 }
302
303 if ((ip_match == 1) && (np->np_port == port) &&
304 (np->np_network_transport == network_transport)) {
305 /*
306 * Increment the np_exports reference count now to
307 * prevent iscsit_del_np() below from being called
308 * while iscsi_tpg_add_network_portal() is called.
309 */
310 np->np_exports++;
311 spin_unlock(&np->np_thread_lock);
312 spin_unlock_bh(&np_lock);
313 return np;
314 }
315 spin_unlock(&np->np_thread_lock);
316 }
317 spin_unlock_bh(&np_lock);
318
319 return NULL;
320}
321
322struct iscsi_np *iscsit_add_np(
323 struct __kernel_sockaddr_storage *sockaddr,
324 char *ip_str,
325 int network_transport)
326{
327 struct sockaddr_in *sock_in;
328 struct sockaddr_in6 *sock_in6;
329 struct iscsi_np *np;
330 int ret;
331 /*
332 * Locate the existing struct iscsi_np if already active..
333 */
334 np = iscsit_get_np(sockaddr, network_transport);
335 if (np)
336 return np;
337
338 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
339 if (!np) {
340 pr_err("Unable to allocate memory for struct iscsi_np\n");
341 return ERR_PTR(-ENOMEM);
342 }
343
344 np->np_flags |= NPF_IP_NETWORK;
345 if (sockaddr->ss_family == AF_INET6) {
346 sock_in6 = (struct sockaddr_in6 *)sockaddr;
347 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
348 np->np_port = ntohs(sock_in6->sin6_port);
349 } else {
350 sock_in = (struct sockaddr_in *)sockaddr;
351 sprintf(np->np_ip, "%s", ip_str);
352 np->np_port = ntohs(sock_in->sin_port);
353 }
354
355 np->np_network_transport = network_transport;
356 spin_lock_init(&np->np_thread_lock);
357 init_completion(&np->np_restart_comp);
358 INIT_LIST_HEAD(&np->np_list);
359
360 ret = iscsi_target_setup_login_socket(np, sockaddr);
361 if (ret != 0) {
362 kfree(np);
363 return ERR_PTR(ret);
364 }
365
366 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
367 if (IS_ERR(np->np_thread)) {
368 pr_err("Unable to create kthread: iscsi_np\n");
369 ret = PTR_ERR(np->np_thread);
370 kfree(np);
371 return ERR_PTR(ret);
372 }
373 /*
374 * Increment the np_exports reference count now to prevent
375 * iscsit_del_np() below from being run while a new call to
376 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
377 * active. We don't need to hold np->np_thread_lock at this
378 * point because iscsi_np has not been added to g_np_list yet.
379 */
380 np->np_exports = 1;
381
382 spin_lock_bh(&np_lock);
383 list_add_tail(&np->np_list, &g_np_list);
384 spin_unlock_bh(&np_lock);
385
386 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
387 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
388 "TCP" : "SCTP");
389
390 return np;
391}
392
393int iscsit_reset_np_thread(
394 struct iscsi_np *np,
395 struct iscsi_tpg_np *tpg_np,
396 struct iscsi_portal_group *tpg)
397{
398 spin_lock_bh(&np->np_thread_lock);
399 if (tpg && tpg_np) {
400 /*
401 * The reset operation need only be performed when the
402 * passed struct iscsi_portal_group has a login in progress
403 * to one of the network portals.
404 */
405 if (tpg_np->tpg_np->np_login_tpg != tpg) {
406 spin_unlock_bh(&np->np_thread_lock);
407 return 0;
408 }
409 }
410 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
411 spin_unlock_bh(&np->np_thread_lock);
412 return 0;
413 }
414 np->np_thread_state = ISCSI_NP_THREAD_RESET;
415
416 if (np->np_thread) {
417 spin_unlock_bh(&np->np_thread_lock);
418 send_sig(SIGINT, np->np_thread, 1);
419 wait_for_completion(&np->np_restart_comp);
420 spin_lock_bh(&np->np_thread_lock);
421 }
422 spin_unlock_bh(&np->np_thread_lock);
423
424 return 0;
425}
426
427int iscsit_del_np_comm(struct iscsi_np *np)
428{
429 if (!np->np_socket)
430 return 0;
431
432 /*
433 * Some network transports allocate their own struct sock->file,
434 * see if we need to free any additional allocated resources.
435 */
436 if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
437 kfree(np->np_socket->file);
438 np->np_socket->file = NULL;
439 }
440
441 sock_release(np->np_socket);
442 return 0;
443}
444
445int iscsit_del_np(struct iscsi_np *np)
446{
447 spin_lock_bh(&np->np_thread_lock);
448 np->np_exports--;
449 if (np->np_exports) {
450 spin_unlock_bh(&np->np_thread_lock);
451 return 0;
452 }
453 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
454 spin_unlock_bh(&np->np_thread_lock);
455
456 if (np->np_thread) {
457 /*
458		 * We need to send the signal to wake up Linux/Net
459		 * which may be sleeping in sock_accept().
460 */
461 send_sig(SIGINT, np->np_thread, 1);
462 kthread_stop(np->np_thread);
463 }
464 iscsit_del_np_comm(np);
465
466 spin_lock_bh(&np_lock);
467 list_del(&np->np_list);
468 spin_unlock_bh(&np_lock);
469
470 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
471 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
472 "TCP" : "SCTP");
473
474 kfree(np);
475 return 0;
476}
477
478static int __init iscsi_target_init_module(void)
479{
480 int ret = 0;
481
482 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
483
484 iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
485 if (!iscsit_global) {
486 pr_err("Unable to allocate memory for iscsit_global\n");
487 return -1;
488 }
489 mutex_init(&auth_id_lock);
490 spin_lock_init(&sess_idr_lock);
491 idr_init(&tiqn_idr);
492 idr_init(&sess_idr);
493
494 ret = iscsi_target_register_configfs();
495 if (ret < 0)
496 goto out;
497
498 ret = iscsi_thread_set_init();
499 if (ret < 0)
500 goto configfs_out;
501
502 if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
503 TARGET_THREAD_SET_COUNT) {
504 pr_err("iscsi_allocate_thread_sets() returned"
505 " unexpected value!\n");
506 goto ts_out1;
507 }
508
509 lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
510 sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
511 0, NULL);
512 if (!lio_cmd_cache) {
513 pr_err("Unable to kmem_cache_create() for"
514 " lio_cmd_cache\n");
515 goto ts_out2;
516 }
517
518 lio_qr_cache = kmem_cache_create("lio_qr_cache",
519 sizeof(struct iscsi_queue_req),
520 __alignof__(struct iscsi_queue_req), 0, NULL);
521 if (!lio_qr_cache) {
522		pr_err("Unable to kmem_cache_create() for"
523 " lio_qr_cache\n");
524 goto cmd_out;
525 }
526
527 lio_dr_cache = kmem_cache_create("lio_dr_cache",
528 sizeof(struct iscsi_datain_req),
529 __alignof__(struct iscsi_datain_req), 0, NULL);
530 if (!lio_dr_cache) {
531 pr_err("Unable to kmem_cache_create() for"
532 " lio_dr_cache\n");
533 goto qr_out;
534 }
535
536 lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
537 sizeof(struct iscsi_ooo_cmdsn),
538 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
539 if (!lio_ooo_cache) {
540 pr_err("Unable to kmem_cache_create() for"
541 " lio_ooo_cache\n");
542 goto dr_out;
543 }
544
545 lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
546 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
547 0, NULL);
548 if (!lio_r2t_cache) {
549 pr_err("Unable to kmem_cache_create() for"
550 " lio_r2t_cache\n");
551 goto ooo_out;
552 }
553
554 if (iscsit_load_discovery_tpg() < 0)
555 goto r2t_out;
556
557 return ret;
558r2t_out:
559 kmem_cache_destroy(lio_r2t_cache);
560ooo_out:
561 kmem_cache_destroy(lio_ooo_cache);
562dr_out:
563 kmem_cache_destroy(lio_dr_cache);
564qr_out:
565 kmem_cache_destroy(lio_qr_cache);
566cmd_out:
567 kmem_cache_destroy(lio_cmd_cache);
568ts_out2:
569 iscsi_deallocate_thread_sets();
570ts_out1:
571 iscsi_thread_set_free();
572configfs_out:
573 iscsi_target_deregister_configfs();
574out:
575 kfree(iscsit_global);
576 return -ENOMEM;
577}
578
579static void __exit iscsi_target_cleanup_module(void)
580{
581 iscsi_deallocate_thread_sets();
582 iscsi_thread_set_free();
583 iscsit_release_discovery_tpg();
584 kmem_cache_destroy(lio_cmd_cache);
585 kmem_cache_destroy(lio_qr_cache);
586 kmem_cache_destroy(lio_dr_cache);
587 kmem_cache_destroy(lio_ooo_cache);
588 kmem_cache_destroy(lio_r2t_cache);
589
590 iscsi_target_deregister_configfs();
591
592 kfree(iscsit_global);
593}
594
595int iscsit_add_reject(
596 u8 reason,
597 int fail_conn,
598 unsigned char *buf,
599 struct iscsi_conn *conn)
600{
601 struct iscsi_cmd *cmd;
602 struct iscsi_reject *hdr;
603 int ret;
604
605 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
606 if (!cmd)
607 return -1;
608
609 cmd->iscsi_opcode = ISCSI_OP_REJECT;
610 if (fail_conn)
611 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
612
613 hdr = (struct iscsi_reject *) cmd->pdu;
614 hdr->reason = reason;
615
616 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
617 if (!cmd->buf_ptr) {
618 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
619 iscsit_release_cmd(cmd);
620 return -1;
621 }
622 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
623
624 spin_lock_bh(&conn->cmd_lock);
625 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
626 spin_unlock_bh(&conn->cmd_lock);
627
628 cmd->i_state = ISTATE_SEND_REJECT;
629 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
630
631 ret = wait_for_completion_interruptible(&cmd->reject_comp);
632 if (ret != 0)
633 return -1;
634
635 return (!fail_conn) ? 0 : -1;
636}
637
638int iscsit_add_reject_from_cmd(
639 u8 reason,
640 int fail_conn,
641 int add_to_conn,
642 unsigned char *buf,
643 struct iscsi_cmd *cmd)
644{
645 struct iscsi_conn *conn;
646 struct iscsi_reject *hdr;
647 int ret;
648
649 if (!cmd->conn) {
650 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
651 cmd->init_task_tag);
652 return -1;
653 }
654 conn = cmd->conn;
655
656 cmd->iscsi_opcode = ISCSI_OP_REJECT;
657 if (fail_conn)
658 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
659
660 hdr = (struct iscsi_reject *) cmd->pdu;
661 hdr->reason = reason;
662
663 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
664 if (!cmd->buf_ptr) {
665 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
666 iscsit_release_cmd(cmd);
667 return -1;
668 }
669 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
670
671 if (add_to_conn) {
672 spin_lock_bh(&conn->cmd_lock);
673 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
674 spin_unlock_bh(&conn->cmd_lock);
675 }
676
677 cmd->i_state = ISTATE_SEND_REJECT;
678 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
679
680 ret = wait_for_completion_interruptible(&cmd->reject_comp);
681 if (ret != 0)
682 return -1;
683
684 return (!fail_conn) ? 0 : -1;
685}
686
687/*
688 * Map some portion of the allocated scatterlist to an iovec, suitable for
689 * kernel sockets to copy data in/out. This handles both pages and slab-allocated
690 * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
691 * either case (see iscsit_alloc_buffs)
692 */
693static int iscsit_map_iovec(
694 struct iscsi_cmd *cmd,
695 struct kvec *iov,
696 u32 data_offset,
697 u32 data_length)
698{
699 u32 i = 0;
700 struct scatterlist *sg;
701 unsigned int page_off;
702
703 /*
704 * We have a private mapping of the allocated pages in t_mem_sg.
705 * At this point, we also know each contains a page.
706 */
707 sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
708 page_off = (data_offset % PAGE_SIZE);
709
710 cmd->first_data_sg = sg;
711 cmd->first_data_sg_off = page_off;
712
713 while (data_length) {
714 u32 cur_len = min_t(u32, data_length, sg->length - page_off);
715
716 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
717 iov[i].iov_len = cur_len;
718
719 data_length -= cur_len;
720 page_off = 0;
721 sg = sg_next(sg);
722 i++;
723 }
724
725 cmd->kmapped_nents = i;
726
727 return i;
728}
729
730static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
731{
732 u32 i;
733 struct scatterlist *sg;
734
735 sg = cmd->first_data_sg;
736
737 for (i = 0; i < cmd->kmapped_nents; i++)
738 kunmap(sg_page(&sg[i]));
739}
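iscsit_map_iovec()/iscsit_unmap_iovec() above expose a byte range of the command's scatterlist as a kvec so a kernel socket can copy data directly in or out. A hypothetical caller (example_recv_range is my own name, not part of this patch), sketched only to show the map/receive/unmap pairing; the real DataOut receive path further down in this file does the same thing with padding and digest entries added to the kvec:

/* Illustrative sketch only: receive 'length' bytes at 'offset' of the
 * command's payload, reusing the preallocated cmd->iov_data array. */
static int example_recv_range(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			      u32 offset, u32 length)
{
	struct kvec *iov = &cmd->iov_data[0];
	int iov_count, rx_got;

	iov_count = iscsit_map_iovec(cmd, iov, offset, length);
	if (iov_count < 0)
		return -1;

	rx_got = rx_data(conn, iov, iov_count, length);
	iscsit_unmap_iovec(cmd);

	if (rx_got != length)
		return -1;

	return 0;
}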
740
741static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
742{
743 struct iscsi_cmd *cmd;
744
745 conn->exp_statsn = exp_statsn;
746
747 spin_lock_bh(&conn->cmd_lock);
748 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
749 spin_lock(&cmd->istate_lock);
750 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
751 (cmd->stat_sn < exp_statsn)) {
752 cmd->i_state = ISTATE_REMOVE;
753 spin_unlock(&cmd->istate_lock);
754 iscsit_add_cmd_to_immediate_queue(cmd, conn,
755 cmd->i_state);
756 continue;
757 }
758 spin_unlock(&cmd->istate_lock);
759 }
760 spin_unlock_bh(&conn->cmd_lock);
761}
762
763static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
764{
765 u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
766 cmd->se_cmd.t_data_nents;
767
768 iov_count += TRANSPORT_IOV_DATA_BUFFER;
769
770 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
771 if (!cmd->iov_data) {
772 pr_err("Unable to allocate cmd->iov_data\n");
773 return -ENOMEM;
774 }
775
776 cmd->orig_iov_data_count = iov_count;
777 return 0;
778}
779
780static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
781{
782 struct scatterlist *sgl;
783 u32 length = cmd->se_cmd.data_length;
784 int nents = DIV_ROUND_UP(length, PAGE_SIZE);
785 int i = 0, ret;
786 /*
787 * If no SCSI payload is present, allocate the default iovecs used for
788 * iSCSI PDU Header
789 */
790 if (!length)
791 return iscsit_allocate_iovecs(cmd);
792
793 sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
794 if (!sgl)
795 return -ENOMEM;
796
797 sg_init_table(sgl, nents);
798
799 while (length) {
800 int buf_size = min_t(int, length, PAGE_SIZE);
801 struct page *page;
802
803 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
804 if (!page)
805 goto page_alloc_failed;
806
807 sg_set_page(&sgl[i], page, buf_size, 0);
808
809 length -= buf_size;
810 i++;
811 }
812
813 cmd->t_mem_sg = sgl;
814 cmd->t_mem_sg_nents = nents;
815
816 /* BIDI ops not supported */
817
818 /* Tell the core about our preallocated memory */
819 transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
820 /*
821 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
822 * so that cmd->se_cmd.t_tasks_se_num has been set.
823 */
824 ret = iscsit_allocate_iovecs(cmd);
825 if (ret < 0)
826 goto page_alloc_failed;
827
828 return 0;
829
830page_alloc_failed:
831 while (i >= 0) {
832 __free_page(sg_page(&sgl[i]));
833 i--;
834 }
835 kfree(cmd->t_mem_sg);
836 cmd->t_mem_sg = NULL;
837 return -ENOMEM;
838}
839
840static int iscsit_handle_scsi_cmd(
841 struct iscsi_conn *conn,
842 unsigned char *buf)
843{
844 int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
845 int dump_immediate_data = 0, send_check_condition = 0, payload_length;
846 struct iscsi_cmd *cmd = NULL;
847 struct iscsi_scsi_req *hdr;
848
849 spin_lock_bh(&conn->sess->session_stats_lock);
850 conn->sess->cmd_pdus++;
851 if (conn->sess->se_sess->se_node_acl) {
852 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
853 conn->sess->se_sess->se_node_acl->num_cmds++;
854 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
855 }
856 spin_unlock_bh(&conn->sess->session_stats_lock);
857
858 hdr = (struct iscsi_scsi_req *) buf;
859 payload_length = ntoh24(hdr->dlength);
860 hdr->itt = be32_to_cpu(hdr->itt);
861 hdr->data_length = be32_to_cpu(hdr->data_length);
862 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
863 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
864
865 /* FIXME; Add checks for AdditionalHeaderSegment */
866
867 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
868 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
869 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
870 " not set. Bad iSCSI Initiator.\n");
871 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
872 buf, conn);
873 }
874
875 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
876 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
877 /*
878 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
879		 * that adds support for RESERVE/RELEASE. There is a bug,
880		 * added with this new functionality, that sets R/W bits when
881		 * neither CDB carries any READ or WRITE data payloads.
882 */
883 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
884 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
885 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
886 goto done;
887 }
888
889 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
890 " set when Expected Data Transfer Length is 0 for"
891 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
892 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
893 buf, conn);
894 }
895done:
896
897 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
898 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
899 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
900 " MUST be set if Expected Data Transfer Length is not 0."
901 " Bad iSCSI Initiator\n");
902 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
903 buf, conn);
904 }
905
906 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
907 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
908 pr_err("Bidirectional operations not supported!\n");
909 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
910 buf, conn);
911 }
912
913 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
914 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
915 " Scsi Command PDU.\n");
916 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
917 buf, conn);
918 }
919
920 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
921 pr_err("ImmediateData=No but DataSegmentLength=%u,"
922 " protocol error.\n", payload_length);
923 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
924 buf, conn);
925 }
926
927 if ((hdr->data_length == payload_length) &&
928 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
929 pr_err("Expected Data Transfer Length and Length of"
930 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
931			" bit is not set, protocol error\n");
932 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
933 buf, conn);
934 }
935
936 if (payload_length > hdr->data_length) {
937 pr_err("DataSegmentLength: %u is greater than"
938 " EDTL: %u, protocol error.\n", payload_length,
939 hdr->data_length);
940 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
941 buf, conn);
942 }
943
944 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
945 pr_err("DataSegmentLength: %u is greater than"
946 " MaxRecvDataSegmentLength: %u, protocol error.\n",
947 payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
948 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
949 buf, conn);
950 }
951
952 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
953 pr_err("DataSegmentLength: %u is greater than"
954 " FirstBurstLength: %u, protocol error.\n",
955 payload_length, conn->sess->sess_ops->FirstBurstLength);
956 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
957 buf, conn);
958 }
959
960 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
961 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
962 DMA_NONE;
963
964 cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
965 (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
966 if (!cmd)
967 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
968 buf, conn);
969
970 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
971 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
972 hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
973
974 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
975 cmd->i_state = ISTATE_NEW_CMD;
976 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
977 cmd->immediate_data = (payload_length) ? 1 : 0;
978 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
979 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
980 if (cmd->unsolicited_data)
981 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
982
983 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
984 if (hdr->flags & ISCSI_FLAG_CMD_READ) {
985 spin_lock_bh(&conn->sess->ttt_lock);
986 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
987 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
988 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
989 spin_unlock_bh(&conn->sess->ttt_lock);
990 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
991 cmd->targ_xfer_tag = 0xFFFFFFFF;
992 cmd->cmd_sn = hdr->cmdsn;
993 cmd->exp_stat_sn = hdr->exp_statsn;
994 cmd->first_burst_len = payload_length;
995
996 if (cmd->data_direction == DMA_FROM_DEVICE) {
997 struct iscsi_datain_req *dr;
998
999 dr = iscsit_allocate_datain_req();
1000 if (!dr)
1001 return iscsit_add_reject_from_cmd(
1002 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1003 1, 1, buf, cmd);
1004
1005 iscsit_attach_datain_req(cmd, dr);
1006 }
1007
1008 /*
1009 * The CDB is going to an se_device_t.
1010 */
1011 ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
1012 get_unaligned_le64(&hdr->lun));
1013 if (ret < 0) {
1014 if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
1015 pr_debug("Responding to non-acl'ed,"
1016 " non-existent or non-exported iSCSI LUN:"
1017 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
1018 }
1019 if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
1020 return iscsit_add_reject_from_cmd(
1021 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1022 1, 1, buf, cmd);
1023
1024 send_check_condition = 1;
1025 goto attach_cmd;
1026 }
1027 /*
1028 * The Initiator Node has access to the LUN (the addressing method
1029 * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
1030 * allocate 1->N transport tasks (depending on sector count and
1031	 * maximum request size the physical HBA(s) can handle).
1032 */
1033 transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
1034 if (transport_ret == -ENOMEM) {
1035 return iscsit_add_reject_from_cmd(
1036 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1037 1, 1, buf, cmd);
1038 } else if (transport_ret == -EINVAL) {
1039 /*
1040 * Unsupported SAM Opcode. CHECK_CONDITION will be sent
1041 * in iscsit_execute_cmd() during the CmdSN OOO Execution
1042		 * Mechanism.
1043 */
1044 send_check_condition = 1;
1045 } else {
1046 if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
1047 return iscsit_add_reject_from_cmd(
1048 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1049 1, 1, buf, cmd);
1050 }
1051
1052attach_cmd:
1053 spin_lock_bh(&conn->cmd_lock);
1054 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1055 spin_unlock_bh(&conn->cmd_lock);
1056 /*
1057 * Check if we need to delay processing because of ALUA
1058 * Active/NonOptimized primary access state..
1059 */
1060 core_alua_check_nonop_delay(&cmd->se_cmd);
1061 /*
1062 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
1063 * also call iscsit_allocate_iovecs()
1064 */
1065 ret = iscsit_alloc_buffs(cmd);
1066 if (ret < 0)
1067 return iscsit_add_reject_from_cmd(
1068 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1069 1, 1, buf, cmd);
1070 /*
1071 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1072 * the Immediate Bit is not set, and no Immediate
1073 * Data is attached.
1074 *
1075 * A PDU/CmdSN carrying Immediate Data can only
1076 * be processed after the DataCRC has passed.
1077 * If the DataCRC fails, the CmdSN MUST NOT
1078 * be acknowledged. (See below)
1079 */
1080 if (!cmd->immediate_data) {
1081 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1082 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1083 return iscsit_add_reject_from_cmd(
1084 ISCSI_REASON_PROTOCOL_ERROR,
1085 1, 0, buf, cmd);
1086 }
1087
1088 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1089
1090 /*
1091 * If no Immediate Data is attached, it's OK to return now.
1092 */
1093 if (!cmd->immediate_data) {
1094 if (send_check_condition)
1095 return 0;
1096
1097 if (cmd->unsolicited_data) {
1098 iscsit_set_dataout_sequence_values(cmd);
1099
1100 spin_lock_bh(&cmd->dataout_timeout_lock);
1101 iscsit_start_dataout_timer(cmd, cmd->conn);
1102 spin_unlock_bh(&cmd->dataout_timeout_lock);
1103 }
1104
1105 return 0;
1106 }
1107
1108 /*
1109 * Early CHECK_CONDITIONs never make it to the transport processing
1110 * thread. They are processed in CmdSN order by
1111 * iscsit_check_received_cmdsn() below.
1112 */
1113 if (send_check_condition) {
1114 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1115 dump_immediate_data = 1;
1116 goto after_immediate_data;
1117 }
1118 /*
1119 * Call directly into transport_generic_new_cmd() to perform
1120 * the backend memory allocation.
1121 */
1122 ret = transport_generic_new_cmd(&cmd->se_cmd);
1123 if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
1124 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1125 dump_immediate_data = 1;
1126 goto after_immediate_data;
1127 }
1128
1129 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
1130after_immediate_data:
1131 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1132 /*
1133 * A PDU/CmdSN carrying Immediate Data passed
1134 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1135 * Immediate Bit is not set.
1136 */
1137 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1138 /*
1139 * Special case for Unsupported SAM WRITE Opcodes
1140 * and ImmediateData=Yes.
1141 */
1142 if (dump_immediate_data) {
1143 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
1144 return -1;
1145 } else if (cmd->unsolicited_data) {
1146 iscsit_set_dataout_sequence_values(cmd);
1147
1148 spin_lock_bh(&cmd->dataout_timeout_lock);
1149 iscsit_start_dataout_timer(cmd, cmd->conn);
1150 spin_unlock_bh(&cmd->dataout_timeout_lock);
1151 }
1152
1153 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1154 return iscsit_add_reject_from_cmd(
1155 ISCSI_REASON_PROTOCOL_ERROR,
1156 1, 0, buf, cmd);
1157
1158 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1159 /*
1160 * Immediate Data failed DataCRC and ERL>=1,
1161 * silently drop this PDU and let the initiator
1162 * plug the CmdSN gap.
1163 *
1164 * FIXME: Send Unsolicited NOPIN with reserved
1165 * TTT here to help the initiator figure out
1166 * the missing CmdSN, although they should be
1167 * intelligent enough to determine the missing
1168 * CmdSN and issue a retry to plug the sequence.
1169 */
1170 cmd->i_state = ISTATE_REMOVE;
1171 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
1172 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1173 return -1;
1174
1175 return 0;
1176}
1177
1178static u32 iscsit_do_crypto_hash_sg(
1179 struct hash_desc *hash,
1180 struct iscsi_cmd *cmd,
1181 u32 data_offset,
1182 u32 data_length,
1183 u32 padding,
1184 u8 *pad_bytes)
1185{
1186 u32 data_crc;
1187 u32 i;
1188 struct scatterlist *sg;
1189 unsigned int page_off;
1190
1191 crypto_hash_init(hash);
1192
1193 sg = cmd->first_data_sg;
1194 page_off = cmd->first_data_sg_off;
1195
1196 i = 0;
1197 while (data_length) {
1198 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
1199
1200 crypto_hash_update(hash, &sg[i], cur_len);
1201
1202 data_length -= cur_len;
1203 page_off = 0;
1204 i++;
1205 }
1206
1207 if (padding) {
1208 struct scatterlist pad_sg;
1209
1210 sg_init_one(&pad_sg, pad_bytes, padding);
1211 crypto_hash_update(hash, &pad_sg, padding);
1212 }
1213 crypto_hash_final(hash, (u8 *) &data_crc);
1214
1215 return data_crc;
1216}
1217
1218static void iscsit_do_crypto_hash_buf(
1219 struct hash_desc *hash,
1220 unsigned char *buf,
1221 u32 payload_length,
1222 u32 padding,
1223 u8 *pad_bytes,
1224 u8 *data_crc)
1225{
1226 struct scatterlist sg;
1227
1228 crypto_hash_init(hash);
1229
1230 sg_init_one(&sg, (u8 *)buf, payload_length);
1231 crypto_hash_update(hash, &sg, payload_length);
1232
1233 if (padding) {
1234 sg_init_one(&sg, pad_bytes, padding);
1235 crypto_hash_update(hash, &sg, padding);
1236 }
1237 crypto_hash_final(hash, data_crc);
1238}
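Both digest helpers above drive a CRC32C transform through the legacy crypto_hash API; the hash_desc they receive (for example conn->conn_rx_hash in the DataOut path below) is initialized elsewhere and is not part of this file. A minimal, purely illustrative sketch of allocating such a transform and digesting one flat buffer, assuming only the crypto_hash calls already used above; example_crc32c_digest is my own name and not anything this driver defines:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: allocate a CRC32C transform, digest one flat buffer,
 * and return 0 with the checksum written to *crc_out on success. */
static int example_crc32c_digest(const void *buf, unsigned int len, u32 *crc_out)
{
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	desc.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = 0;

	sg_init_one(&sg, buf, len);

	ret = crypto_hash_init(&desc);
	if (!ret)
		ret = crypto_hash_update(&desc, &sg, len);
	if (!ret)
		ret = crypto_hash_final(&desc, (u8 *)crc_out);

	crypto_free_hash(desc.tfm);
	return ret;
}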
1239
1240static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1241{
1242 int iov_ret, ooo_cmdsn = 0, ret;
1243 u8 data_crc_failed = 0;
1244 u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
1245 u32 rx_size = 0, payload_length;
1246 struct iscsi_cmd *cmd = NULL;
1247 struct se_cmd *se_cmd;
1248 struct iscsi_data *hdr;
1249 struct kvec *iov;
1250 unsigned long flags;
1251
1252 hdr = (struct iscsi_data *) buf;
1253 payload_length = ntoh24(hdr->dlength);
1254 hdr->itt = be32_to_cpu(hdr->itt);
1255 hdr->ttt = be32_to_cpu(hdr->ttt);
1256 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1257 hdr->datasn = be32_to_cpu(hdr->datasn);
1258 hdr->offset = be32_to_cpu(hdr->offset);
1259
1260 if (!payload_length) {
1261 pr_err("DataOUT payload is ZERO, protocol error.\n");
1262 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1263 buf, conn);
1264 }
1265
1266 /* iSCSI write */
1267 spin_lock_bh(&conn->sess->session_stats_lock);
1268 conn->sess->rx_data_octets += payload_length;
1269 if (conn->sess->se_sess->se_node_acl) {
1270 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
1271 conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
1272 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
1273 }
1274 spin_unlock_bh(&conn->sess->session_stats_lock);
1275
1276 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
1277 pr_err("DataSegmentLength: %u is greater than"
1278 " MaxRecvDataSegmentLength: %u\n", payload_length,
1279 conn->conn_ops->MaxRecvDataSegmentLength);
1280 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1281 buf, conn);
1282 }
1283
1284 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
1285 payload_length);
1286 if (!cmd)
1287 return 0;
1288
1289 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1290 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1291 hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
1292 payload_length, conn->cid);
1293
1294 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1295 pr_err("Command ITT: 0x%08x received DataOUT after"
1296 " last DataOUT received, dumping payload\n",
1297 cmd->init_task_tag);
1298 return iscsit_dump_data_payload(conn, payload_length, 1);
1299 }
1300
1301 if (cmd->data_direction != DMA_TO_DEVICE) {
1302 pr_err("Command ITT: 0x%08x received DataOUT for a"
1303 " NON-WRITE command.\n", cmd->init_task_tag);
1304 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
1305 1, 0, buf, cmd);
1306 }
1307 se_cmd = &cmd->se_cmd;
1308 iscsit_mod_dataout_timer(cmd);
1309
1310 if ((hdr->offset + payload_length) > cmd->data_length) {
1311 pr_err("DataOut Offset: %u, Length %u greater than"
1312 " iSCSI Command EDTL %u, protocol error.\n",
1313 hdr->offset, payload_length, cmd->data_length);
1314 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
1315 1, 0, buf, cmd);
1316 }
1317
1318 if (cmd->unsolicited_data) {
1319 int dump_unsolicited_data = 0;
1320
1321 if (conn->sess->sess_ops->InitialR2T) {
1322 pr_err("Received unexpected unsolicited data"
1323 " while InitialR2T=Yes, protocol error.\n");
1324 transport_send_check_condition_and_sense(&cmd->se_cmd,
1325 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1326 return -1;
1327 }
1328 /*
1329 * Special case for dealing with Unsolicited DataOUT
1330 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1331 * failures;
1332 */
1333
1334 /* Something's amiss if we're not in WRITE_PENDING state... */
1335 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1336 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1337 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1338
1339 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1340 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
1341 (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
1342 dump_unsolicited_data = 1;
1343 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1344
1345 if (dump_unsolicited_data) {
1346 /*
1347 * Check if a delayed TASK_ABORTED status needs to
1348 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1349			 * received with the unsolicited data out.
1350 */
1351 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1352 iscsit_stop_dataout_timer(cmd);
1353
1354 transport_check_aborted_status(se_cmd,
1355 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
1356 return iscsit_dump_data_payload(conn, payload_length, 1);
1357 }
1358 } else {
1359 /*
1360 * For the normal solicited data path:
1361 *
1362 * Check for a delayed TASK_ABORTED status and dump any
1363 * incoming data out payload if one exists. Also, when the
1364 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1365 * data out sequence, we decrement outstanding_r2ts. Once
1366 * outstanding_r2ts reaches zero, go ahead and send the delayed
1367 * TASK_ABORTED status.
1368 */
1369 if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
1370 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1371 if (--cmd->outstanding_r2ts < 1) {
1372 iscsit_stop_dataout_timer(cmd);
1373 transport_check_aborted_status(
1374 se_cmd, 1);
1375 }
1376
1377 return iscsit_dump_data_payload(conn, payload_length, 1);
1378 }
1379 }
1380 /*
1381	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1382 * within-command recovery checks before receiving the payload.
1383 */
1384 ret = iscsit_check_pre_dataout(cmd, buf);
1385 if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
1386 return 0;
1387 else if (ret == DATAOUT_CANNOT_RECOVER)
1388 return -1;
1389
1390 rx_size += payload_length;
1391 iov = &cmd->iov_data[0];
1392
1393 iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
1394 if (iov_ret < 0)
1395 return -1;
1396
1397 iov_count += iov_ret;
1398
1399 padding = ((-payload_length) & 3);
1400 if (padding != 0) {
1401 iov[iov_count].iov_base = cmd->pad_bytes;
1402 iov[iov_count++].iov_len = padding;
1403 rx_size += padding;
1404 pr_debug("Receiving %u padding bytes.\n", padding);
1405 }
1406
1407 if (conn->conn_ops->DataDigest) {
1408 iov[iov_count].iov_base = &checksum;
1409 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1410 rx_size += ISCSI_CRC_LEN;
1411 }
1412
1413 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1414
1415 iscsit_unmap_iovec(cmd);
1416
1417 if (rx_got != rx_size)
1418 return -1;
1419
1420 if (conn->conn_ops->DataDigest) {
1421 u32 data_crc;
1422
1423 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
1424 hdr->offset, payload_length, padding,
1425 cmd->pad_bytes);
1426
1427 if (checksum != data_crc) {
1428 pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1429 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1430 " does not match computed 0x%08x\n",
1431 hdr->itt, hdr->offset, payload_length,
1432 hdr->datasn, checksum, data_crc);
1433 data_crc_failed = 1;
1434 } else {
1435 pr_debug("Got CRC32C DataDigest 0x%08x for"
1436 " %u bytes of Data Out\n", checksum,
1437 payload_length);
1438 }
1439 }
1440 /*
1441 * Increment post receive data and CRC values or perform
1442 * within-command recovery.
1443 */
1444 ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
1445 if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
1446 return 0;
1447 else if (ret == DATAOUT_SEND_R2T) {
1448 iscsit_set_dataout_sequence_values(cmd);
1449 iscsit_build_r2ts_for_cmd(cmd, conn, 0);
1450 } else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
1451 /*
1452 * Handle extra special case for out of order
1453 * Unsolicited Data Out.
1454 */
1455 spin_lock_bh(&cmd->istate_lock);
1456 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1457 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1458 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1459 spin_unlock_bh(&cmd->istate_lock);
1460
1461 iscsit_stop_dataout_timer(cmd);
1462 return (!ooo_cmdsn) ? transport_generic_handle_data(
1463 &cmd->se_cmd) : 0;
1464 } else /* DATAOUT_CANNOT_RECOVER */
1465 return -1;
1466
1467 return 0;
1468}
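The padding = ((-payload_length) & 3) computation above (and again in the NOPOUT path below) is the usual trick for the iSCSI rule that a data segment is padded out to a 4-byte boundary: negating the unsigned length and masking the low two bits gives exactly the number of pad bytes required. A tiny standalone check of that identity; pad4 is my own illustrative name:

#include <stdio.h>
#include <stdint.h>

/* Pad bytes needed to round len up to the next multiple of 4,
 * written the same way as the DataOut and NOPOUT paths above. */
static uint32_t pad4(uint32_t len)
{
	return (-len) & 3;
}

int main(void)
{
	uint32_t len;

	for (len = 0; len < 9; len++)
		printf("len=%u pad=%u padded=%u\n", len, pad4(len), len + pad4(len));
	/* The pad value cycles 0,3,2,1 and len+pad is always a multiple of 4. */
	return 0;
}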
1469
1470static int iscsit_handle_nop_out(
1471 struct iscsi_conn *conn,
1472 unsigned char *buf)
1473{
1474 unsigned char *ping_data = NULL;
1475 int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
1476 u32 checksum, data_crc, padding = 0, payload_length;
1477 u64 lun;
1478 struct iscsi_cmd *cmd = NULL;
1479 struct kvec *iov = NULL;
1480 struct iscsi_nopout *hdr;
1481
1482 hdr = (struct iscsi_nopout *) buf;
1483 payload_length = ntoh24(hdr->dlength);
1484 lun = get_unaligned_le64(&hdr->lun);
1485 hdr->itt = be32_to_cpu(hdr->itt);
1486 hdr->ttt = be32_to_cpu(hdr->ttt);
1487 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1488 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1489
1490 if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1491 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1492 " not set, protocol error.\n");
1493 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1494 buf, conn);
1495 }
1496
1497 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
1498 pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1499 " greater than MaxRecvDataSegmentLength: %u, protocol"
1500 " error.\n", payload_length,
1501 conn->conn_ops->MaxRecvDataSegmentLength);
1502 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1503 buf, conn);
1504 }
1505
1506	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1507 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1508 (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
1509 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1510 payload_length);
1511 /*
1512	 * This is not a response to an Unsolicited NopIN, which means
1513 * it can either be a NOPOUT ping request (with a valid ITT),
1514 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1515	 * Either way, make sure we allocate a struct iscsi_cmd, as both
1516 * can contain ping data.
1517 */
1518 if (hdr->ttt == 0xFFFFFFFF) {
1519 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1520 if (!cmd)
1521 return iscsit_add_reject(
1522 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1523 1, buf, conn);
1524
1525 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
1526 cmd->i_state = ISTATE_SEND_NOPIN;
1527 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1528 1 : 0);
1529 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1530 cmd->targ_xfer_tag = 0xFFFFFFFF;
1531 cmd->cmd_sn = hdr->cmdsn;
1532 cmd->exp_stat_sn = hdr->exp_statsn;
1533 cmd->data_direction = DMA_NONE;
1534 }
1535
1536 if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
1537 rx_size = payload_length;
1538 ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1539 if (!ping_data) {
1540 pr_err("Unable to allocate memory for"
1541 " NOPOUT ping data.\n");
1542 ret = -1;
1543 goto out;
1544 }
1545
1546 iov = &cmd->iov_misc[0];
1547 iov[niov].iov_base = ping_data;
1548 iov[niov++].iov_len = payload_length;
1549
1550 padding = ((-payload_length) & 3);
1551 if (padding != 0) {
1552 pr_debug("Receiving %u additional bytes"
1553 " for padding.\n", padding);
1554 iov[niov].iov_base = &cmd->pad_bytes;
1555 iov[niov++].iov_len = padding;
1556 rx_size += padding;
1557 }
1558 if (conn->conn_ops->DataDigest) {
1559 iov[niov].iov_base = &checksum;
1560 iov[niov++].iov_len = ISCSI_CRC_LEN;
1561 rx_size += ISCSI_CRC_LEN;
1562 }
1563
1564 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1565 if (rx_got != rx_size) {
1566 ret = -1;
1567 goto out;
1568 }
1569
1570 if (conn->conn_ops->DataDigest) {
1571 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1572 ping_data, payload_length,
1573 padding, cmd->pad_bytes,
1574 (u8 *)&data_crc);
1575
1576 if (checksum != data_crc) {
1577 pr_err("Ping data CRC32C DataDigest"
1578 " 0x%08x does not match computed 0x%08x\n",
1579 checksum, data_crc);
1580 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1581 pr_err("Unable to recover from"
1582 " NOPOUT Ping DataCRC failure while in"
1583 " ERL=0.\n");
1584 ret = -1;
1585 goto out;
1586 } else {
1587 /*
1588 * Silently drop this PDU and let the
1589 * initiator plug the CmdSN gap.
1590 */
1591 pr_debug("Dropping NOPOUT"
1592 " Command CmdSN: 0x%08x due to"
1593 " DataCRC error.\n", hdr->cmdsn);
1594 ret = 0;
1595 goto out;
1596 }
1597 } else {
1598 pr_debug("Got CRC32C DataDigest"
1599 " 0x%08x for %u bytes of ping data.\n",
1600 checksum, payload_length);
1601 }
1602 }
1603
1604 ping_data[payload_length] = '\0';
1605 /*
1606 * Attach ping data to struct iscsi_cmd->buf_ptr.
1607 */
1608 cmd->buf_ptr = (void *)ping_data;
1609 cmd->buf_ptr_size = payload_length;
1610
1611 pr_debug("Got %u bytes of NOPOUT ping"
1612 " data.\n", payload_length);
1613 pr_debug("Ping Data: \"%s\"\n", ping_data);
1614 }
1615
1616 if (hdr->itt != 0xFFFFFFFF) {
1617 if (!cmd) {
1618 pr_err("Checking CmdSN for NOPOUT,"
1619 " but cmd is NULL!\n");
1620 return -1;
1621 }
1622	/*
1623	 * The initiator is expecting a NOPIN ping reply.
1624	 */
1625 spin_lock_bh(&conn->cmd_lock);
1626 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1627 spin_unlock_bh(&conn->cmd_lock);
1628
1629 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1630
1631 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1632 iscsit_add_cmd_to_response_queue(cmd, conn,
1633 cmd->i_state);
1634 return 0;
1635 }
1636
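		/*
		 * iscsit_sequence_cmd() enforces CmdSN ordering: a lower than
		 * expected CmdSN is treated as a retransmission and silently
		 * dropped here, while an unrecoverable sequencing error causes
		 * the PDU to be rejected.
		 */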
1637 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1638 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1639 ret = 0;
1640 goto ping_out;
1641 }
1642 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1643 return iscsit_add_reject_from_cmd(
1644 ISCSI_REASON_PROTOCOL_ERROR,
1645 1, 0, buf, cmd);
1646
1647 return 0;
1648 }
1649
1650 if (hdr->ttt != 0xFFFFFFFF) {
1651 /*
1652 * This was a response to a unsolicited NOPIN ping.
1653 */
1654 cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
1655 if (!cmd)
1656 return -1;
1657
1658 iscsit_stop_nopin_response_timer(conn);
1659
1660 cmd->i_state = ISTATE_REMOVE;
1661 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
1662 iscsit_start_nopin_timer(conn);
1663 } else {
1664 /*
1665	 * The initiator is not expecting a NOPIN in response.
1666 * Just ignore for now.
1667 *
1668 * iSCSI v19-91 10.18
1669 * "A NOP-OUT may also be used to confirm a changed
1670 * ExpStatSN if another PDU will not be available
1671 * for a long time."
1672 */
1673 ret = 0;
1674 goto out;
1675 }
1676
1677 return 0;
1678out:
1679 if (cmd)
1680 iscsit_release_cmd(cmd);
1681ping_out:
1682 kfree(ping_data);
1683 return ret;
1684}
1685
1686static int iscsit_handle_task_mgt_cmd(
1687 struct iscsi_conn *conn,
1688 unsigned char *buf)
1689{
1690 struct iscsi_cmd *cmd;
1691 struct se_tmr_req *se_tmr;
1692 struct iscsi_tmr_req *tmr_req;
1693 struct iscsi_tm *hdr;
1694 u32 payload_length;
1695 int out_of_order_cmdsn = 0;
1696 int ret;
1697 u8 function;
1698
1699 hdr = (struct iscsi_tm *) buf;
1700 payload_length = ntoh24(hdr->dlength);
1701 hdr->itt = be32_to_cpu(hdr->itt);
1702 hdr->rtt = be32_to_cpu(hdr->rtt);
1703 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1704 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1705 hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
1706 hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
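	/*
	 * The TMR Function code is carried in the low bits of the flags
	 * byte; clearing the Final bit leaves just the function code.
	 */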
1707 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
1708 function = hdr->flags;
1709
1710 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
1711 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
1712 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
1713 hdr->rtt, hdr->refcmdsn, conn->cid);
1714
1715 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1716 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1717 (hdr->rtt != ISCSI_RESERVED_TAG))) {
1718 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
1719 hdr->rtt = ISCSI_RESERVED_TAG;
1720 }
1721
1722 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
1723 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1724 pr_err("Task Management Request TASK_REASSIGN not"
1725 " issued as immediate command, bad iSCSI Initiator"
1726 "implementation\n");
1727 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1728 buf, conn);
1729 }
1730 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1731 (hdr->refcmdsn != ISCSI_RESERVED_TAG))
1732 hdr->refcmdsn = ISCSI_RESERVED_TAG;
1733
1734 cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
1735 if (!cmd)
1736 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1737 1, buf, conn);
1738
1739 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
1740 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
1741 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1742 cmd->init_task_tag = hdr->itt;
1743 cmd->targ_xfer_tag = 0xFFFFFFFF;
1744 cmd->cmd_sn = hdr->cmdsn;
1745 cmd->exp_stat_sn = hdr->exp_statsn;
1746 se_tmr = cmd->se_cmd.se_tmr_req;
1747 tmr_req = cmd->tmr_req;
1748 /*
1749 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
1750 */
1751 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
1752 ret = iscsit_get_lun_for_tmr(cmd,
1753 get_unaligned_le64(&hdr->lun));
1754 if (ret < 0) {
1755 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1756 se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
1757 goto attach;
1758 }
1759 }
1760
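	/*
	 * Dispatch on the TMR function.  On failure the TMR response code
	 * is set and the command is queued for the tx thread via the
	 * attach path so a Task Management Response is still returned.
	 */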
1761 switch (function) {
1762 case ISCSI_TM_FUNC_ABORT_TASK:
1763 se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
1764 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
1765 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1766 goto attach;
1767 }
1768 break;
1769 case ISCSI_TM_FUNC_ABORT_TASK_SET:
1770 case ISCSI_TM_FUNC_CLEAR_ACA:
1771 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1772 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1773 break;
1774 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1775 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
1776 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1777 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
1778 goto attach;
1779 }
1780 break;
1781 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1782 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
1783 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1784 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
1785 goto attach;
1786 }
1787 break;
1788 case ISCSI_TM_FUNC_TASK_REASSIGN:
1789 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
1790 /*
1791 * Perform sanity checks on the ExpDataSN only if the
1792 * TASK_REASSIGN was successful.
1793 */
1794 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
1795 break;
1796
1797 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
1798 return iscsit_add_reject_from_cmd(
1799 ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
1800 buf, cmd);
1801 break;
1802 default:
1803 pr_err("Unknown TMR function: 0x%02x, protocol"
1804 " error.\n", function);
1805 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1806 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
1807 goto attach;
1808 }
1809
1810 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1811 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
1812 se_tmr->call_transport = 1;
1813attach:
1814 spin_lock_bh(&conn->cmd_lock);
1815 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1816 spin_unlock_bh(&conn->cmd_lock);
1817
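	/*
	 * Non-immediate TMRs are subject to CmdSN ordering: a higher than
	 * expected CmdSN defers the response until the gap is filled, a
	 * lower than expected CmdSN is silently dropped, and an
	 * unrecoverable error causes the PDU to be rejected.
	 */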
1818 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1819 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1820 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
1821 out_of_order_cmdsn = 1;
1822 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1823 return 0;
1824 } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
1825 return iscsit_add_reject_from_cmd(
1826 ISCSI_REASON_PROTOCOL_ERROR,
1827 1, 0, buf, cmd);
1828 }
1829 }
1830 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1831
1832 if (out_of_order_cmdsn)
1833 return 0;
1834 /*
1835 * Found the referenced task, send to transport for processing.
1836 */
1837 if (se_tmr->call_transport)
1838 return transport_generic_handle_tmr(&cmd->se_cmd);
1839
1840 /*
1841 * Could not find the referenced LUN, task, or Task Management
1842 * command not authorized or supported. Change state and
1843 * let the tx_thread send the response.
1844 *
1845 * For connection recovery, this is also the default action for
1846 * TMR TASK_REASSIGN.
1847 */
1848 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1849 return 0;
1850}
1851
1852/* #warning FIXME: Support Text Command parameters besides SendTargets */
1853static int iscsit_handle_text_cmd(
1854 struct iscsi_conn *conn,
1855 unsigned char *buf)
1856{
1857 char *text_ptr, *text_in;
1858 int cmdsn_ret, niov = 0, rx_got, rx_size;
1859 u32 checksum = 0, data_crc = 0, payload_length;
1860	u32 padding = 0, text_length = 0;
	u8 pad_bytes[4];	/* local pad buffer; cmd is not yet allocated when the payload is received */
1861 struct iscsi_cmd *cmd;
1862 struct kvec iov[3];
1863 struct iscsi_text *hdr;
1864
1865 hdr = (struct iscsi_text *) buf;
1866 payload_length = ntoh24(hdr->dlength);
1867 hdr->itt = be32_to_cpu(hdr->itt);
1868 hdr->ttt = be32_to_cpu(hdr->ttt);
1869 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1870 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1871
1872 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
1873 pr_err("Unable to accept text parameter length: %u"
1874 "greater than MaxRecvDataSegmentLength %u.\n",
1875 payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
1876 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1877 buf, conn);
1878 }
1879
1880 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
1881 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
1882 hdr->exp_statsn, payload_length);
1883
1884 rx_size = text_length = payload_length;
1885 if (text_length) {
1886 text_in = kzalloc(text_length, GFP_KERNEL);
1887 if (!text_in) {
1888 pr_err("Unable to allocate memory for"
1889 " incoming text parameters\n");
1890 return -1;
1891 }
1892
1893 memset(iov, 0, 3 * sizeof(struct kvec));
1894 iov[niov].iov_base = text_in;
1895 iov[niov++].iov_len = text_length;
1896
1897 padding = ((-payload_length) & 3);
1898 if (padding != 0) {
1899	iov[niov].iov_base = pad_bytes;
1900 iov[niov++].iov_len = padding;
1901 rx_size += padding;
1902 pr_debug("Receiving %u additional bytes"
1903 " for padding.\n", padding);
1904 }
1905 if (conn->conn_ops->DataDigest) {
1906 iov[niov].iov_base = &checksum;
1907 iov[niov++].iov_len = ISCSI_CRC_LEN;
1908 rx_size += ISCSI_CRC_LEN;
1909 }
1910
1911 rx_got = rx_data(conn, &iov[0], niov, rx_size);
1912 if (rx_got != rx_size) {
1913 kfree(text_in);
1914 return -1;
1915 }
1916
1917 if (conn->conn_ops->DataDigest) {
1918 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1919 text_in, text_length,
1920	padding, pad_bytes,
1921 (u8 *)&data_crc);
1922
1923 if (checksum != data_crc) {
1924 pr_err("Text data CRC32C DataDigest"
1925 " 0x%08x does not match computed"
1926 " 0x%08x\n", checksum, data_crc);
1927 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1928 pr_err("Unable to recover from"
1929 " Text Data digest failure while in"
1930 " ERL=0.\n");
1931 kfree(text_in);
1932 return -1;
1933 } else {
1934 /*
1935 * Silently drop this PDU and let the
1936 * initiator plug the CmdSN gap.
1937 */
1938 pr_debug("Dropping Text"
1939 " Command CmdSN: 0x%08x due to"
1940 " DataCRC error.\n", hdr->cmdsn);
1941 kfree(text_in);
1942 return 0;
1943 }
1944 } else {
1945 pr_debug("Got CRC32C DataDigest"
1946 " 0x%08x for %u bytes of text data.\n",
1947 checksum, text_length);
1948 }
1949 }
1950 text_in[text_length - 1] = '\0';
1951 pr_debug("Successfully read %d bytes of text"
1952 " data.\n", text_length);
1953
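		/*
		 * Only the SendTargets=All key/value pair is currently
		 * handled; any other Text payload fails the request
		 * (see the FIXME above this function).
		 */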
1954 if (strncmp("SendTargets", text_in, 11) != 0) {
1955 pr_err("Received Text Data that is not"
1956 " SendTargets, cannot continue.\n");
1957 kfree(text_in);
1958 return -1;
1959 }
1960 text_ptr = strchr(text_in, '=');
1961 if (!text_ptr) {
1962 pr_err("No \"=\" separator found in Text Data,"
1963 " cannot continue.\n");
1964 kfree(text_in);
1965 return -1;
1966 }
1967 if (strncmp("=All", text_ptr, 4) != 0) {
1968 pr_err("Unable to locate All value for"
1969 " SendTargets key, cannot continue.\n");
1970 kfree(text_in);
1971 return -1;
1972 }
1973/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
1974 kfree(text_in);
1975 }
1976
1977 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1978 if (!cmd)
1979 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1980 1, buf, conn);
1981
1982 cmd->iscsi_opcode = ISCSI_OP_TEXT;
1983 cmd->i_state = ISTATE_SEND_TEXTRSP;
1984 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1985 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1986 cmd->targ_xfer_tag = 0xFFFFFFFF;
1987 cmd->cmd_sn = hdr->cmdsn;
1988 cmd->exp_stat_sn = hdr->exp_statsn;
1989 cmd->data_direction = DMA_NONE;
1990
1991 spin_lock_bh(&conn->cmd_lock);
1992 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1993 spin_unlock_bh(&conn->cmd_lock);
1994
1995 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1996
1997 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1998 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1999 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2000 return iscsit_add_reject_from_cmd(
2001 ISCSI_REASON_PROTOCOL_ERROR,
2002 1, 0, buf, cmd);
2003
2004 return 0;
2005 }
2006
2007 return iscsit_execute_cmd(cmd, 0);
2008}
2009
2010int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2011{
2012 struct iscsi_conn *conn_p;
2013 struct iscsi_session *sess = conn->sess;
2014
2015 pr_debug("Received logout request CLOSESESSION on CID: %hu"
2016 " for SID: %u.\n", conn->cid, conn->sess->sid);
2017
2018 atomic_set(&sess->session_logout, 1);
2019 atomic_set(&conn->conn_logout_remove, 1);
2020 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2021
2022 iscsit_inc_conn_usage_count(conn);
2023 iscsit_inc_session_usage_count(sess);
2024
2025 spin_lock_bh(&sess->conn_lock);
2026 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2027 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2028 continue;
2029
2030 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2031 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2032 }
2033 spin_unlock_bh(&sess->conn_lock);
2034
2035 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2036
2037 return 0;
2038}
2039
2040int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2041{
2042 struct iscsi_conn *l_conn;
2043 struct iscsi_session *sess = conn->sess;
2044
2045 pr_debug("Received logout request CLOSECONNECTION for CID:"
2046 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2047
2048 /*
2049 * A Logout Request with a CLOSECONNECTION reason code for a CID
2050 * can arrive on a connection with a differing CID.
2051 */
2052 if (conn->cid == cmd->logout_cid) {
2053 spin_lock_bh(&conn->state_lock);
2054 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2055 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2056
2057 atomic_set(&conn->conn_logout_remove, 1);
2058 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2059 iscsit_inc_conn_usage_count(conn);
2060
2061 spin_unlock_bh(&conn->state_lock);
2062 } else {
2063 /*
2064 * Handle all different cid CLOSECONNECTION requests in
2065	 * iscsit_logout_post_handler_diffcid() so as to give enough
2066	 * time for any non-immediate command's CmdSN to be
2067 * acknowledged on the connection in question.
2068 *
2069 * Here we simply make sure the CID is still around.
2070 */
2071 l_conn = iscsit_get_conn_from_cid(sess,
2072 cmd->logout_cid);
2073 if (!l_conn) {
2074 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2075 iscsit_add_cmd_to_response_queue(cmd, conn,
2076 cmd->i_state);
2077 return 0;
2078 }
2079
2080 iscsit_dec_conn_usage_count(l_conn);
2081 }
2082
2083 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2084
2085 return 0;
2086}
2087
2088int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2089{
2090 struct iscsi_session *sess = conn->sess;
2091
2092 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2093 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2094
2095 if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2096 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2097 " while ERL!=2.\n");
2098 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2099 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2100 return 0;
2101 }
2102
2103 if (conn->cid == cmd->logout_cid) {
2104 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2105 " with CID: %hu on CID: %hu, implementation error.\n",
2106 cmd->logout_cid, conn->cid);
2107 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2108 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2109 return 0;
2110 }
2111
2112 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2113
2114 return 0;
2115}
2116
2117static int iscsit_handle_logout_cmd(
2118 struct iscsi_conn *conn,
2119 unsigned char *buf)
2120{
2121 int cmdsn_ret, logout_remove = 0;
2122 u8 reason_code = 0;
2123 struct iscsi_cmd *cmd;
2124 struct iscsi_logout *hdr;
2125 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2126
2127 hdr = (struct iscsi_logout *) buf;
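	/*
	 * The Logout reason code is carried in the low seven bits of the
	 * flags byte.
	 */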
2128 reason_code = (hdr->flags & 0x7f);
2129 hdr->itt = be32_to_cpu(hdr->itt);
2130 hdr->cid = be16_to_cpu(hdr->cid);
2131 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
2132 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
2133
2134 if (tiqn) {
2135 spin_lock(&tiqn->logout_stats.lock);
2136 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2137 tiqn->logout_stats.normal_logouts++;
2138 else
2139 tiqn->logout_stats.abnormal_logouts++;
2140 spin_unlock(&tiqn->logout_stats.lock);
2141 }
2142
2143 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2144 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2145 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2146 hdr->cid, conn->cid);
2147
2148 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2149 pr_err("Received logout request on connection that"
2150 " is not in logged in state, ignoring request.\n");
2151 return 0;
2152 }
2153
2154 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
2155 if (!cmd)
2156 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
2157 buf, conn);
2158
2159 cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2160 cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2161 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2162 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2163 cmd->targ_xfer_tag = 0xFFFFFFFF;
2164 cmd->cmd_sn = hdr->cmdsn;
2165 cmd->exp_stat_sn = hdr->exp_statsn;
2166 cmd->logout_cid = hdr->cid;
2167 cmd->logout_reason = reason_code;
2168 cmd->data_direction = DMA_NONE;
2169
2170 /*
2171 * We need to sleep in these cases (by returning 1) until the Logout
2172 * Response gets sent in the tx thread.
2173 */
2174 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2175 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2176 (hdr->cid == conn->cid)))
2177 logout_remove = 1;
2178
2179 spin_lock_bh(&conn->cmd_lock);
2180 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
2181 spin_unlock_bh(&conn->cmd_lock);
2182
2183 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2184 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
2185
2186 /*
2187 * Immediate commands are executed, well, immediately.
2188 * Non-Immediate Logout Commands are executed in CmdSN order.
2189 */
2190 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
2191 int ret = iscsit_execute_cmd(cmd, 0);
2192
2193 if (ret < 0)
2194 return ret;
2195 } else {
2196 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
2197 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2198 logout_remove = 0;
2199 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2200 return iscsit_add_reject_from_cmd(
2201 ISCSI_REASON_PROTOCOL_ERROR,
2202 1, 0, buf, cmd);
2203 }
2204 }
2205
2206 return logout_remove;
2207}
2208
2209static int iscsit_handle_snack(
2210 struct iscsi_conn *conn,
2211 unsigned char *buf)
2212{
2213 u32 unpacked_lun;
2214 u64 lun;
2215 struct iscsi_snack *hdr;
2216
2217 hdr = (struct iscsi_snack *) buf;
2218 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2219 lun = get_unaligned_le64(&hdr->lun);
2220 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2221 hdr->itt = be32_to_cpu(hdr->itt);
2222 hdr->ttt = be32_to_cpu(hdr->ttt);
2223 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
2224 hdr->begrun = be32_to_cpu(hdr->begrun);
2225 hdr->runlength = be32_to_cpu(hdr->runlength);
2226
2227 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2228 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2229 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2230 hdr->begrun, hdr->runlength, conn->cid);
2231
2232 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2233 pr_err("Initiator sent SNACK request while in"
2234 " ErrorRecoveryLevel=0.\n");
2235 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2236 buf, conn);
2237 }
2238 /*
2239 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
2240 * call from inside iscsi_send_recovery_datain_or_r2t().
2241 */
2242 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2243 case 0:
2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
2247 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2248 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
2249 hdr->begrun, hdr->runlength);
2250 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2251 return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
2252 hdr->runlength);
2253 case ISCSI_FLAG_SNACK_TYPE_RDATA:
2254 /* FIXME: Support R-Data SNACK */
2255 pr_err("R-Data SNACK Not Supported.\n");
2256 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2257 buf, conn);
2258 default:
2259 pr_err("Unknown SNACK type 0x%02x, protocol"
2260 " error.\n", hdr->flags & 0x0f);
2261 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2262 buf, conn);
2263 }
2264
2265 return 0;
2266}
2267
2268static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
2269{
2270 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2271 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2272 wait_for_completion_interruptible_timeout(
2273 &conn->rx_half_close_comp,
2274 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2275 }
2276}
2277
2278static int iscsit_handle_immediate_data(
2279 struct iscsi_cmd *cmd,
2280 unsigned char *buf,
2281 u32 length)
2282{
2283 int iov_ret, rx_got = 0, rx_size = 0;
2284 u32 checksum, iov_count = 0, padding = 0;
2285 struct iscsi_conn *conn = cmd->conn;
2286 struct kvec *iov;
2287
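	/*
	 * Map the command's data buffers, starting at the current
	 * write_data_done offset, into kvecs so the immediate data is
	 * received directly into its final destination.
	 */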
2288 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
2289 if (iov_ret < 0)
2290 return IMMEDIATE_DATA_CANNOT_RECOVER;
2291
2292 rx_size = length;
2293 iov_count = iov_ret;
2294 iov = &cmd->iov_data[0];
2295
2296 padding = ((-length) & 3);
2297 if (padding != 0) {
2298 iov[iov_count].iov_base = cmd->pad_bytes;
2299 iov[iov_count++].iov_len = padding;
2300 rx_size += padding;
2301 }
2302
2303 if (conn->conn_ops->DataDigest) {
2304 iov[iov_count].iov_base = &checksum;
2305 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2306 rx_size += ISCSI_CRC_LEN;
2307 }
2308
2309 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2310
2311 iscsit_unmap_iovec(cmd);
2312
2313 if (rx_got != rx_size) {
2314 iscsit_rx_thread_wait_for_tcp(conn);
2315 return IMMEDIATE_DATA_CANNOT_RECOVER;
2316 }
2317
2318 if (conn->conn_ops->DataDigest) {
2319 u32 data_crc;
2320
2321 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
2322 cmd->write_data_done, length, padding,
2323 cmd->pad_bytes);
2324
2325 if (checksum != data_crc) {
2326 pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2327 " does not match computed 0x%08x\n", checksum,
2328 data_crc);
2329
2330 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2331 pr_err("Unable to recover from"
2332 " Immediate Data digest failure while"
2333 " in ERL=0.\n");
2334 iscsit_add_reject_from_cmd(
2335 ISCSI_REASON_DATA_DIGEST_ERROR,
2336 1, 0, buf, cmd);
2337 return IMMEDIATE_DATA_CANNOT_RECOVER;
2338 } else {
2339 iscsit_add_reject_from_cmd(
2340 ISCSI_REASON_DATA_DIGEST_ERROR,
2341 0, 0, buf, cmd);
2342 return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2343 }
2344 } else {
2345 pr_debug("Got CRC32C DataDigest 0x%08x for"
2346 " %u bytes of Immediate Data\n", checksum,
2347 length);
2348 }
2349 }
2350
2351 cmd->write_data_done += length;
2352
2353 if (cmd->write_data_done == cmd->data_length) {
2354 spin_lock_bh(&cmd->istate_lock);
2355 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2356 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2357 spin_unlock_bh(&cmd->istate_lock);
2358 }
2359
2360 return IMMEDIATE_DATA_NORMAL_OPERATION;
2361}
2362
2363/*
2364 * Called with sess->conn_lock held.
2365 */
2366/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2367 with active network interface */
2368static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
2369{
2370 struct iscsi_cmd *cmd;
2371	struct iscsi_conn *conn_p;
	bool found = false;
2372
2373	/*
2374	 * Only send an Asynchronous Message on connections whose network
2375	 * interface is still functional.
2376	 */
2377	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2378	if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2379	iscsit_inc_conn_usage_count(conn_p);
	found = true;
2380	break;
2381	}
2382	}
2383
2384	if (!found)
2385	return;
2386
2387 cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
2388 if (!cmd) {
2389 iscsit_dec_conn_usage_count(conn_p);
2390 return;
2391 }
2392
2393 cmd->logout_cid = conn->cid;
2394 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2395 cmd->i_state = ISTATE_SEND_ASYNCMSG;
2396
2397 spin_lock_bh(&conn_p->cmd_lock);
2398 list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
2399 spin_unlock_bh(&conn_p->cmd_lock);
2400
2401 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2402 iscsit_dec_conn_usage_count(conn_p);
2403}
2404
2405static int iscsit_send_conn_drop_async_message(
2406 struct iscsi_cmd *cmd,
2407 struct iscsi_conn *conn)
2408{
2409 struct iscsi_async *hdr;
2410
2411 cmd->tx_size = ISCSI_HDR_LEN;
2412 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2413
2414 hdr = (struct iscsi_async *) cmd->pdu;
2415 hdr->opcode = ISCSI_OP_ASYNC_EVENT;
2416 hdr->flags = ISCSI_FLAG_CMD_FINAL;
2417 cmd->init_task_tag = 0xFFFFFFFF;
2418 cmd->targ_xfer_tag = 0xFFFFFFFF;
2419 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2420 cmd->stat_sn = conn->stat_sn++;
2421 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2422 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2423 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2424 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2425 hdr->param1 = cpu_to_be16(cmd->logout_cid);
2426 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2427 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2428
2429 if (conn->conn_ops->HeaderDigest) {
2430 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2431
2432 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2433 (unsigned char *)hdr, ISCSI_HDR_LEN,
2434 0, NULL, (u8 *)header_digest);
2435
2436 cmd->tx_size += ISCSI_CRC_LEN;
2437 pr_debug("Attaching CRC32C HeaderDigest to"
2438 " Async Message 0x%08x\n", *header_digest);
2439 }
2440
2441 cmd->iov_misc[0].iov_base = cmd->pdu;
2442 cmd->iov_misc[0].iov_len = cmd->tx_size;
2443 cmd->iov_misc_count = 1;
2444
2445 pr_debug("Sending Connection Dropped Async Message StatSN:"
2446 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2447 cmd->logout_cid, conn->cid);
2448 return 0;
2449}
2450
2451static int iscsit_send_data_in(
2452 struct iscsi_cmd *cmd,
2453 struct iscsi_conn *conn,
2454 int *eodr)
2455{
2456 int iov_ret = 0, set_statsn = 0;
2457 u32 iov_count = 0, tx_size = 0;
2458 struct iscsi_datain datain;
2459 struct iscsi_datain_req *dr;
2460 struct iscsi_data_rsp *hdr;
2461 struct kvec *iov;
2462
2463 memset(&datain, 0, sizeof(struct iscsi_datain));
2464 dr = iscsit_get_datain_values(cmd, &datain);
2465 if (!dr) {
2466 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2467 cmd->init_task_tag);
2468 return -1;
2469 }
2470
2471 /*
2472 * Be paranoid and double check the logic for now.
2473 */
2474 if ((datain.offset + datain.length) > cmd->data_length) {
2475 pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2476 " datain.length: %u exceeds cmd->data_length: %u\n",
2477 cmd->init_task_tag, datain.offset, datain.length,
2478 cmd->data_length);
2479 return -1;
2480 }
2481
2482 spin_lock_bh(&conn->sess->session_stats_lock);
2483 conn->sess->tx_data_octets += datain.length;
2484 if (conn->sess->se_sess->se_node_acl) {
2485 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
2486 conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
2487 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
2488 }
2489 spin_unlock_bh(&conn->sess->session_stats_lock);
2490 /*
2491	 * Special case for successful execution with both DATAIN
2492 * and Sense Data.
2493 */
2494 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2495 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2496 datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2497 else {
2498 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2499 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2500 iscsit_increment_maxcmdsn(cmd, conn->sess);
2501 cmd->stat_sn = conn->stat_sn++;
2502 set_statsn = 1;
2503 } else if (dr->dr_complete ==
2504 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2505 set_statsn = 1;
2506 }
2507
2508 hdr = (struct iscsi_data_rsp *) cmd->pdu;
2509 memset(hdr, 0, ISCSI_HDR_LEN);
2510 hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2511 hdr->flags = datain.flags;
2512 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2513 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2514 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2515 hdr->residual_count = cpu_to_be32(cmd->residual_count);
2516 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2517 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2518 hdr->residual_count = cpu_to_be32(cmd->residual_count);
2519 }
2520 }
2521 hton24(hdr->dlength, datain.length);
2522 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2523 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2524 (struct scsi_lun *)&hdr->lun);
2525 else
2526 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2527
2528 hdr->itt = cpu_to_be32(cmd->init_task_tag);
2529 hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
2530 cpu_to_be32(cmd->targ_xfer_tag) :
2531 0xFFFFFFFF;
2532 hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
2533 0xFFFFFFFF;
2534 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2535 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2536 hdr->datasn = cpu_to_be32(datain.data_sn);
2537 hdr->offset = cpu_to_be32(datain.offset);
2538
2539 iov = &cmd->iov_data[0];
2540 iov[iov_count].iov_base = cmd->pdu;
2541 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
2542 tx_size += ISCSI_HDR_LEN;
2543
2544 if (conn->conn_ops->HeaderDigest) {
2545 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2546
2547 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2548 (unsigned char *)hdr, ISCSI_HDR_LEN,
2549 0, NULL, (u8 *)header_digest);
2550
2551 iov[0].iov_len += ISCSI_CRC_LEN;
2552 tx_size += ISCSI_CRC_LEN;
2553
2554 pr_debug("Attaching CRC32 HeaderDigest"
2555 " for DataIN PDU 0x%08x\n", *header_digest);
2556 }
2557
2558 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
2559 if (iov_ret < 0)
2560 return -1;
2561
2562 iov_count += iov_ret;
2563 tx_size += datain.length;
2564
2565 cmd->padding = ((-datain.length) & 3);
2566 if (cmd->padding) {
2567 iov[iov_count].iov_base = cmd->pad_bytes;
2568 iov[iov_count++].iov_len = cmd->padding;
2569 tx_size += cmd->padding;
2570
2571 pr_debug("Attaching %u padding bytes\n",
2572 cmd->padding);
2573 }
2574 if (conn->conn_ops->DataDigest) {
2575 cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
2576 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
2577
2578 iov[iov_count].iov_base = &cmd->data_crc;
2579 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2580 tx_size += ISCSI_CRC_LEN;
2581
2582 pr_debug("Attached CRC32C DataDigest %d bytes, crc"
2583 " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
2584 }
2585
2586 cmd->iov_data_count = iov_count;
2587 cmd->tx_size = tx_size;
2588
2589 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2590 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2591 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2592 ntohl(hdr->offset), datain.length, conn->cid);
2593
2594 if (dr->dr_complete) {
2595 *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2596 2 : 1;
2597 iscsit_free_datain_req(cmd, dr);
2598 }
2599
2600 return 0;
2601}
2602
2603static int iscsit_send_logout_response(
2604 struct iscsi_cmd *cmd,
2605 struct iscsi_conn *conn)
2606{
2607 int niov = 0, tx_size;
2608 struct iscsi_conn *logout_conn = NULL;
2609 struct iscsi_conn_recovery *cr = NULL;
2610 struct iscsi_session *sess = conn->sess;
2611 struct kvec *iov;
2612 struct iscsi_logout_rsp *hdr;
2613 /*
2614 * The actual shutting down of Sessions and/or Connections
2615 * for CLOSESESSION and CLOSECONNECTION Logout Requests
2616	 * is done in iscsit_logout_post_handler().
2617 */
2618 switch (cmd->logout_reason) {
2619 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2620 pr_debug("iSCSI session logout successful, setting"
2621 " logout response to ISCSI_LOGOUT_SUCCESS.\n");
2622 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2623 break;
2624 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2625 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2626 break;
2627 /*
2628 * For CLOSECONNECTION logout requests carrying
2629 * a matching logout CID -> local CID, the reference
2630 * for the local CID will have been incremented in
2631	 * iscsit_logout_closeconnection().
2632 *
2633 * For CLOSECONNECTION logout requests carrying
2634 * a different CID than the connection it arrived
2635 * on, the connection responding to cmd->logout_cid
2636 * is stopped in iscsit_logout_post_handler_diffcid().
2637 */
2638
2639 pr_debug("iSCSI CID: %hu logout on CID: %hu"
2640 " successful.\n", cmd->logout_cid, conn->cid);
2641 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2642 break;
2643 case ISCSI_LOGOUT_REASON_RECOVERY:
2644 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2645 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2646 break;
2647 /*
2648	 * If the connection is still active from our point of view,
2649 * force connection recovery to occur.
2650 */
2651 logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2652 cmd->logout_cid);
2653	if (logout_conn) {
2654 iscsit_connection_reinstatement_rcfr(logout_conn);
2655 iscsit_dec_conn_usage_count(logout_conn);
2656 }
2657
2658 cr = iscsit_get_inactive_connection_recovery_entry(
2659 conn->sess, cmd->logout_cid);
2660 if (!cr) {
2661 pr_err("Unable to locate CID: %hu for"
2662 " REMOVECONNFORRECOVERY Logout Request.\n",
2663 cmd->logout_cid);
2664 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2665 break;
2666 }
2667
2668 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2669
2670 pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2671 " for recovery for CID: %hu on CID: %hu successful.\n",
2672 cmd->logout_cid, conn->cid);
2673 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2674 break;
2675 default:
2676 pr_err("Unknown cmd->logout_reason: 0x%02x\n",
2677 cmd->logout_reason);
2678 return -1;
2679 }
2680
2681 tx_size = ISCSI_HDR_LEN;
2682 hdr = (struct iscsi_logout_rsp *)cmd->pdu;
2683 memset(hdr, 0, ISCSI_HDR_LEN);
2684 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2685 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2686 hdr->response = cmd->logout_response;
2687 hdr->itt = cpu_to_be32(cmd->init_task_tag);
2688 cmd->stat_sn = conn->stat_sn++;
2689 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2690
2691 iscsit_increment_maxcmdsn(cmd, conn->sess);
2692 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2693 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2694
2695 iov = &cmd->iov_misc[0];
2696 iov[niov].iov_base = cmd->pdu;
2697 iov[niov++].iov_len = ISCSI_HDR_LEN;
2698
2699 if (conn->conn_ops->HeaderDigest) {
2700 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2701
2702 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2703 (unsigned char *)hdr, ISCSI_HDR_LEN,
2704 0, NULL, (u8 *)header_digest);
2705
2706 iov[0].iov_len += ISCSI_CRC_LEN;
2707 tx_size += ISCSI_CRC_LEN;
2708 pr_debug("Attaching CRC32C HeaderDigest to"
2709 " Logout Response 0x%08x\n", *header_digest);
2710 }
2711 cmd->iov_misc_count = niov;
2712 cmd->tx_size = tx_size;
2713
2714 pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
2715 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
2716 cmd->init_task_tag, cmd->stat_sn, hdr->response,
2717 cmd->logout_cid, conn->cid);
2718
2719 return 0;
2720}
2721
2722/*
2723 * Unsolicited NOPIN, either requesting a response or not.
2724 */
2725static int iscsit_send_unsolicited_nopin(
2726 struct iscsi_cmd *cmd,
2727 struct iscsi_conn *conn,
2728 int want_response)
2729{
2730 int tx_size = ISCSI_HDR_LEN;
2731 struct iscsi_nopin *hdr;
2732
2733 hdr = (struct iscsi_nopin *) cmd->pdu;
2734 memset(hdr, 0, ISCSI_HDR_LEN);
2735 hdr->opcode = ISCSI_OP_NOOP_IN;
2736 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2737 hdr->itt = cpu_to_be32(cmd->init_task_tag);
2738 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
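	/*
	 * Unsolicited NOPINs advertise the current StatSN for reference but
	 * do not consume one, so conn->stat_sn is read here without being
	 * incremented.
	 */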
2739 cmd->stat_sn = conn->stat_sn;
2740 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2741 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2742 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2743
2744 if (conn->conn_ops->HeaderDigest) {
2745 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2746
2747 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2748 (unsigned char *)hdr, ISCSI_HDR_LEN,
2749 0, NULL, (u8 *)header_digest);
2750
2751 tx_size += ISCSI_CRC_LEN;
2752 pr_debug("Attaching CRC32C HeaderDigest to"
2753 " NopIN 0x%08x\n", *header_digest);
2754 }
2755
2756 cmd->iov_misc[0].iov_base = cmd->pdu;
2757 cmd->iov_misc[0].iov_len = tx_size;
2758 cmd->iov_misc_count = 1;
2759 cmd->tx_size = tx_size;
2760
2761 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
2762 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
2763
2764 return 0;
2765}
2766
2767static int iscsit_send_nopin_response(
2768 struct iscsi_cmd *cmd,
2769 struct iscsi_conn *conn)
2770{
2771 int niov = 0, tx_size;
2772 u32 padding = 0;
2773 struct kvec *iov;
2774 struct iscsi_nopin *hdr;
2775
2776 tx_size = ISCSI_HDR_LEN;
2777 hdr = (struct iscsi_nopin *) cmd->pdu;
2778 memset(hdr, 0, ISCSI_HDR_LEN);
2779 hdr->opcode = ISCSI_OP_NOOP_IN;
2780 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2781 hton24(hdr->dlength, cmd->buf_ptr_size);
2782 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2783 hdr->itt = cpu_to_be32(cmd->init_task_tag);
2784 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2785 cmd->stat_sn = conn->stat_sn++;
2786 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2787
2788 iscsit_increment_maxcmdsn(cmd, conn->sess);
2789 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2790 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2791
2792 iov = &cmd->iov_misc[0];
2793 iov[niov].iov_base = cmd->pdu;
2794 iov[niov++].iov_len = ISCSI_HDR_LEN;
2795
2796 if (conn->conn_ops->HeaderDigest) {
2797 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2798
2799 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2800 (unsigned char *)hdr, ISCSI_HDR_LEN,
2801 0, NULL, (u8 *)header_digest);
2802
2803 iov[0].iov_len += ISCSI_CRC_LEN;
2804 tx_size += ISCSI_CRC_LEN;
2805 pr_debug("Attaching CRC32C HeaderDigest"
2806 " to NopIn 0x%08x\n", *header_digest);
2807 }
2808
2809 /*
2810 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
2811 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
2812 */
2813 if (cmd->buf_ptr_size) {
2814 iov[niov].iov_base = cmd->buf_ptr;
2815 iov[niov++].iov_len = cmd->buf_ptr_size;
2816 tx_size += cmd->buf_ptr_size;
2817
2818 pr_debug("Echoing back %u bytes of ping"
2819 " data.\n", cmd->buf_ptr_size);
2820
2821 padding = ((-cmd->buf_ptr_size) & 3);
2822 if (padding != 0) {
2823 iov[niov].iov_base = &cmd->pad_bytes;
2824 iov[niov++].iov_len = padding;
2825 tx_size += padding;
2826 pr_debug("Attaching %u additional"
2827 " padding bytes.\n", padding);
2828 }
2829 if (conn->conn_ops->DataDigest) {
2830 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2831 cmd->buf_ptr, cmd->buf_ptr_size,
2832 padding, (u8 *)&cmd->pad_bytes,
2833 (u8 *)&cmd->data_crc);
2834
2835 iov[niov].iov_base = &cmd->data_crc;
2836 iov[niov++].iov_len = ISCSI_CRC_LEN;
2837 tx_size += ISCSI_CRC_LEN;
2838 pr_debug("Attached DataDigest for %u"
2839 " bytes of ping data, CRC 0x%08x\n",
2840 cmd->buf_ptr_size, cmd->data_crc);
2841 }
2842 }
2843
2844 cmd->iov_misc_count = niov;
2845 cmd->tx_size = tx_size;
2846
2847 pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
2848 " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
2849 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
2850
2851 return 0;
2852}
2853
2854int iscsit_send_r2t(
2855 struct iscsi_cmd *cmd,
2856 struct iscsi_conn *conn)
2857{
2858 int tx_size = 0;
2859 struct iscsi_r2t *r2t;
2860 struct iscsi_r2t_rsp *hdr;
2861
2862 r2t = iscsit_get_r2t_from_list(cmd);
2863 if (!r2t)
2864 return -1;
2865
2866 hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
2867 memset(hdr, 0, ISCSI_HDR_LEN);
2868 hdr->opcode = ISCSI_OP_R2T;
2869 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2870 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2871 (struct scsi_lun *)&hdr->lun);
2872 hdr->itt = cpu_to_be32(cmd->init_task_tag);
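	/*
	 * Allocate the next Target Transfer Tag from the per-session
	 * counter, skipping over the reserved value 0xFFFFFFFF.
	 */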
2873 spin_lock_bh(&conn->sess->ttt_lock);
2874 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
2875 if (r2t->targ_xfer_tag == 0xFFFFFFFF)
2876 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
2877 spin_unlock_bh(&conn->sess->ttt_lock);
2878 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
2879 hdr->statsn = cpu_to_be32(conn->stat_sn);
2880 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2881 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2882 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
2883 hdr->data_offset = cpu_to_be32(r2t->offset);
2884 hdr->data_length = cpu_to_be32(r2t->xfer_len);
2885
2886 cmd->iov_misc[0].iov_base = cmd->pdu;
2887 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
2888 tx_size += ISCSI_HDR_LEN;
2889
2890 if (conn->conn_ops->HeaderDigest) {
2891 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2892
2893 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2894 (unsigned char *)hdr, ISCSI_HDR_LEN,
2895 0, NULL, (u8 *)header_digest);
2896
2897 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
2898 tx_size += ISCSI_CRC_LEN;
2899 pr_debug("Attaching CRC32 HeaderDigest for R2T"
2900 " PDU 0x%08x\n", *header_digest);
2901 }
2902
2903 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
2904 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
2905 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
2906 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
2907 r2t->offset, r2t->xfer_len, conn->cid);
2908
2909 cmd->iov_misc_count = 1;
2910 cmd->tx_size = tx_size;
2911
2912 spin_lock_bh(&cmd->r2t_lock);
2913 r2t->sent_r2t = 1;
2914 spin_unlock_bh(&cmd->r2t_lock);
2915
2916 return 0;
2917}
2918
2919/*
2920 * type 0: Normal Operation.
2921 * type 1: Called from Storage Transport.
2922 * type 2: Called from iscsi_task_reassign_complete_write() for
2923 * connection recovery.
2924 */
2925int iscsit_build_r2ts_for_cmd(
2926 struct iscsi_cmd *cmd,
2927 struct iscsi_conn *conn,
2928 int type)
2929{
2930 int first_r2t = 1;
2931 u32 offset = 0, xfer_len = 0;
2932
2933 spin_lock_bh(&cmd->r2t_lock);
2934 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
2935 spin_unlock_bh(&cmd->r2t_lock);
2936 return 0;
2937 }
2938
2939 if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
2940 if (cmd->r2t_offset < cmd->write_data_done)
2941 cmd->r2t_offset = cmd->write_data_done;
2942
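	/*
	 * Build R2Ts until either MaxOutstandingR2T is reached or the final
	 * R2T for the command has been queued.  With DataSequenceInOrder
	 * each R2T requests up to MaxBurstLength bytes from the running
	 * r2t_offset; otherwise the offset and length come from the
	 * command's pre-built sequence list.
	 */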
2943 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
2944 if (conn->sess->sess_ops->DataSequenceInOrder) {
2945 offset = cmd->r2t_offset;
2946
2947 if (first_r2t && (type == 2)) {
2948 xfer_len = ((offset +
2949 (conn->sess->sess_ops->MaxBurstLength -
2950 cmd->next_burst_len) >
2951 cmd->data_length) ?
2952 (cmd->data_length - offset) :
2953 (conn->sess->sess_ops->MaxBurstLength -
2954 cmd->next_burst_len));
2955 } else {
2956 xfer_len = ((offset +
2957 conn->sess->sess_ops->MaxBurstLength) >
2958 cmd->data_length) ?
2959 (cmd->data_length - offset) :
2960 conn->sess->sess_ops->MaxBurstLength;
2961 }
2962 cmd->r2t_offset += xfer_len;
2963
2964 if (cmd->r2t_offset == cmd->data_length)
2965 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
2966 } else {
2967 struct iscsi_seq *seq;
2968
2969 seq = iscsit_get_seq_holder_for_r2t(cmd);
2970 if (!seq) {
2971 spin_unlock_bh(&cmd->r2t_lock);
2972 return -1;
2973 }
2974
2975 offset = seq->offset;
2976 xfer_len = seq->xfer_len;
2977
2978 if (cmd->seq_send_order == cmd->seq_count)
2979 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
2980 }
2981 cmd->outstanding_r2ts++;
2982 first_r2t = 0;
2983
2984 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
2985 spin_unlock_bh(&cmd->r2t_lock);
2986 return -1;
2987 }
2988
2989 if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
2990 break;
2991 }
2992 spin_unlock_bh(&cmd->r2t_lock);
2993
2994 return 0;
2995}
2996
2997static int iscsit_send_status(
2998 struct iscsi_cmd *cmd,
2999 struct iscsi_conn *conn)
3000{
3001 u8 iov_count = 0, recovery;
3002 u32 padding = 0, tx_size = 0;
3003 struct iscsi_scsi_rsp *hdr;
3004 struct kvec *iov;
3005
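	/*
	 * A cmd->i_state other than ISTATE_SEND_STATUS indicates this status
	 * PDU is being resent for recovery, in which case the previously
	 * assigned StatSN is reused rather than allocating a new one.
	 */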
3006 recovery = (cmd->i_state != ISTATE_SEND_STATUS);
3007 if (!recovery)
3008 cmd->stat_sn = conn->stat_sn++;
3009
3010 spin_lock_bh(&conn->sess->session_stats_lock);
3011 conn->sess->rsp_pdus++;
3012 spin_unlock_bh(&conn->sess->session_stats_lock);
3013
3014 hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
3015 memset(hdr, 0, ISCSI_HDR_LEN);
3016 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
3017 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3018 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3019 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3020 hdr->residual_count = cpu_to_be32(cmd->residual_count);
3021 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3022 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3023 hdr->residual_count = cpu_to_be32(cmd->residual_count);
3024 }
3025 hdr->response = cmd->iscsi_response;
3026 hdr->cmd_status = cmd->se_cmd.scsi_status;
3027 hdr->itt = cpu_to_be32(cmd->init_task_tag);
3028 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3029
3030 iscsit_increment_maxcmdsn(cmd, conn->sess);
3031 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3032 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3033
3034 iov = &cmd->iov_misc[0];
3035 iov[iov_count].iov_base = cmd->pdu;
3036 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3037 tx_size += ISCSI_HDR_LEN;
3038
3039 /*
3040 * Attach SENSE DATA payload to iSCSI Response PDU
3041 */
3042 if (cmd->se_cmd.sense_buffer &&
3043 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3044 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3045 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3046 hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
3047 iov[iov_count].iov_base = cmd->se_cmd.sense_buffer;
3048 iov[iov_count++].iov_len =
3049 (cmd->se_cmd.scsi_sense_length + padding);
3050 tx_size += cmd->se_cmd.scsi_sense_length;
3051
3052 if (padding) {
3053 memset(cmd->se_cmd.sense_buffer +
3054 cmd->se_cmd.scsi_sense_length, 0, padding);
3055 tx_size += padding;
3056 pr_debug("Adding %u bytes of padding to"
3057 " SENSE.\n", padding);
3058 }
3059
3060 if (conn->conn_ops->DataDigest) {
3061 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3062 cmd->se_cmd.sense_buffer,
3063 (cmd->se_cmd.scsi_sense_length + padding),
3064 0, NULL, (u8 *)&cmd->data_crc);
3065
3066 iov[iov_count].iov_base = &cmd->data_crc;
3067 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3068 tx_size += ISCSI_CRC_LEN;
3069
3070 pr_debug("Attaching CRC32 DataDigest for"
3071 " SENSE, %u bytes CRC 0x%08x\n",
3072 (cmd->se_cmd.scsi_sense_length + padding),
3073 cmd->data_crc);
3074 }
3075
3076 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3077 " Response PDU\n",
3078 cmd->se_cmd.scsi_sense_length);
3079 }
3080
3081 if (conn->conn_ops->HeaderDigest) {
3082 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3083
3084 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3085 (unsigned char *)hdr, ISCSI_HDR_LEN,
3086 0, NULL, (u8 *)header_digest);
3087
3088 iov[0].iov_len += ISCSI_CRC_LEN;
3089 tx_size += ISCSI_CRC_LEN;
3090 pr_debug("Attaching CRC32 HeaderDigest for Response"
3091 " PDU 0x%08x\n", *header_digest);
3092 }
3093
3094 cmd->iov_misc_count = iov_count;
3095 cmd->tx_size = tx_size;
3096
3097 pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3098 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3099 (!recovery) ? "" : "Recovery ", cmd->init_task_tag,
3100 cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);
3101
3102 return 0;
3103}
3104
3105static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3106{
3107 switch (se_tmr->response) {
3108 case TMR_FUNCTION_COMPLETE:
3109 return ISCSI_TMF_RSP_COMPLETE;
3110 case TMR_TASK_DOES_NOT_EXIST:
3111 return ISCSI_TMF_RSP_NO_TASK;
3112 case TMR_LUN_DOES_NOT_EXIST:
3113 return ISCSI_TMF_RSP_NO_LUN;
3114 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3115 return ISCSI_TMF_RSP_NOT_SUPPORTED;
3116 case TMR_FUNCTION_AUTHORIZATION_FAILED:
3117 return ISCSI_TMF_RSP_AUTH_FAILED;
3118 case TMR_FUNCTION_REJECTED:
3119 default:
3120 return ISCSI_TMF_RSP_REJECTED;
3121 }
3122}
3123
3124static int iscsit_send_task_mgt_rsp(
3125 struct iscsi_cmd *cmd,
3126 struct iscsi_conn *conn)
3127{
3128 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3129 struct iscsi_tm_rsp *hdr;
3130 u32 tx_size = 0;
3131
3132 hdr = (struct iscsi_tm_rsp *) cmd->pdu;
3133 memset(hdr, 0, ISCSI_HDR_LEN);
3134 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3135 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3136 hdr->itt = cpu_to_be32(cmd->init_task_tag);
3137 cmd->stat_sn = conn->stat_sn++;
3138 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3139
3140 iscsit_increment_maxcmdsn(cmd, conn->sess);
3141 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3142 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3143
3144 cmd->iov_misc[0].iov_base = cmd->pdu;
3145 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3146 tx_size += ISCSI_HDR_LEN;
3147
3148 if (conn->conn_ops->HeaderDigest) {
3149 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3150
3151 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3152 (unsigned char *)hdr, ISCSI_HDR_LEN,
3153 0, NULL, (u8 *)header_digest);
3154
3155 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3156 tx_size += ISCSI_CRC_LEN;
3157 pr_debug("Attaching CRC32 HeaderDigest for Task"
3158 " Mgmt Response PDU 0x%08x\n", *header_digest);
3159 }
3160
3161 cmd->iov_misc_count = 1;
3162 cmd->tx_size = tx_size;
3163
3164 pr_debug("Built Task Management Response ITT: 0x%08x,"
3165 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3166 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3167
3168 return 0;
3169}
3170
3171static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3172{
3173 char *payload = NULL;
3174 struct iscsi_conn *conn = cmd->conn;
3175 struct iscsi_portal_group *tpg;
3176 struct iscsi_tiqn *tiqn;
3177 struct iscsi_tpg_np *tpg_np;
3178 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3179 unsigned char buf[256];
3180
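	/*
	 * The SendTargets response is built as a series of NUL-terminated
	 * TargetName= and TargetAddress= key/value pairs.  The payload is
	 * capped at the smaller of MaxRecvDataSegmentLength and 32768 bytes
	 * (multi-PDU Text Responses are not yet supported, see the FIXME
	 * above iscsit_send_text_rsp()).
	 */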
3181 buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
3182 32768 : conn->conn_ops->MaxRecvDataSegmentLength;
3183
3184 memset(buf, 0, 256);
3185
3186 payload = kzalloc(buffer_len, GFP_KERNEL);
3187 if (!payload) {
3188 pr_err("Unable to allocate memory for sendtargets"
3189 " response.\n");
3190 return -ENOMEM;
3191 }
3192
3193 spin_lock(&tiqn_lock);
3194 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3195 len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
3196 len += 1;
3197
3198	if ((len + payload_len) > buffer_len) {
3200	end_of_buf = 1;
3201	goto eob;
3202	}
3203 memcpy((void *)payload + payload_len, buf, len);
3204 payload_len += len;
3205
3206 spin_lock(&tiqn->tiqn_tpg_lock);
3207 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3208
3209 spin_lock(&tpg->tpg_state_lock);
3210 if ((tpg->tpg_state == TPG_STATE_FREE) ||
3211 (tpg->tpg_state == TPG_STATE_INACTIVE)) {
3212 spin_unlock(&tpg->tpg_state_lock);
3213 continue;
3214 }
3215 spin_unlock(&tpg->tpg_state_lock);
3216
3217 spin_lock(&tpg->tpg_np_lock);
3218 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3219 tpg_np_list) {
3220 len = sprintf(buf, "TargetAddress="
3221 "%s%s%s:%hu,%hu",
3222 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
3223 "[" : "", tpg_np->tpg_np->np_ip,
3224 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
3225 "]" : "", tpg_np->tpg_np->np_port,
3226 tpg->tpgt);
3227 len += 1;
3228
3229 if ((len + payload_len) > buffer_len) {
3230 spin_unlock(&tpg->tpg_np_lock);
3231 spin_unlock(&tiqn->tiqn_tpg_lock);
3232 end_of_buf = 1;
3233 goto eob;
3234 }
3235 memcpy((void *)payload + payload_len, buf, len);
3236 payload_len += len;
3237 }
3238 spin_unlock(&tpg->tpg_np_lock);
3239 }
3240 spin_unlock(&tiqn->tiqn_tpg_lock);
3241eob:
3242 if (end_of_buf)
3243 break;
3244 }
3245 spin_unlock(&tiqn_lock);
3246
3247 cmd->buf_ptr = payload;
3248
3249 return payload_len;
3250}
3251
3252/*
3253 * FIXME: Add support for F_BIT and C_BIT when the length is longer than
3254 * MaxRecvDataSegmentLength.
3255 */
3256static int iscsit_send_text_rsp(
3257 struct iscsi_cmd *cmd,
3258 struct iscsi_conn *conn)
3259{
3260 struct iscsi_text_rsp *hdr;
3261 struct kvec *iov;
3262 u32 padding = 0, tx_size = 0;
3263 int text_length, iov_count = 0;
3264
3265 text_length = iscsit_build_sendtargets_response(cmd);
3266 if (text_length < 0)
3267 return text_length;
3268
3269 padding = ((-text_length) & 3);
3270 if (padding != 0) {
3271 memset(cmd->buf_ptr + text_length, 0, padding);
3272 pr_debug("Attaching %u additional bytes for"
3273 " padding.\n", padding);
3274 }
3275
3276 hdr = (struct iscsi_text_rsp *) cmd->pdu;
3277 memset(hdr, 0, ISCSI_HDR_LEN);
3278 hdr->opcode = ISCSI_OP_TEXT_RSP;
3279 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3280 hton24(hdr->dlength, text_length);
3281 hdr->itt = cpu_to_be32(cmd->init_task_tag);
3282 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3283 cmd->stat_sn = conn->stat_sn++;
3284 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3285
3286 iscsit_increment_maxcmdsn(cmd, conn->sess);
3287 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3288 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3289
3290 iov = &cmd->iov_misc[0];
3291
3292 iov[iov_count].iov_base = cmd->pdu;
3293 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3294 iov[iov_count].iov_base = cmd->buf_ptr;
3295 iov[iov_count++].iov_len = text_length + padding;
3296
3297 tx_size += (ISCSI_HDR_LEN + text_length + padding);
3298
3299 if (conn->conn_ops->HeaderDigest) {
3300 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3301
3302 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3303 (unsigned char *)hdr, ISCSI_HDR_LEN,
3304 0, NULL, (u8 *)header_digest);
3305
3306 iov[0].iov_len += ISCSI_CRC_LEN;
3307 tx_size += ISCSI_CRC_LEN;
3308 pr_debug("Attaching CRC32 HeaderDigest for"
3309 " Text Response PDU 0x%08x\n", *header_digest);
3310 }
3311
3312 if (conn->conn_ops->DataDigest) {
3313 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3314 cmd->buf_ptr, (text_length + padding),
3315 0, NULL, (u8 *)&cmd->data_crc);
3316
3317 iov[iov_count].iov_base = &cmd->data_crc;
3318 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3319 tx_size += ISCSI_CRC_LEN;
3320
3321 pr_debug("Attaching DataDigest for %u bytes of text"
3322 " data, CRC 0x%08x\n", (text_length + padding),
3323 cmd->data_crc);
3324 }
3325
3326 cmd->iov_misc_count = iov_count;
3327 cmd->tx_size = tx_size;
3328
3329 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
3330 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
3331 text_length, conn->cid);
3332 return 0;
3333}
3334
3335static int iscsit_send_reject(
3336 struct iscsi_cmd *cmd,
3337 struct iscsi_conn *conn)
3338{
3339 u32 iov_count = 0, tx_size = 0;
3340 struct iscsi_reject *hdr;
3341 struct kvec *iov;
3342
3343 hdr = (struct iscsi_reject *) cmd->pdu;
3344 hdr->opcode = ISCSI_OP_REJECT;
3345 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3346 hton24(hdr->dlength, ISCSI_HDR_LEN);
3347 cmd->stat_sn = conn->stat_sn++;
3348 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3349 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3350 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3351
3352 iov = &cmd->iov_misc[0];
3353
3354 iov[iov_count].iov_base = cmd->pdu;
3355 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3356 iov[iov_count].iov_base = cmd->buf_ptr;
3357 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3358
3359 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
3360
3361 if (conn->conn_ops->HeaderDigest) {
3362 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3363
3364 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3365 (unsigned char *)hdr, ISCSI_HDR_LEN,
3366 0, NULL, (u8 *)header_digest);
3367
3368 iov[0].iov_len += ISCSI_CRC_LEN;
3369 tx_size += ISCSI_CRC_LEN;
3370 pr_debug("Attaching CRC32 HeaderDigest for"
3371 " REJECT PDU 0x%08x\n", *header_digest);
3372 }
3373
3374 if (conn->conn_ops->DataDigest) {
3375 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3376 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
3377 0, NULL, (u8 *)&cmd->data_crc);
3378
3379 iov[iov_count].iov_base = &cmd->data_crc;
3380 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3381 tx_size += ISCSI_CRC_LEN;
3382 pr_debug("Attaching CRC32 DataDigest for REJECT"
3383 " PDU 0x%08x\n", cmd->data_crc);
3384 }
3385
3386 cmd->iov_misc_count = iov_count;
3387 cmd->tx_size = tx_size;
3388
3389 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3390 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3391
3392 return 0;
3393}
3394
3395static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
3396{
3397 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
3398 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
3399 wait_for_completion_interruptible_timeout(
3400 &conn->tx_half_close_comp,
3401 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
3402 }
3403}
3404
3405#ifdef CONFIG_SMP
3406
3407void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3408{
3409 struct iscsi_thread_set *ts = conn->thread_set;
3410 int ord, cpu;
3411 /*
3412 * thread_id is assigned from iscsit_global->ts_bitmap from
3413 * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
3414 *
3415 * Here we use thread_id to determine which CPU this
3416 * iSCSI connection's iscsi_thread_set will be scheduled to
3417 * execute on.
3418 */
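	/*
	 * thread_id modulo the number of online CPUs gives a round-robin
	 * ordinal; the loop below walks cpu_online_mask to the ord'th online
	 * CPU and pins the connection's cpumask to it.
	 */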
3419 ord = ts->thread_id % cpumask_weight(cpu_online_mask);
3420#if 0
3421 pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
3422 " thread_id: %d\n", ord, ts->thread_id);
3423#endif
3424 for_each_online_cpu(cpu) {
3425 if (ord-- == 0) {
3426 cpumask_set_cpu(cpu, conn->conn_cpumask);
3427 return;
3428 }
3429 }
3430 /*
3431 * This should never be reached.
3432 */
3433 dump_stack();
3434 cpumask_setall(conn->conn_cpumask);
3435}
3436
3437static inline void iscsit_thread_check_cpumask(
3438 struct iscsi_conn *conn,
3439 struct task_struct *p,
3440 int mode)
3441{
3442 char buf[128];
3443 /*
3444 * mode == 1 signals iscsi_target_tx_thread() usage.
3445 * mode == 0 signals iscsi_target_rx_thread() usage.
3446 */
3447 if (mode == 1) {
3448 if (!conn->conn_tx_reset_cpumask)
3449 return;
3450 conn->conn_tx_reset_cpumask = 0;
3451 } else {
3452 if (!conn->conn_rx_reset_cpumask)
3453 return;
3454 conn->conn_rx_reset_cpumask = 0;
3455 }
3456 /*
3457 * Update the CPU mask for this single kthread so that
3458 * both TX and RX kthreads are scheduled to run on the
3459 * same CPU.
3460 */
3461 memset(buf, 0, 128);
3462 cpumask_scnprintf(buf, 128, conn->conn_cpumask);
3463#if 0
3464 pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
3465 " %s for %s\n", buf, p->comm);
3466#endif
3467 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3468}
3469
3470#else
3471#define iscsit_thread_get_cpumask(X) ({})
3472#define iscsit_thread_check_cpumask(X, Y, Z) ({})
3473#endif /* CONFIG_SMP */
3474
3475int iscsi_target_tx_thread(void *arg)
3476{
3477 u8 state;
3478 int eodr = 0;
3479 int ret = 0;
3480 int sent_status = 0;
3481 int use_misc = 0;
3482 int map_sg = 0;
3483 struct iscsi_cmd *cmd = NULL;
3484 struct iscsi_conn *conn;
3485 struct iscsi_queue_req *qr = NULL;
3486 struct se_cmd *se_cmd;
3487 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
3488 /*
3489 * Allow ourselves to be interrupted by SIGINT so that a
3490 * connection recovery / failure event can be triggered externally.
3491 */
3492 allow_signal(SIGINT);
3493
3494restart:
3495 conn = iscsi_tx_thread_pre_handler(ts);
3496 if (!conn)
3497 goto out;
3498
3499 eodr = map_sg = ret = sent_status = use_misc = 0;
3500
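	/*
	 * Main TX loop: sleep until woken, then drain the immediate queue
	 * (R2T, NopIn and command removal work) followed by the response
	 * queue (Data-IN, status and the other response PDU types).
	 */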
3501 while (!kthread_should_stop()) {
3502 /*
3503 * Ensure that both TX and RX per connection kthreads
3504 * are scheduled to run on the same CPU.
3505 */
3506 iscsit_thread_check_cpumask(conn, current, 1);
3507
3508 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
3509
3510 if ((ts->status == ISCSI_THREAD_SET_RESET) ||
3511 signal_pending(current))
3512 goto transport_err;
3513
3514get_immediate:
3515 qr = iscsit_get_cmd_from_immediate_queue(conn);
3516 if (qr) {
3517 atomic_set(&conn->check_immediate_queue, 0);
3518 cmd = qr->cmd;
3519 state = qr->state;
3520 kmem_cache_free(lio_qr_cache, qr);
3521
3522 spin_lock_bh(&cmd->istate_lock);
3523 switch (state) {
3524 case ISTATE_SEND_R2T:
3525 spin_unlock_bh(&cmd->istate_lock);
3526 ret = iscsit_send_r2t(cmd, conn);
3527 break;
3528 case ISTATE_REMOVE:
3529 spin_unlock_bh(&cmd->istate_lock);
3530
3531 if (cmd->data_direction == DMA_TO_DEVICE)
3532 iscsit_stop_dataout_timer(cmd);
3533
3534 spin_lock_bh(&conn->cmd_lock);
3535 list_del(&cmd->i_list);
3536 spin_unlock_bh(&conn->cmd_lock);
3537 /*
3538 * Determine if a struct se_cmd is associated with
3539 * this struct iscsi_cmd.
3540 */
3541 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
3542 !(cmd->tmr_req))
3543 iscsit_release_cmd(cmd);
3544 else
3545 transport_generic_free_cmd(&cmd->se_cmd,
3546 1, 0);
3547 goto get_immediate;
3548 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3549 spin_unlock_bh(&cmd->istate_lock);
3550 iscsit_mod_nopin_response_timer(conn);
3551 ret = iscsit_send_unsolicited_nopin(cmd,
3552 conn, 1);
3553 break;
3554 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3555 spin_unlock_bh(&cmd->istate_lock);
3556 ret = iscsit_send_unsolicited_nopin(cmd,
3557 conn, 0);
3558 break;
3559 default:
3560 pr_err("Unknown Opcode: 0x%02x ITT:"
3561 " 0x%08x, i_state: %d on CID: %hu\n",
3562 cmd->iscsi_opcode, cmd->init_task_tag, state,
3563 conn->cid);
3564 spin_unlock_bh(&cmd->istate_lock);
3565 goto transport_err;
3566 }
3567 if (ret < 0) {
3568 conn->tx_immediate_queue = 0;
3569 goto transport_err;
3570 }
3571
3572 if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
3573 conn->tx_immediate_queue = 0;
3574 iscsit_tx_thread_wait_for_tcp(conn);
3575 goto transport_err;
3576 }
3577
3578 spin_lock_bh(&cmd->istate_lock);
3579 switch (state) {
3580 case ISTATE_SEND_R2T:
3581 spin_unlock_bh(&cmd->istate_lock);
3582 spin_lock_bh(&cmd->dataout_timeout_lock);
3583 iscsit_start_dataout_timer(cmd, conn);
3584 spin_unlock_bh(&cmd->dataout_timeout_lock);
3585 break;
3586 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3587 cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
3588 spin_unlock_bh(&cmd->istate_lock);
3589 break;
3590 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3591 cmd->i_state = ISTATE_SENT_STATUS;
3592 spin_unlock_bh(&cmd->istate_lock);
3593 break;
3594 default:
3595 pr_err("Unknown Opcode: 0x%02x ITT:"
3596 " 0x%08x, i_state: %d on CID: %hu\n",
3597 cmd->iscsi_opcode, cmd->init_task_tag,
3598 state, conn->cid);
3599 spin_unlock_bh(&cmd->istate_lock);
3600 goto transport_err;
3601 }
3602 goto get_immediate;
3603 } else
3604 conn->tx_immediate_queue = 0;
3605
3606get_response:
3607 qr = iscsit_get_cmd_from_response_queue(conn);
3608 if (qr) {
3609 cmd = qr->cmd;
3610 state = qr->state;
3611 kmem_cache_free(lio_qr_cache, qr);
3612
3613 spin_lock_bh(&cmd->istate_lock);
3614check_rsp_state:
3615 switch (state) {
3616 case ISTATE_SEND_DATAIN:
3617 spin_unlock_bh(&cmd->istate_lock);
3618 ret = iscsit_send_data_in(cmd, conn,
3619 &eodr);
3620 map_sg = 1;
3621 break;
3622 case ISTATE_SEND_STATUS:
3623 case ISTATE_SEND_STATUS_RECOVERY:
3624 spin_unlock_bh(&cmd->istate_lock);
3625 use_misc = 1;
3626 ret = iscsit_send_status(cmd, conn);
3627 break;
3628 case ISTATE_SEND_LOGOUTRSP:
3629 spin_unlock_bh(&cmd->istate_lock);
3630 use_misc = 1;
3631 ret = iscsit_send_logout_response(cmd, conn);
3632 break;
3633 case ISTATE_SEND_ASYNCMSG:
3634 spin_unlock_bh(&cmd->istate_lock);
3635 use_misc = 1;
3636 ret = iscsit_send_conn_drop_async_message(
3637 cmd, conn);
3638 break;
3639 case ISTATE_SEND_NOPIN:
3640 spin_unlock_bh(&cmd->istate_lock);
3641 use_misc = 1;
3642 ret = iscsit_send_nopin_response(cmd, conn);
3643 break;
3644 case ISTATE_SEND_REJECT:
3645 spin_unlock_bh(&cmd->istate_lock);
3646 use_misc = 1;
3647 ret = iscsit_send_reject(cmd, conn);
3648 break;
3649 case ISTATE_SEND_TASKMGTRSP:
3650 spin_unlock_bh(&cmd->istate_lock);
3651 use_misc = 1;
3652 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3653 if (ret != 0)
3654 break;
3655 ret = iscsit_tmr_post_handler(cmd, conn);
3656 if (ret != 0)
3657 iscsit_fall_back_to_erl0(conn->sess);
3658 break;
3659 case ISTATE_SEND_TEXTRSP:
3660 spin_unlock_bh(&cmd->istate_lock);
3661 use_misc = 1;
3662 ret = iscsit_send_text_rsp(cmd, conn);
3663 break;
3664 default:
3665 pr_err("Unknown Opcode: 0x%02x ITT:"
3666 " 0x%08x, i_state: %d on CID: %hu\n",
3667 cmd->iscsi_opcode, cmd->init_task_tag,
3668 state, conn->cid);
3669 spin_unlock_bh(&cmd->istate_lock);
3670 goto transport_err;
3671 }
3672 if (ret < 0) {
3673 conn->tx_response_queue = 0;
3674 goto transport_err;
3675 }
3676
3677 se_cmd = &cmd->se_cmd;
3678
3679 if (map_sg && !conn->conn_ops->IFMarker) {
3680 if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
3681 conn->tx_response_queue = 0;
3682 iscsit_tx_thread_wait_for_tcp(conn);
3683 iscsit_unmap_iovec(cmd);
3684 goto transport_err;
3685 }
3686 } else {
3687 if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
3688 conn->tx_response_queue = 0;
3689 iscsit_tx_thread_wait_for_tcp(conn);
3690 iscsit_unmap_iovec(cmd);
3691 goto transport_err;
3692 }
3693 }
3694 map_sg = 0;
3695 iscsit_unmap_iovec(cmd);
3696
3697 spin_lock_bh(&cmd->istate_lock);
3698 switch (state) {
3699 case ISTATE_SEND_DATAIN:
3700 if (!eodr)
3701 goto check_rsp_state;
3702
3703 if (eodr == 1) {
3704 cmd->i_state = ISTATE_SENT_LAST_DATAIN;
3705 sent_status = 1;
3706 eodr = use_misc = 0;
3707 } else if (eodr == 2) {
3708 cmd->i_state = state =
3709 ISTATE_SEND_STATUS;
3710 sent_status = 0;
3711 eodr = use_misc = 0;
3712 goto check_rsp_state;
3713 }
3714 break;
3715 case ISTATE_SEND_STATUS:
3716 use_misc = 0;
3717 sent_status = 1;
3718 break;
3719 case ISTATE_SEND_ASYNCMSG:
3720 case ISTATE_SEND_NOPIN:
3721 case ISTATE_SEND_STATUS_RECOVERY:
3722 case ISTATE_SEND_TEXTRSP:
3723 use_misc = 0;
3724 sent_status = 1;
3725 break;
3726 case ISTATE_SEND_REJECT:
3727 use_misc = 0;
3728 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
3729 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
3730 spin_unlock_bh(&cmd->istate_lock);
3731 complete(&cmd->reject_comp);
3732 goto transport_err;
3733 }
3734 complete(&cmd->reject_comp);
3735 break;
3736 case ISTATE_SEND_TASKMGTRSP:
3737 use_misc = 0;
3738 sent_status = 1;
3739 break;
3740 case ISTATE_SEND_LOGOUTRSP:
3741 spin_unlock_bh(&cmd->istate_lock);
3742 if (!iscsit_logout_post_handler(cmd, conn))
3743 goto restart;
3744 spin_lock_bh(&cmd->istate_lock);
3745 use_misc = 0;
3746 sent_status = 1;
3747 break;
3748 default:
3749 pr_err("Unknown Opcode: 0x%02x ITT:"
3750 " 0x%08x, i_state: %d on CID: %hu\n",
3751 cmd->iscsi_opcode, cmd->init_task_tag,
3752 cmd->i_state, conn->cid);
3753 spin_unlock_bh(&cmd->istate_lock);
3754 goto transport_err;
3755 }
3756
3757 if (sent_status) {
3758 cmd->i_state = ISTATE_SENT_STATUS;
3759 sent_status = 0;
3760 }
3761 spin_unlock_bh(&cmd->istate_lock);
3762
3763 if (atomic_read(&conn->check_immediate_queue))
3764 goto get_immediate;
3765
3766 goto get_response;
3767 } else
3768 conn->tx_response_queue = 0;
3769 }
3770
3771transport_err:
3772 iscsit_take_action_for_connection_exit(conn);
3773 goto restart;
3774out:
3775 return 0;
3776}
3777
3778int iscsi_target_rx_thread(void *arg)
3779{
3780 int ret;
3781 u8 buffer[ISCSI_HDR_LEN], opcode;
3782 u32 checksum = 0, digest = 0;
3783 struct iscsi_conn *conn = NULL;
3784 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
3785 struct kvec iov;
3786 /*
3787 * Allow ourselves to be interrupted by SIGINT so that a
3788 * connection recovery / failure event can be triggered externally.
3789 */
3790 allow_signal(SIGINT);
3791
3792restart:
3793 conn = iscsi_rx_thread_pre_handler(ts);
3794 if (!conn)
3795 goto out;
3796
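	/*
	 * Main RX loop: receive the 48-byte Basic Header Segment, verify the
	 * optional CRC32C HeaderDigest, then dispatch on the iSCSI opcode to
	 * the matching PDU handler.
	 */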
3797 while (!kthread_should_stop()) {
3798 /*
3799 * Ensure that both TX and RX per connection kthreads
3800 * are scheduled to run on the same CPU.
3801 */
3802 iscsit_thread_check_cpumask(conn, current, 0);
3803
3804 memset(buffer, 0, ISCSI_HDR_LEN);
3805 memset(&iov, 0, sizeof(struct kvec));
3806
3807 iov.iov_base = buffer;
3808 iov.iov_len = ISCSI_HDR_LEN;
3809
3810 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
3811 if (ret != ISCSI_HDR_LEN) {
3812 iscsit_rx_thread_wait_for_tcp(conn);
3813 goto transport_err;
3814 }
3815
3816 /*
3817 * Set conn->bad_hdr for use with REJECT PDUs.
3818 */
3819 memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
3820
3821 if (conn->conn_ops->HeaderDigest) {
3822 iov.iov_base = &digest;
3823 iov.iov_len = ISCSI_CRC_LEN;
3824
3825 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
3826 if (ret != ISCSI_CRC_LEN) {
3827 iscsit_rx_thread_wait_for_tcp(conn);
3828 goto transport_err;
3829 }
3830
3831 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
3832 buffer, ISCSI_HDR_LEN,
3833 0, NULL, (u8 *)&checksum);
3834
3835 if (digest != checksum) {
3836 pr_err("HeaderDigest CRC32C failed,"
3837 " received 0x%08x, computed 0x%08x\n",
3838 digest, checksum);
3839 /*
3840 * Set the PDU to 0xff so it will intentionally
3841 * hit default in the switch below.
3842 */
3843 memset(buffer, 0xff, ISCSI_HDR_LEN);
3844 spin_lock_bh(&conn->sess->session_stats_lock);
3845 conn->sess->conn_digest_errors++;
3846 spin_unlock_bh(&conn->sess->session_stats_lock);
3847 } else {
3848 pr_debug("Got HeaderDigest CRC32C"
3849 " 0x%08x\n", checksum);
3850 }
3851 }
3852
3853 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
3854 goto transport_err;
3855
3856 opcode = buffer[0] & ISCSI_OPCODE_MASK;
3857
3858 if (conn->sess->sess_ops->SessionType &&
3859 ((!(opcode & ISCSI_OP_TEXT)) ||
3860 (!(opcode & ISCSI_OP_LOGOUT)))) {
3861 pr_err("Received illegal iSCSI Opcode: 0x%02x"
3862 " while in Discovery Session, rejecting.\n", opcode);
3863 iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3864 buffer, conn);
3865 goto transport_err;
3866 }
3867
3868 switch (opcode) {
3869 case ISCSI_OP_SCSI_CMD:
3870 if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
3871 goto transport_err;
3872 break;
3873 case ISCSI_OP_SCSI_DATA_OUT:
3874 if (iscsit_handle_data_out(conn, buffer) < 0)
3875 goto transport_err;
3876 break;
3877 case ISCSI_OP_NOOP_OUT:
3878 if (iscsit_handle_nop_out(conn, buffer) < 0)
3879 goto transport_err;
3880 break;
3881 case ISCSI_OP_SCSI_TMFUNC:
3882 if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
3883 goto transport_err;
3884 break;
3885 case ISCSI_OP_TEXT:
3886 if (iscsit_handle_text_cmd(conn, buffer) < 0)
3887 goto transport_err;
3888 break;
3889 case ISCSI_OP_LOGOUT:
3890 ret = iscsit_handle_logout_cmd(conn, buffer);
3891 if (ret > 0) {
3892 wait_for_completion_timeout(&conn->conn_logout_comp,
3893 SECONDS_FOR_LOGOUT_COMP * HZ);
3894 goto transport_err;
3895 } else if (ret < 0)
3896 goto transport_err;
3897 break;
3898 case ISCSI_OP_SNACK:
3899 if (iscsit_handle_snack(conn, buffer) < 0)
3900 goto transport_err;
3901 break;
3902 default:
3903 pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
3904 opcode);
3905 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3906 pr_err("Cannot recover from unknown"
3907 " opcode while ERL=0, closing iSCSI connection"
3908 ".\n");
3909 goto transport_err;
3910 }
3911 if (!conn->conn_ops->OFMarker) {
3912 pr_err("Unable to recover from unknown"
3913 " opcode while OFMarker=No, closing iSCSI"
3914 " connection.\n");
3915 goto transport_err;
3916 }
3917 if (iscsit_recover_from_unknown_opcode(conn) < 0) {
3918 pr_err("Unable to recover from unknown"
3919 " opcode, closing iSCSI connection.\n");
3920 goto transport_err;
3921 }
3922 break;
3923 }
3924 }
3925
3926transport_err:
3927 if (!signal_pending(current))
3928 atomic_set(&conn->transport_failed, 1);
3929 iscsit_take_action_for_connection_exit(conn);
3930 goto restart;
3931out:
3932 return 0;
3933}
3934
3935static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
3936{
3937 struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
3938 struct iscsi_session *sess = conn->sess;
3939 struct se_cmd *se_cmd;
3940 /*
3941 * We expect this function to only ever be called from either RX or TX
3942 * thread context via iscsit_close_connection(), once the other context
3943 * has been reset and returned to its sleeping pre-handler state.
3944 */
3945 spin_lock_bh(&conn->cmd_lock);
3946 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
3947 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {
3948
3949 list_del(&cmd->i_list);
3950 spin_unlock_bh(&conn->cmd_lock);
3951 iscsit_increment_maxcmdsn(cmd, sess);
3952 se_cmd = &cmd->se_cmd;
3953 /*
3954 * Special cases for active iSCSI TMR, and
3955 * transport_lookup_cmd_lun() failing from
3956 * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
3957 */
3958 if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
3959 se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
3960 else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
3961 transport_release_cmd(se_cmd);
3962 else
3963 iscsit_release_cmd(cmd);
3964
3965 spin_lock_bh(&conn->cmd_lock);
3966 continue;
3967 }
3968 list_del(&cmd->i_list);
3969 spin_unlock_bh(&conn->cmd_lock);
3970
3971 iscsit_increment_maxcmdsn(cmd, sess);
3972 se_cmd = &cmd->se_cmd;
3973
3974 if (se_cmd->transport_wait_for_tasks)
3975 se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
3976
3977 spin_lock_bh(&conn->cmd_lock);
3978 }
3979 spin_unlock_bh(&conn->cmd_lock);
3980}
3981
3982static void iscsit_stop_timers_for_cmds(
3983 struct iscsi_conn *conn)
3984{
3985 struct iscsi_cmd *cmd;
3986
3987 spin_lock_bh(&conn->cmd_lock);
3988 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
3989 if (cmd->data_direction == DMA_TO_DEVICE)
3990 iscsit_stop_dataout_timer(cmd);
3991 }
3992 spin_unlock_bh(&conn->cmd_lock);
3993}
3994
3995int iscsit_close_connection(
3996 struct iscsi_conn *conn)
3997{
3998 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
3999 struct iscsi_session *sess = conn->sess;
4000
4001 pr_debug("Closing iSCSI connection CID %hu on SID:"
4002 " %u\n", conn->cid, sess->sid);
4003 /*
4004 * Always complete conn_logout_comp just in case the RX Thread is sleeping
4005 * and the logout response never got sent because the connection
4006 * failed.
4007 */
4008 complete(&conn->conn_logout_comp);
4009
4010 iscsi_release_thread_set(conn);
4011
4012 iscsit_stop_timers_for_cmds(conn);
4013 iscsit_stop_nopin_response_timer(conn);
4014 iscsit_stop_nopin_timer(conn);
4015 iscsit_free_queue_reqs_for_conn(conn);
4016
4017 /*
4018 * During Connection recovery drop unacknowledged out of order
4019 * commands for this connection, and prepare the other commands
4020 * for reallegiance.
4021 *
4022 * During normal operation clear the out of order commands (but
4023 * do not free the struct iscsi_ooo_cmdsn's) and release all
4024 * struct iscsi_cmds.
4025 */
4026 if (atomic_read(&conn->connection_recovery)) {
4027 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
4028 iscsit_prepare_cmds_for_realligance(conn);
4029 } else {
4030 iscsit_clear_ooo_cmdsns_for_conn(conn);
4031 iscsit_release_commands_from_conn(conn);
4032 }
4033
4034 /*
4035 * Handle decrementing session or connection usage count if
4036 * a logout response could not be sent because the
4037 * connection failed. Fall back to Session Recovery here.
4038 */
4039 if (atomic_read(&conn->conn_logout_remove)) {
4040 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4041 iscsit_dec_conn_usage_count(conn);
4042 iscsit_dec_session_usage_count(sess);
4043 }
4044 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4045 iscsit_dec_conn_usage_count(conn);
4046
4047 atomic_set(&conn->conn_logout_remove, 0);
4048 atomic_set(&sess->session_reinstatement, 0);
4049 atomic_set(&sess->session_fall_back_to_erl0, 1);
4050 }
4051
4052 spin_lock_bh(&sess->conn_lock);
4053 list_del(&conn->conn_list);
4054
4055 /*
4056 * Attempt to let the Initiator know this connection failed by
4057 * sending a Connection Dropped Async Message on another
4058 * active connection.
4059 */
4060 if (atomic_read(&conn->connection_recovery))
4061 iscsit_build_conn_drop_async_message(conn);
4062
4063 spin_unlock_bh(&sess->conn_lock);
4064
4065 /*
4066 * complete the connection reinstatement completion that is being
4067 * waited on in iscsit_cause_connection_reinstatement().
4068 * in iscsit_cause_connection_reinstatement().
4069 */
4070 spin_lock_bh(&conn->state_lock);
4071 if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4072 spin_unlock_bh(&conn->state_lock);
4073 complete(&conn->conn_wait_comp);
4074 wait_for_completion(&conn->conn_post_wait_comp);
4075 spin_lock_bh(&conn->state_lock);
4076 }
4077
4078 /*
4079 * If connection reinstatement is being performed on this connection
4080 * by receiving a REMOVECONNFORRECOVERY logout request, complete the
4081 * connection wait rcfr completion that is being waited on
4082 * in iscsit_connection_reinstatement_rcfr().
4083 */
4084 if (atomic_read(&conn->connection_wait_rcfr)) {
4085 spin_unlock_bh(&conn->state_lock);
4086 complete(&conn->conn_wait_rcfr_comp);
4087 wait_for_completion(&conn->conn_post_wait_comp);
4088 spin_lock_bh(&conn->state_lock);
4089 }
4090 atomic_set(&conn->connection_reinstatement, 1);
4091 spin_unlock_bh(&conn->state_lock);
4092
4093 /*
4094 * If any other processes are accessing this connection pointer we
4095 * must wait until they have completed.
4096 */
4097 iscsit_check_conn_usage_count(conn);
4098
4099 if (conn->conn_rx_hash.tfm)
4100 crypto_free_hash(conn->conn_rx_hash.tfm);
4101 if (conn->conn_tx_hash.tfm)
4102 crypto_free_hash(conn->conn_tx_hash.tfm);
4103
4104 if (conn->conn_cpumask)
4105 free_cpumask_var(conn->conn_cpumask);
4106
4107 kfree(conn->conn_ops);
4108 conn->conn_ops = NULL;
4109
4110 if (conn->sock) {
4111 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
4112 kfree(conn->sock->file);
4113 conn->sock->file = NULL;
4114 }
4115 sock_release(conn->sock);
4116 }
4117 conn->thread_set = NULL;
4118
4119 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4120 conn->conn_state = TARG_CONN_STATE_FREE;
4121 kfree(conn);
4122
4123 spin_lock_bh(&sess->conn_lock);
4124 atomic_dec(&sess->nconn);
4125 pr_debug("Decremented iSCSI connection count to %hu from node:"
4126 " %s\n", atomic_read(&sess->nconn),
4127 sess->sess_ops->InitiatorName);
4128 /*
4129 * Make sure that if one connection fails in a non-ERL=2 iSCSI
4130 * Session, they all fail.
4131 */
4132 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4133 !atomic_read(&sess->session_logout))
4134 atomic_set(&sess->session_fall_back_to_erl0, 1);
4135
4136 /*
4137 * If this was not the last connection in the session, and we are
4138 * performing session reinstatement or falling back to ERL=0, call
4139 * iscsit_stop_session() without sleeping to shut down the other
4140 * active connections.
4141 */
4142 if (atomic_read(&sess->nconn)) {
4143 if (!atomic_read(&sess->session_reinstatement) &&
4144 !atomic_read(&sess->session_fall_back_to_erl0)) {
4145 spin_unlock_bh(&sess->conn_lock);
4146 return 0;
4147 }
4148 if (!atomic_read(&sess->session_stop_active)) {
4149 atomic_set(&sess->session_stop_active, 1);
4150 spin_unlock_bh(&sess->conn_lock);
4151 iscsit_stop_session(sess, 0, 0);
4152 return 0;
4153 }
4154 spin_unlock_bh(&sess->conn_lock);
4155 return 0;
4156 }
4157
4158 /*
4159 * If this was the last connection in the session and one of the
4160 * following is occurring:
4161 *
4162 * Session Reinstatement is not being performed and we are falling back
4163 * to ERL=0: call iscsit_close_session().
4164 *
4165 * Session Logout was requested. iscsit_close_session() will be called
4166 * elsewhere.
4167 *
4168 * Session Continuation is not being performed, start the Time2Retain
4169 * handler and check if sleep_on_sess_wait_comp is active.
4170 */
4171 if (!atomic_read(&sess->session_reinstatement) &&
4172 atomic_read(&sess->session_fall_back_to_erl0)) {
4173 spin_unlock_bh(&sess->conn_lock);
4174 iscsit_close_session(sess);
4175
4176 return 0;
4177 } else if (atomic_read(&sess->session_logout)) {
4178 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4179 sess->session_state = TARG_SESS_STATE_FREE;
4180 spin_unlock_bh(&sess->conn_lock);
4181
4182 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4183 complete(&sess->session_wait_comp);
4184
4185 return 0;
4186 } else {
4187 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4188 sess->session_state = TARG_SESS_STATE_FAILED;
4189
4190 if (!atomic_read(&sess->session_continuation)) {
4191 spin_unlock_bh(&sess->conn_lock);
4192 iscsit_start_time2retain_handler(sess);
4193 } else
4194 spin_unlock_bh(&sess->conn_lock);
4195
4196 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4197 complete(&sess->session_wait_comp);
4198
4199 return 0;
4200 }
4201 spin_unlock_bh(&sess->conn_lock);
4202
4203 return 0;
4204}
4205
4206int iscsit_close_session(struct iscsi_session *sess)
4207{
4208 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
4209 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4210
4211 if (atomic_read(&sess->nconn)) {
4212 pr_err("%d connection(s) still exist for iSCSI session"
4213 " to %s\n", atomic_read(&sess->nconn),
4214 sess->sess_ops->InitiatorName);
4215 BUG();
4216 }
4217
4218 spin_lock_bh(&se_tpg->session_lock);
4219 atomic_set(&sess->session_logout, 1);
4220 atomic_set(&sess->session_reinstatement, 1);
4221 iscsit_stop_time2retain_timer(sess);
4222 spin_unlock_bh(&se_tpg->session_lock);
4223
4224 /*
4225 * transport_deregister_session_configfs() will clear the
4226 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process context
4227 * can set it again with __transport_register_session() in
4228 * iscsi_post_login_handler() after the iscsit_stop_session()
4229 * completes in iscsi_np context.
4230 */
4231 transport_deregister_session_configfs(sess->se_sess);
4232
4233 /*
4234 * If any other processes are accessing this session pointer we must
4235 * wait until they have completed. If we are in an interrupt (the
4236 * time2retain handler) and hold an active session usage count, we
4237 * restart the timer and exit.
4238 */
4239 if (!in_interrupt()) {
4240 if (iscsit_check_session_usage_count(sess) == 1)
4241 iscsit_stop_session(sess, 1, 1);
4242 } else {
4243 if (iscsit_check_session_usage_count(sess) == 2) {
4244 atomic_set(&sess->session_logout, 0);
4245 iscsit_start_time2retain_handler(sess);
4246 return 0;
4247 }
4248 }
4249
4250 transport_deregister_session(sess->se_sess);
4251
4252 if (sess->sess_ops->ErrorRecoveryLevel == 2)
4253 iscsit_free_connection_recovery_entires(sess);
4254
4255 iscsit_free_all_ooo_cmdsns(sess);
4256
4257 spin_lock_bh(&se_tpg->session_lock);
4258 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4259 sess->session_state = TARG_SESS_STATE_FREE;
4260 pr_debug("Released iSCSI session from node: %s\n",
4261 sess->sess_ops->InitiatorName);
4262 tpg->nsessions--;
4263 if (tpg->tpg_tiqn)
4264 tpg->tpg_tiqn->tiqn_nsessions--;
4265
4266 pr_debug("Decremented number of active iSCSI Sessions on"
4267 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4268
4269 spin_lock(&sess_idr_lock);
4270 idr_remove(&sess_idr, sess->session_index);
4271 spin_unlock(&sess_idr_lock);
4272
4273 kfree(sess->sess_ops);
4274 sess->sess_ops = NULL;
4275 spin_unlock_bh(&se_tpg->session_lock);
4276
4277 kfree(sess);
4278 return 0;
4279}
4280
4281static void iscsit_logout_post_handler_closesession(
4282 struct iscsi_conn *conn)
4283{
4284 struct iscsi_session *sess = conn->sess;
4285
4286 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4287 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4288
4289 atomic_set(&conn->conn_logout_remove, 0);
4290 complete(&conn->conn_logout_comp);
4291
4292 iscsit_dec_conn_usage_count(conn);
4293 iscsit_stop_session(sess, 1, 1);
4294 iscsit_dec_session_usage_count(sess);
4295 iscsit_close_session(sess);
4296}
4297
4298static void iscsit_logout_post_handler_samecid(
4299 struct iscsi_conn *conn)
4300{
4301 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4302 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4303
4304 atomic_set(&conn->conn_logout_remove, 0);
4305 complete(&conn->conn_logout_comp);
4306
4307 iscsit_cause_connection_reinstatement(conn, 1);
4308 iscsit_dec_conn_usage_count(conn);
4309}
4310
4311static void iscsit_logout_post_handler_diffcid(
4312 struct iscsi_conn *conn,
4313 u16 cid)
4314{
4315 struct iscsi_conn *l_conn;
4316 struct iscsi_session *sess = conn->sess;
4317
4318 if (!sess)
4319 return;
4320
4321 spin_lock_bh(&sess->conn_lock);
4322 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4323 if (l_conn->cid == cid) {
4324 iscsit_inc_conn_usage_count(l_conn);
4325 break;
4326 }
4327 }
4328 spin_unlock_bh(&sess->conn_lock);
4329
4330 if (!l_conn)
4331 return;
4332
4333 if (l_conn->sock)
4334 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4335
4336 spin_lock_bh(&l_conn->state_lock);
4337 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4338 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4339 spin_unlock_bh(&l_conn->state_lock);
4340
4341 iscsit_cause_connection_reinstatement(l_conn, 1);
4342 iscsit_dec_conn_usage_count(l_conn);
4343}
4344
4345/*
4346 * Return of 0 causes the TX thread to restart.
4347 */
4348static int iscsit_logout_post_handler(
4349 struct iscsi_cmd *cmd,
4350 struct iscsi_conn *conn)
4351{
4352 int ret = 0;
4353
4354 switch (cmd->logout_reason) {
4355 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4356 switch (cmd->logout_response) {
4357 case ISCSI_LOGOUT_SUCCESS:
4358 case ISCSI_LOGOUT_CLEANUP_FAILED:
4359 default:
4360 iscsit_logout_post_handler_closesession(conn);
4361 break;
4362 }
4363 ret = 0;
4364 break;
4365 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4366 if (conn->cid == cmd->logout_cid) {
4367 switch (cmd->logout_response) {
4368 case ISCSI_LOGOUT_SUCCESS:
4369 case ISCSI_LOGOUT_CLEANUP_FAILED:
4370 default:
4371 iscsit_logout_post_handler_samecid(conn);
4372 break;
4373 }
4374 ret = 0;
4375 } else {
4376 switch (cmd->logout_response) {
4377 case ISCSI_LOGOUT_SUCCESS:
4378 iscsit_logout_post_handler_diffcid(conn,
4379 cmd->logout_cid);
4380 break;
4381 case ISCSI_LOGOUT_CID_NOT_FOUND:
4382 case ISCSI_LOGOUT_CLEANUP_FAILED:
4383 default:
4384 break;
4385 }
4386 ret = 1;
4387 }
4388 break;
4389 case ISCSI_LOGOUT_REASON_RECOVERY:
4390 switch (cmd->logout_response) {
4391 case ISCSI_LOGOUT_SUCCESS:
4392 case ISCSI_LOGOUT_CID_NOT_FOUND:
4393 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4394 case ISCSI_LOGOUT_CLEANUP_FAILED:
4395 default:
4396 break;
4397 }
4398 ret = 1;
4399 break;
4400 default:
4401 break;
4402
4403 }
4404 return ret;
4405}
4406
4407void iscsit_fail_session(struct iscsi_session *sess)
4408{
4409 struct iscsi_conn *conn;
4410
4411 spin_lock_bh(&sess->conn_lock);
4412 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4413 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4414 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4415 }
4416 spin_unlock_bh(&sess->conn_lock);
4417
4418 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4419 sess->session_state = TARG_SESS_STATE_FAILED;
4420}
4421
4422int iscsit_free_session(struct iscsi_session *sess)
4423{
4424 u16 conn_count = atomic_read(&sess->nconn);
4425 struct iscsi_conn *conn, *conn_tmp = NULL;
4426 int is_last;
4427
4428 spin_lock_bh(&sess->conn_lock);
4429 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4430
4431 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4432 conn_list) {
4433 if (conn_count == 0)
4434 break;
4435
4436 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4437 is_last = 1;
4438 } else {
4439 iscsit_inc_conn_usage_count(conn_tmp);
4440 is_last = 0;
4441 }
4442 iscsit_inc_conn_usage_count(conn);
4443
4444 spin_unlock_bh(&sess->conn_lock);
4445 iscsit_cause_connection_reinstatement(conn, 1);
4446 spin_lock_bh(&sess->conn_lock);
4447
4448 iscsit_dec_conn_usage_count(conn);
4449 if (is_last == 0)
4450 iscsit_dec_conn_usage_count(conn_tmp);
4451
4452 conn_count--;
4453 }
4454
4455 if (atomic_read(&sess->nconn)) {
4456 spin_unlock_bh(&sess->conn_lock);
4457 wait_for_completion(&sess->session_wait_comp);
4458 } else
4459 spin_unlock_bh(&sess->conn_lock);
4460
4461 iscsit_close_session(sess);
4462 return 0;
4463}
4464
4465void iscsit_stop_session(
4466 struct iscsi_session *sess,
4467 int session_sleep,
4468 int connection_sleep)
4469{
4470 u16 conn_count = atomic_read(&sess->nconn);
4471 struct iscsi_conn *conn, *conn_tmp = NULL;
4472 int is_last;
4473
4474 spin_lock_bh(&sess->conn_lock);
4475 if (session_sleep)
4476 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4477
4478 if (connection_sleep) {
4479 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4480 conn_list) {
4481 if (conn_count == 0)
4482 break;
4483
4484 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4485 is_last = 1;
4486 } else {
4487 iscsit_inc_conn_usage_count(conn_tmp);
4488 is_last = 0;
4489 }
4490 iscsit_inc_conn_usage_count(conn);
4491
4492 spin_unlock_bh(&sess->conn_lock);
4493 iscsit_cause_connection_reinstatement(conn, 1);
4494 spin_lock_bh(&sess->conn_lock);
4495
4496 iscsit_dec_conn_usage_count(conn);
4497 if (is_last == 0)
4498 iscsit_dec_conn_usage_count(conn_tmp);
4499 conn_count--;
4500 }
4501 } else {
4502 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4503 iscsit_cause_connection_reinstatement(conn, 0);
4504 }
4505
4506 if (session_sleep && atomic_read(&sess->nconn)) {
4507 spin_unlock_bh(&sess->conn_lock);
4508 wait_for_completion(&sess->session_wait_comp);
4509 } else
4510 spin_unlock_bh(&sess->conn_lock);
4511}
4512
4513int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4514{
4515 struct iscsi_session *sess;
4516 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4517 struct se_session *se_sess, *se_sess_tmp;
4518 int session_count = 0;
4519
4520 spin_lock_bh(&se_tpg->session_lock);
4521 if (tpg->nsessions && !force) {
4522 spin_unlock_bh(&se_tpg->session_lock);
4523 return -1;
4524 }
4525
4526 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4527 sess_list) {
4528 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4529
4530 spin_lock(&sess->conn_lock);
4531 if (atomic_read(&sess->session_fall_back_to_erl0) ||
4532 atomic_read(&sess->session_logout) ||
4533 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4534 spin_unlock(&sess->conn_lock);
4535 continue;
4536 }
4537 atomic_set(&sess->session_reinstatement, 1);
4538 spin_unlock(&sess->conn_lock);
4539 spin_unlock_bh(&se_tpg->session_lock);
4540
4541 iscsit_free_session(sess);
4542 spin_lock_bh(&se_tpg->session_lock);
4543
4544 session_count++;
4545 }
4546 spin_unlock_bh(&se_tpg->session_lock);
4547
4548 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4549 " Group: %hu\n", session_count, tpg->tpgt);
4550 return 0;
4551}
4552
4553MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4554MODULE_VERSION("4.1.x");
4555MODULE_AUTHOR("nab@Linux-iSCSI.org");
4556MODULE_LICENSE("GPL");
4557
4558module_init(iscsi_target_init_module);
4559module_exit(iscsi_target_cleanup_module);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
new file mode 100644
index 000000000000..5db2ddeed5eb
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -0,0 +1,42 @@
1#ifndef ISCSI_TARGET_H
2#define ISCSI_TARGET_H
3
4extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
5extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
6extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
7extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
8extern void iscsit_del_tiqn(struct iscsi_tiqn *);
9extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
10extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
11extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
12 char *, int);
13extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
14 struct iscsi_portal_group *);
15extern int iscsit_del_np(struct iscsi_np *);
16extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
17extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
18extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
19extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
20extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
21extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
22extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
23extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
24extern int iscsi_target_tx_thread(void *);
25extern int iscsi_target_rx_thread(void *);
26extern int iscsit_close_connection(struct iscsi_conn *);
27extern int iscsit_close_session(struct iscsi_session *);
28extern void iscsit_fail_session(struct iscsi_session *);
29extern int iscsit_free_session(struct iscsi_session *);
30extern void iscsit_stop_session(struct iscsi_session *, int, int);
31extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
32
33extern struct iscsit_global *iscsit_global;
34extern struct target_fabric_configfs *lio_target_fabric_configfs;
35
36extern struct kmem_cache *lio_dr_cache;
37extern struct kmem_cache *lio_ooo_cache;
38extern struct kmem_cache *lio_cmd_cache;
39extern struct kmem_cache *lio_qr_cache;
40extern struct kmem_cache *lio_r2t_cache;
41
42#endif /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
new file mode 100644
index 000000000000..11fd74307811
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -0,0 +1,490 @@
1/*******************************************************************************
2 * This file houses the main functions for the iSCSI CHAP support
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/string.h>
22#include <linux/crypto.h>
23#include <linux/err.h>
24#include <linux/scatterlist.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_nego.h"
28#include "iscsi_target_auth.h"
29
30static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
31{
32 unsigned char result = 0;
33 /*
34 * MSB
35 */
36 if ((val[0] >= 'a') && (val[0] <= 'f'))
37 result = ((val[0] - 'a' + 10) & 0xf) << 4;
38 else
39 if ((val[0] >= 'A') && (val[0] <= 'F'))
40 result = ((val[0] - 'A' + 10) & 0xf) << 4;
41 else /* digit */
42 result = ((val[0] - '0') & 0xf) << 4;
43 /*
44 * LSB
45 */
46 if ((val[1] >= 'a') && (val[1] <= 'f'))
47 result |= ((val[1] - 'a' + 10) & 0xf);
48 else
49 if ((val[1] >= 'A') && (val[1] <= 'F'))
50 result |= ((val[1] - 'A' + 10) & 0xf);
51 else /* digit */
52 result |= ((val[1] - '0') & 0xf);
53
54 return result;
55}
56
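/*
 * Convert an ASCII hex string into raw binary, consuming two input
 * characters per output byte.  Returns the number of bytes written to dst.
 */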
57static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
58{
59 int i, j = 0;
60
61 for (i = 0; i < len; i += 2) {
62 dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]);
63 }
64
65 dst[j] = '\0';
66 return j;
67}
68
69static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
70{
71 int i;
72
73 for (i = 0; i < src_len; i++) {
74 sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
75 }
76}
77
78static void chap_set_random(char *data, int length)
79{
80 long r;
81 unsigned n;
82
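	/*
	 * Assemble each output byte from three separate get_random_bytes()
	 * draws: 3 + 3 + 2 bits, with each draw first folded by XOR-ing
	 * shifted copies of itself.
	 */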
83 while (length > 0) {
84 get_random_bytes(&r, sizeof(long));
85 r = r ^ (r >> 8);
86 r = r ^ (r >> 4);
87 n = r & 0x7;
88
89 get_random_bytes(&r, sizeof(long));
90 r = r ^ (r >> 8);
91 r = r ^ (r >> 5);
92 n = (n << 3) | (r & 0x7);
93
94 get_random_bytes(&r, sizeof(long));
95 r = r ^ (r >> 8);
96 r = r ^ (r >> 5);
97 n = (n << 2) | (r & 0x3);
98
99 *data++ = n;
100 length--;
101 }
102}
103
104static void chap_gen_challenge(
105 struct iscsi_conn *conn,
106 int caller,
107 char *c_str,
108 unsigned int *c_len)
109{
110 unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
111 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
112
113 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
114
115 chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
116 chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
117 CHAP_CHALLENGE_LENGTH);
118 /*
119 * Set CHAP_C, and copy the generated challenge into c_str.
120 */
121 *c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
122 *c_len += 1;
123
124 pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
125 challenge_asciihex);
126}
127
128
129static struct iscsi_chap *chap_server_open(
130 struct iscsi_conn *conn,
131 struct iscsi_node_auth *auth,
132 const char *a_str,
133 char *aic_str,
134 unsigned int *aic_len)
135{
136 struct iscsi_chap *chap;
137
138 if (!(auth->naf_flags & NAF_USERID_SET) ||
139 !(auth->naf_flags & NAF_PASSWORD_SET)) {
140 pr_err("CHAP user or password not set for"
141 " Initiator ACL\n");
142 return NULL;
143 }
144
145 conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
146 if (!conn->auth_protocol)
147 return NULL;
148
149 chap = (struct iscsi_chap *) conn->auth_protocol;
150 /*
151 * We only support the MD5 digest algorithm (CHAP_A=5) presently.
152 */
153 if (strncmp(a_str, "CHAP_A=5", 8)) {
154 pr_err("CHAP_A is not MD5.\n");
155 return NULL;
156 }
157 pr_debug("[server] Got CHAP_A=5\n");
158 /*
159 * Send back CHAP_A set to MD5.
160 */
161 *aic_len = sprintf(aic_str, "CHAP_A=5");
162 *aic_len += 1;
163 chap->digest_type = CHAP_DIGEST_MD5;
164 pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
165 /*
166 * Set Identifier.
167 */
168 chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
169 *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
170 *aic_len += 1;
171 pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
172 /*
173 * Generate Challenge.
174 */
175 chap_gen_challenge(conn, 1, aic_str, aic_len);
176
177 return chap;
178}
179
180static void chap_close(struct iscsi_conn *conn)
181{
182 kfree(conn->auth_protocol);
183 conn->auth_protocol = NULL;
184}
185
186static int chap_server_compute_md5(
187 struct iscsi_conn *conn,
188 struct iscsi_node_auth *auth,
189 char *nr_in_ptr,
190 char *nr_out_ptr,
191 unsigned int *nr_out_len)
192{
193 char *endptr;
194 unsigned char id, digest[MD5_SIGNATURE_SIZE];
195 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
196 unsigned char identifier[10], *challenge = NULL;
197 unsigned char *challenge_binhex = NULL;
198 unsigned char client_digest[MD5_SIGNATURE_SIZE];
199 unsigned char server_digest[MD5_SIGNATURE_SIZE];
200 unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
201 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
202 struct crypto_hash *tfm;
203 struct hash_desc desc;
204 struct scatterlist sg;
205 int auth_ret = -1, ret, challenge_len;
206
207 memset(identifier, 0, 10);
208 memset(chap_n, 0, MAX_CHAP_N_SIZE);
209 memset(chap_r, 0, MAX_RESPONSE_LENGTH);
210 memset(digest, 0, MD5_SIGNATURE_SIZE);
211 memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
212 memset(client_digest, 0, MD5_SIGNATURE_SIZE);
213 memset(server_digest, 0, MD5_SIGNATURE_SIZE);
214
215 challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
216 if (!challenge) {
217 pr_err("Unable to allocate challenge buffer\n");
218 goto out;
219 }
220
221 challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
222 if (!challenge_binhex) {
223 pr_err("Unable to allocate challenge_binhex buffer\n");
224 goto out;
225 }
226 /*
227 * Extract CHAP_N.
228 */
229 if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
230 &type) < 0) {
231 pr_err("Could not find CHAP_N.\n");
232 goto out;
233 }
234 if (type == HEX) {
235 pr_err("Could not find CHAP_N.\n");
236 goto out;
237 }
238
239 if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
240 pr_err("CHAP_N values do not match!\n");
241 goto out;
242 }
243 pr_debug("[server] Got CHAP_N=%s\n", chap_n);
244 /*
245 * Extract CHAP_R.
246 */
247 if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
248 &type) < 0) {
249 pr_err("Could not find CHAP_R.\n");
250 goto out;
251 }
252 if (type != HEX) {
253 pr_err("Could not find CHAP_R.\n");
254 goto out;
255 }
256
257 pr_debug("[server] Got CHAP_R=%s\n", chap_r);
258 chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
259
260 tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
261 if (IS_ERR(tfm)) {
262 pr_err("Unable to allocate struct crypto_hash\n");
263 goto out;
264 }
265 desc.tfm = tfm;
266 desc.flags = 0;
267
268 ret = crypto_hash_init(&desc);
269 if (ret < 0) {
270 pr_err("crypto_hash_init() failed\n");
271 crypto_free_hash(tfm);
272 goto out;
273 }
274
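	/*
	 * Per RFC 1994 the expected CHAP response is MD5(id || secret ||
	 * challenge): hash the one-byte identifier, the node's CHAP password
	 * and the challenge generated earlier, in that order.
	 */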
275 sg_init_one(&sg, (void *)&chap->id, 1);
276 ret = crypto_hash_update(&desc, &sg, 1);
277 if (ret < 0) {
278 pr_err("crypto_hash_update() failed for id\n");
279 crypto_free_hash(tfm);
280 goto out;
281 }
282
283 sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
284 ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
285 if (ret < 0) {
286 pr_err("crypto_hash_update() failed for password\n");
287 crypto_free_hash(tfm);
288 goto out;
289 }
290
291 sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
292 ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
293 if (ret < 0) {
294 pr_err("crypto_hash_update() failed for challenge\n");
295 crypto_free_hash(tfm);
296 goto out;
297 }
298
299 ret = crypto_hash_final(&desc, server_digest);
300 if (ret < 0) {
301 pr_err("crypto_hash_final() failed for server digest\n");
302 crypto_free_hash(tfm);
303 goto out;
304 }
305 crypto_free_hash(tfm);
306
307 chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
308 pr_debug("[server] MD5 Server Digest: %s\n", response);
309
310 if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
311 pr_debug("[server] MD5 Digests do not match!\n\n");
312 goto out;
313 } else
314 pr_debug("[server] MD5 Digests match, CHAP authentication"
315 " successful.\n\n");
316 /*
317 * One-way authentication has succeeded; return now if mutual
318 * authentication is not enabled.
319 */
320 if (!auth->authenticate_target) {
321 kfree(challenge);
322 kfree(challenge_binhex);
323 return 0;
324 }
325 /*
326 * Get CHAP_I.
327 */
328 if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
329 pr_err("Could not find CHAP_I.\n");
330 goto out;
331 }
332
333 if (type == HEX)
334 id = (unsigned char)simple_strtoul((char *)&identifier[2],
335 &endptr, 0);
336 else
337 id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
338 /*
339 * RFC 1994 says the Identifier is no more than one octet (8 bits).
340 */
341 pr_debug("[server] Got CHAP_I=%d\n", id);
342 /*
343 * Get CHAP_C.
344 */
345 if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
346 challenge, &type) < 0) {
347 pr_err("Could not find CHAP_C.\n");
348 goto out;
349 }
350
351 if (type != HEX) {
352 pr_err("Could not find CHAP_C.\n");
353 goto out;
354 }
355 pr_debug("[server] Got CHAP_C=%s\n", challenge);
356 challenge_len = chap_string_to_hex(challenge_binhex, challenge,
357 strlen(challenge));
358 if (!challenge_len) {
359 pr_err("Unable to convert incoming challenge\n");
360 goto out;
361 }
362 /*
363 * Generate CHAP_N and CHAP_R for mutual authentication.
364 */
365 tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
366 if (IS_ERR(tfm)) {
367 pr_err("Unable to allocate struct crypto_hash\n");
368 goto out;
369 }
370 desc.tfm = tfm;
371 desc.flags = 0;
372
373 ret = crypto_hash_init(&desc);
374 if (ret < 0) {
375 pr_err("crypto_hash_init() failed\n");
376 crypto_free_hash(tfm);
377 goto out;
378 }
379
380 sg_init_one(&sg, (void *)&id, 1);
381 ret = crypto_hash_update(&desc, &sg, 1);
382 if (ret < 0) {
383 pr_err("crypto_hash_update() failed for id\n");
384 crypto_free_hash(tfm);
385 goto out;
386 }
387
388 sg_init_one(&sg, (void *)auth->password_mutual,
389 strlen(auth->password_mutual));
390 ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
391 if (ret < 0) {
392 pr_err("crypto_hash_update() failed for"
393 " password_mutual\n");
394 crypto_free_hash(tfm);
395 goto out;
396 }
397 /*
398 * Include the received challenge, converted to binary above, in the digest.
399 */
400 sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
401 ret = crypto_hash_update(&desc, &sg, challenge_len);
402 if (ret < 0) {
403 pr_err("crypto_hash_update() failed for ma challenge\n");
404 crypto_free_hash(tfm);
405 goto out;
406 }
407
408 ret = crypto_hash_final(&desc, digest);
409 if (ret < 0) {
410 pr_err("crypto_hash_final() failed for ma digest\n");
411 crypto_free_hash(tfm);
412 goto out;
413 }
414 crypto_free_hash(tfm);
415 /*
416 * Generate CHAP_N and CHAP_R.
417 */
418 *nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
419 *nr_out_len += 1;
420 pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
421 /*
422 * Convert response from binary hex to ascii hex.
423 */
424 chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
425 *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
426 response);
427 *nr_out_len += 1;
428 pr_debug("[server] Sending CHAP_R=0x%s\n", response);
429 auth_ret = 0;
430out:
431 kfree(challenge);
432 kfree(challenge_binhex);
433 return auth_ret;
434}
435
436static int chap_got_response(
437 struct iscsi_conn *conn,
438 struct iscsi_node_auth *auth,
439 char *nr_in_ptr,
440 char *nr_out_ptr,
441 unsigned int *nr_out_len)
442{
443 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
444
445 switch (chap->digest_type) {
446 case CHAP_DIGEST_MD5:
447 if (chap_server_compute_md5(conn, auth, nr_in_ptr,
448 nr_out_ptr, nr_out_len) < 0)
449 return -1;
450 return 0;
451 default:
452 pr_err("Unknown CHAP digest type %d!\n",
453 chap->digest_type);
454 return -1;
455 }
456}
457
458u32 chap_main_loop(
459 struct iscsi_conn *conn,
460 struct iscsi_node_auth *auth,
461 char *in_text,
462 char *out_text,
463 int *in_len,
464 int *out_len)
465{
466 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
467
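	/*
	 * Two-step server state machine: on the first call allocate the CHAP
	 * context and emit CHAP_A/CHAP_I/CHAP_C; on the second call verify the
	 * initiator's CHAP_N/CHAP_R and, if mutual authentication is enabled,
	 * emit our own CHAP_N/CHAP_R.  Returns 0 to continue the exchange,
	 * 1 on success and 2 on failure.
	 */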
468 if (!chap) {
469 chap = chap_server_open(conn, auth, in_text, out_text, out_len);
470 if (!chap)
471 return 2;
472 chap->chap_state = CHAP_STAGE_SERVER_AIC;
473 return 0;
474 } else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
475 convert_null_to_semi(in_text, *in_len);
476 if (chap_got_response(conn, auth, in_text, out_text,
477 out_len) < 0) {
478 chap_close(conn);
479 return 2;
480 }
481 if (auth->authenticate_target)
482 chap->chap_state = CHAP_STAGE_SERVER_NR;
483 else
484 *out_len = 0;
485 chap_close(conn);
486 return 1;
487 }
488
489 return 2;
490}
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
new file mode 100644
index 000000000000..2f463c09626d
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -0,0 +1,31 @@
1#ifndef _ISCSI_CHAP_H_
2#define _ISCSI_CHAP_H_
3
4#define CHAP_DIGEST_MD5 5
5#define CHAP_DIGEST_SHA 6
6
7#define CHAP_CHALLENGE_LENGTH 16
8#define CHAP_CHALLENGE_STR_LEN 4096
9#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
10#define MAX_CHAP_N_SIZE 512
11
12#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
13
14#define CHAP_STAGE_CLIENT_A 1
15#define CHAP_STAGE_SERVER_AIC 2
16#define CHAP_STAGE_CLIENT_NR 3
17#define CHAP_STAGE_CLIENT_NRIC 4
18#define CHAP_STAGE_SERVER_NR 5
19
20extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
21 int *, int *);
22
23struct iscsi_chap {
24 unsigned char digest_type;
25 unsigned char id;
26 unsigned char challenge[CHAP_CHALLENGE_LENGTH];
27 unsigned int authenticate_target;
28 unsigned int chap_state;
29} ____cacheline_aligned;
30
31#endif /*** _ISCSI_CHAP_H_ ***/
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
new file mode 100644
index 000000000000..32bb92c44450
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -0,0 +1,1882 @@
1/*******************************************************************************
2 * This file contains the configfs implementation for iSCSI Target mode
3 * from the LIO-Target Project.
4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ****************************************************************************/
21
22#include <linux/configfs.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25#include <target/target_core_fabric_ops.h>
26#include <target/target_core_fabric_configfs.h>
27#include <target/target_core_fabric_lib.h>
28#include <target/target_core_device.h>
29#include <target/target_core_tpg.h>
30#include <target/target_core_configfs.h>
31#include <target/configfs_macros.h>
32
33#include "iscsi_target_core.h"
34#include "iscsi_target_parameters.h"
35#include "iscsi_target_device.h"
36#include "iscsi_target_erl0.h"
37#include "iscsi_target_nodeattrib.h"
38#include "iscsi_target_tpg.h"
39#include "iscsi_target_util.h"
40#include "iscsi_target.h"
41#include "iscsi_target_stat.h"
42#include "iscsi_target_configfs.h"
43
44struct target_fabric_configfs *lio_target_fabric_configfs;
45
46struct lio_target_configfs_attribute {
47 struct configfs_attribute attr;
48 ssize_t (*show)(void *, char *);
49 ssize_t (*store)(void *, const char *, size_t);
50};
51
52struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
53 struct config_item *item,
54 struct iscsi_tiqn **tiqn_out)
55{
56 struct se_portal_group *se_tpg = container_of(to_config_group(item),
57 struct se_portal_group, tpg_group);
58 struct iscsi_portal_group *tpg =
59 (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
60 int ret;
61
62 if (!tpg) {
63 pr_err("Unable to locate struct iscsi_portal_group "
64 "pointer\n");
65 return NULL;
66 }
67 ret = iscsit_get_tpg(tpg);
68 if (ret < 0)
69 return NULL;
70
71 *tiqn_out = tpg->tpg_tiqn;
72 return tpg;
73}
74
75/* Start items for lio_target_portal_cit */
76
77static ssize_t lio_target_np_show_sctp(
78 struct se_tpg_np *se_tpg_np,
79 char *page)
80{
81 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
82 struct iscsi_tpg_np, se_tpg_np);
83 struct iscsi_tpg_np *tpg_np_sctp;
84 ssize_t rb;
85
86 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
87 if (tpg_np_sctp)
88 rb = sprintf(page, "1\n");
89 else
90 rb = sprintf(page, "0\n");
91
92 return rb;
93}
94
95static ssize_t lio_target_np_store_sctp(
96 struct se_tpg_np *se_tpg_np,
97 const char *page,
98 size_t count)
99{
100 struct iscsi_np *np;
101 struct iscsi_portal_group *tpg;
102 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
103 struct iscsi_tpg_np, se_tpg_np);
104 struct iscsi_tpg_np *tpg_np_sctp = NULL;
105 char *endptr;
106 u32 op;
107 int ret;
108
109 op = simple_strtoul(page, &endptr, 0);
110 if ((op != 1) && (op != 0)) {
111 pr_err("Illegal value for tpg_enable: %u\n", op);
112 return -EINVAL;
113 }
114 np = tpg_np->tpg_np;
115 if (!np) {
116 pr_err("Unable to locate struct iscsi_np from"
117 " struct iscsi_tpg_np\n");
118 return -EINVAL;
119 }
120
121 tpg = tpg_np->tpg;
122 if (iscsit_get_tpg(tpg) < 0)
123 return -EINVAL;
124
125 if (op) {
126 /*
127 * Use existing np->np_sockaddr for SCTP network portal reference
128 */
129 tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
130 np->np_ip, tpg_np, ISCSI_SCTP_TCP);
131 if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
132 goto out;
133 } else {
134 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
135 if (!tpg_np_sctp)
136 goto out;
137
138 ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
139 if (ret < 0)
140 goto out;
141 }
142
143 iscsit_put_tpg(tpg);
144 return count;
145out:
146 iscsit_put_tpg(tpg);
147 return -EINVAL;
148}
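/*
 * Illustration only (assuming the usual LIO configfs layout under
 * /sys/kernel/config/target/iscsi/): writing the per-portal "sctp"
 * attribute defined below toggles an SCTP child portal on the same
 * np->np_sockaddr, e.g.:
 *
 *   echo 1 > $IQN/$TPG/np/$IP:$PORT/sctp    # add SCTP network portal
 *   echo 0 > $IQN/$TPG/np/$IP:$PORT/sctp    # remove it again
 */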
149
150TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
151
152static struct configfs_attribute *lio_target_portal_attrs[] = {
153 &lio_target_np_sctp.attr,
154 NULL,
155};
156
157/* Stop items for lio_target_portal_cit */
158
159/* Start items for lio_target_np_cit */
160
161#define MAX_PORTAL_LEN 256
162
163struct se_tpg_np *lio_target_call_addnptotpg(
164 struct se_portal_group *se_tpg,
165 struct config_group *group,
166 const char *name)
167{
168 struct iscsi_portal_group *tpg;
169 struct iscsi_tpg_np *tpg_np;
170 char *str, *str2, *ip_str, *port_str;
171 struct __kernel_sockaddr_storage sockaddr;
172 struct sockaddr_in *sock_in;
173 struct sockaddr_in6 *sock_in6;
174 unsigned long port;
175 int ret;
176 char buf[MAX_PORTAL_LEN + 1];
177
178 if (strlen(name) > MAX_PORTAL_LEN) {
179 pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
180 (int)strlen(name), MAX_PORTAL_LEN);
181 return ERR_PTR(-EOVERFLOW);
182 }
183 memset(buf, 0, MAX_PORTAL_LEN + 1);
184 snprintf(buf, MAX_PORTAL_LEN, "%s", name);
185
186 memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
187
188 str = strstr(buf, "[");
189 if (str) {
190 const char *end;
191
192 str2 = strstr(str, "]");
193 if (!str2) {
194 pr_err("Unable to locate trailing \"]\""
195 " in IPv6 iSCSI network portal address\n");
196 return ERR_PTR(-EINVAL);
197 }
198 str++; /* Skip over leading "[" */
199 *str2 = '\0'; /* Terminate the IPv6 address */
200 str2++; /* Skip over the "]" */
201 port_str = strstr(str2, ":");
202 if (!port_str) {
203 pr_err("Unable to locate \":port\""
204 " in IPv6 iSCSI network portal address\n");
205 return ERR_PTR(-EINVAL);
206 }
207 *port_str = '\0'; /* Terminate string for IP */
208 port_str++; /* Skip over ":" */
209
210 ret = strict_strtoul(port_str, 0, &port);
211 if (ret < 0) {
212 pr_err("strict_strtoul() failed for port_str: %d\n", ret);
213 return ERR_PTR(ret);
214 }
215 sock_in6 = (struct sockaddr_in6 *)&sockaddr;
216 sock_in6->sin6_family = AF_INET6;
217 sock_in6->sin6_port = htons((unsigned short)port);
218 ret = in6_pton(str, IPV6_ADDRESS_SPACE,
219 (void *)&sock_in6->sin6_addr.in6_u, -1, &end);
220 if (ret <= 0) {
221 pr_err("in6_pton returned: %d\n", ret);
222 return ERR_PTR(-EINVAL);
223 }
224 } else {
225 str = ip_str = &buf[0];
226 port_str = strstr(ip_str, ":");
227 if (!port_str) {
228 pr_err("Unable to locate \":port\""
229 " in IPv4 iSCSI network portal address\n");
230 return ERR_PTR(-EINVAL);
231 }
232 *port_str = '\0'; /* Terminate string for IP */
233 port_str++; /* Skip over ":" */
234
235 ret = strict_strtoul(port_str, 0, &port);
236 if (ret < 0) {
237 pr_err("strict_strtoul() failed for port_str: %d\n", ret);
238 return ERR_PTR(ret);
239 }
240 sock_in = (struct sockaddr_in *)&sockaddr;
241 sock_in->sin_family = AF_INET;
242 sock_in->sin_port = htons((unsigned short)port);
243 sock_in->sin_addr.s_addr = in_aton(ip_str);
244 }
245 tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
246 ret = iscsit_get_tpg(tpg);
247 if (ret < 0)
248 return ERR_PTR(-EINVAL);
249
250 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
251 " PORTAL: %s\n",
252 config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
253 tpg->tpgt, name);
254 /*
255 * Assume ISCSI_TCP by default. Other network portals for other
256 * iSCSI fabrics:
257 *
258 * Traditional iSCSI over SCTP (initial support)
259 * iSER/TCP (TODO, hardware available)
260 * iSER/SCTP (TODO, software emulation with osc-iwarp)
261 * iSER/IB (TODO, hardware available)
262 *
263	 * can be enabled with attributes under
264 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
265 *
266 */
267 tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
268 ISCSI_TCP);
269 if (IS_ERR(tpg_np)) {
270 iscsit_put_tpg(tpg);
271 return ERR_PTR(PTR_ERR(tpg_np));
272 }
273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
274
275 iscsit_put_tpg(tpg);
276 return &tpg_np->se_tpg_np;
277}
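/*
 * For illustration, the parser above accepts portal directory names of the
 * form "10.10.0.1:3260" for IPv4 and "[fe80::1]:3260" for IPv6 (the square
 * brackets are required so the ":port" separator can be located), e.g.:
 *
 *   mkdir $TPG/np/10.10.0.1:3260
 *   mkdir $TPG/np/[fe80::1]:3260
 *
 * (paths assume the usual configfs layout and are not taken from this file)
 */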
278
279static void lio_target_call_delnpfromtpg(
280 struct se_tpg_np *se_tpg_np)
281{
282 struct iscsi_portal_group *tpg;
283 struct iscsi_tpg_np *tpg_np;
284 struct se_portal_group *se_tpg;
285 int ret;
286
287 tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
288 tpg = tpg_np->tpg;
289 ret = iscsit_get_tpg(tpg);
290 if (ret < 0)
291 return;
292
293 se_tpg = &tpg->tpg_se_tpg;
294 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
295 " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
296 tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
297
298 ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
299 if (ret < 0)
300 goto out;
301
302 pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
303out:
304 iscsit_put_tpg(tpg);
305}
306
307/* End items for lio_target_np_cit */
308
309/* Start items for lio_target_nacl_attrib_cit */
310
311#define DEF_NACL_ATTRIB(name) \
312static ssize_t iscsi_nacl_attrib_show_##name( \
313 struct se_node_acl *se_nacl, \
314 char *page) \
315{ \
316 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
317 se_node_acl); \
318 \
319 return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
320} \
321 \
322static ssize_t iscsi_nacl_attrib_store_##name( \
323 struct se_node_acl *se_nacl, \
324 const char *page, \
325 size_t count) \
326{ \
327 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
328 se_node_acl); \
329 char *endptr; \
330 u32 val; \
331 int ret; \
332 \
333 val = simple_strtoul(page, &endptr, 0); \
334 ret = iscsit_na_##name(nacl, val); \
335 if (ret < 0) \
336 return ret; \
337 \
338 return count; \
339}
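/*
 * For reference, DEF_NACL_ATTRIB(dataout_timeout) expands to
 * iscsi_nacl_attrib_show_dataout_timeout(), which reports
 * ISCSI_NODE_ATTRIB(nacl)->dataout_timeout, and
 * iscsi_nacl_attrib_store_dataout_timeout(), which hands the parsed
 * value to iscsit_na_dataout_timeout().
 */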
340
341#define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
342/*
343 * Define iscsi_node_attrib_s_dataout_timeout
344 */
345DEF_NACL_ATTRIB(dataout_timeout);
346NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
347/*
348 * Define iscsi_node_attrib_s_dataout_timeout_retries
349 */
350DEF_NACL_ATTRIB(dataout_timeout_retries);
351NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
352/*
353 * Define iscsi_node_attrib_s_default_erl
354 */
355DEF_NACL_ATTRIB(default_erl);
356NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
357/*
358 * Define iscsi_node_attrib_s_nopin_timeout
359 */
360DEF_NACL_ATTRIB(nopin_timeout);
361NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
362/*
363 * Define iscsi_node_attrib_s_nopin_response_timeout
364 */
365DEF_NACL_ATTRIB(nopin_response_timeout);
366NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
367/*
368 * Define iscsi_node_attrib_s_random_datain_pdu_offsets
369 */
370DEF_NACL_ATTRIB(random_datain_pdu_offsets);
371NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
372/*
373 * Define iscsi_node_attrib_s_random_datain_seq_offsets
374 */
375DEF_NACL_ATTRIB(random_datain_seq_offsets);
376NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
377/*
378 * Define iscsi_node_attrib_s_random_r2t_offsets
379 */
380DEF_NACL_ATTRIB(random_r2t_offsets);
381NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
382
383static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
384 &iscsi_nacl_attrib_dataout_timeout.attr,
385 &iscsi_nacl_attrib_dataout_timeout_retries.attr,
386 &iscsi_nacl_attrib_default_erl.attr,
387 &iscsi_nacl_attrib_nopin_timeout.attr,
388 &iscsi_nacl_attrib_nopin_response_timeout.attr,
389 &iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
390 &iscsi_nacl_attrib_random_datain_seq_offsets.attr,
391 &iscsi_nacl_attrib_random_r2t_offsets.attr,
392 NULL,
393};
394
395/* End items for lio_target_nacl_attrib_cit */
396
397/* Start items for lio_target_nacl_auth_cit */
398
399#define __DEF_NACL_AUTH_STR(prefix, name, flags) \
400static ssize_t __iscsi_##prefix##_show_##name( \
401 struct iscsi_node_acl *nacl, \
402 char *page) \
403{ \
404 struct iscsi_node_auth *auth = &nacl->node_auth; \
405 \
406 if (!capable(CAP_SYS_ADMIN)) \
407 return -EPERM; \
408 return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
409} \
410 \
411static ssize_t __iscsi_##prefix##_store_##name( \
412 struct iscsi_node_acl *nacl, \
413 const char *page, \
414 size_t count) \
415{ \
416 struct iscsi_node_auth *auth = &nacl->node_auth; \
417 \
418 if (!capable(CAP_SYS_ADMIN)) \
419 return -EPERM; \
420 \
421 snprintf(auth->name, PAGE_SIZE, "%s", page); \
422 if (!strncmp("NULL", auth->name, 4)) \
423 auth->naf_flags &= ~flags; \
424 else \
425 auth->naf_flags |= flags; \
426 \
427 if ((auth->naf_flags & NAF_USERID_IN_SET) && \
428 (auth->naf_flags & NAF_PASSWORD_IN_SET)) \
429 auth->authenticate_target = 1; \
430 else \
431 auth->authenticate_target = 0; \
432 \
433 return count; \
434}
435
436#define __DEF_NACL_AUTH_INT(prefix, name) \
437static ssize_t __iscsi_##prefix##_show_##name( \
438 struct iscsi_node_acl *nacl, \
439 char *page) \
440{ \
441 struct iscsi_node_auth *auth = &nacl->node_auth; \
442 \
443 if (!capable(CAP_SYS_ADMIN)) \
444 return -EPERM; \
445 \
446 return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
447}
448
449#define DEF_NACL_AUTH_STR(name, flags) \
450 __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
451static ssize_t iscsi_nacl_auth_show_##name( \
452 struct se_node_acl *nacl, \
453 char *page) \
454{ \
455 return __iscsi_nacl_auth_show_##name(container_of(nacl, \
456 struct iscsi_node_acl, se_node_acl), page); \
457} \
458static ssize_t iscsi_nacl_auth_store_##name( \
459 struct se_node_acl *nacl, \
460 const char *page, \
461 size_t count) \
462{ \
463 return __iscsi_nacl_auth_store_##name(container_of(nacl, \
464 struct iscsi_node_acl, se_node_acl), page, count); \
465}
466
467#define DEF_NACL_AUTH_INT(name) \
468 __DEF_NACL_AUTH_INT(nacl_auth, name) \
469static ssize_t iscsi_nacl_auth_show_##name( \
470 struct se_node_acl *nacl, \
471 char *page) \
472{ \
473 return __iscsi_nacl_auth_show_##name(container_of(nacl, \
474 struct iscsi_node_acl, se_node_acl), page); \
475}
476
477#define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
478#define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
479
480/*
481 * One-way authentication userid
482 */
483DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
484AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
485/*
486 * One-way authentication password
487 */
488DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
489AUTH_ATTR(password, S_IRUGO | S_IWUSR);
490/*
491 * Enforce mutual authentication
492 */
493DEF_NACL_AUTH_INT(authenticate_target);
494AUTH_ATTR_RO(authenticate_target);
495/*
496 * Mutual authentication userid
497 */
498DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
499AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
500/*
501 * Mutual authentication password
502 */
503DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
504AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
505
506static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
507 &iscsi_nacl_auth_userid.attr,
508 &iscsi_nacl_auth_password.attr,
509 &iscsi_nacl_auth_authenticate_target.attr,
510 &iscsi_nacl_auth_userid_mutual.attr,
511 &iscsi_nacl_auth_password_mutual.attr,
512 NULL,
513};
514
515/* End items for lio_target_nacl_auth_cit */
516
517/* Start items for lio_target_nacl_param_cit */
518
519#define DEF_NACL_PARAM(name) \
520static ssize_t iscsi_nacl_param_show_##name( \
521 struct se_node_acl *se_nacl, \
522 char *page) \
523{ \
524 struct iscsi_session *sess; \
525 struct se_session *se_sess; \
526 ssize_t rb; \
527 \
528 spin_lock_bh(&se_nacl->nacl_sess_lock); \
529 se_sess = se_nacl->nacl_sess; \
530 if (!se_sess) { \
531 rb = snprintf(page, PAGE_SIZE, \
532 "No Active iSCSI Session\n"); \
533 } else { \
534 sess = se_sess->fabric_sess_ptr; \
535 rb = snprintf(page, PAGE_SIZE, "%u\n", \
536 (u32)sess->sess_ops->name); \
537 } \
538 spin_unlock_bh(&se_nacl->nacl_sess_lock); \
539 \
540 return rb; \
541}
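/*
 * These per-NodeACL parameter attributes are read-only snapshots of the
 * values negotiated for the currently active session (if any); with no
 * active session they report "No Active iSCSI Session".
 */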
542
543#define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
544
545DEF_NACL_PARAM(MaxConnections);
546NACL_PARAM_ATTR(MaxConnections);
547
548DEF_NACL_PARAM(InitialR2T);
549NACL_PARAM_ATTR(InitialR2T);
550
551DEF_NACL_PARAM(ImmediateData);
552NACL_PARAM_ATTR(ImmediateData);
553
554DEF_NACL_PARAM(MaxBurstLength);
555NACL_PARAM_ATTR(MaxBurstLength);
556
557DEF_NACL_PARAM(FirstBurstLength);
558NACL_PARAM_ATTR(FirstBurstLength);
559
560DEF_NACL_PARAM(DefaultTime2Wait);
561NACL_PARAM_ATTR(DefaultTime2Wait);
562
563DEF_NACL_PARAM(DefaultTime2Retain);
564NACL_PARAM_ATTR(DefaultTime2Retain);
565
566DEF_NACL_PARAM(MaxOutstandingR2T);
567NACL_PARAM_ATTR(MaxOutstandingR2T);
568
569DEF_NACL_PARAM(DataPDUInOrder);
570NACL_PARAM_ATTR(DataPDUInOrder);
571
572DEF_NACL_PARAM(DataSequenceInOrder);
573NACL_PARAM_ATTR(DataSequenceInOrder);
574
575DEF_NACL_PARAM(ErrorRecoveryLevel);
576NACL_PARAM_ATTR(ErrorRecoveryLevel);
577
578static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
579 &iscsi_nacl_param_MaxConnections.attr,
580 &iscsi_nacl_param_InitialR2T.attr,
581 &iscsi_nacl_param_ImmediateData.attr,
582 &iscsi_nacl_param_MaxBurstLength.attr,
583 &iscsi_nacl_param_FirstBurstLength.attr,
584 &iscsi_nacl_param_DefaultTime2Wait.attr,
585 &iscsi_nacl_param_DefaultTime2Retain.attr,
586 &iscsi_nacl_param_MaxOutstandingR2T.attr,
587 &iscsi_nacl_param_DataPDUInOrder.attr,
588 &iscsi_nacl_param_DataSequenceInOrder.attr,
589 &iscsi_nacl_param_ErrorRecoveryLevel.attr,
590 NULL,
591};
592
593/* End items for lio_target_nacl_param_cit */
594
595/* Start items for lio_target_acl_cit */
596
597static ssize_t lio_target_nacl_show_info(
598 struct se_node_acl *se_nacl,
599 char *page)
600{
601 struct iscsi_session *sess;
602 struct iscsi_conn *conn;
603 struct se_session *se_sess;
604 ssize_t rb = 0;
605
606 spin_lock_bh(&se_nacl->nacl_sess_lock);
607 se_sess = se_nacl->nacl_sess;
608 if (!se_sess) {
609 rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
610 " Endpoint: %s\n", se_nacl->initiatorname);
611 } else {
612 sess = se_sess->fabric_sess_ptr;
613
614 if (sess->sess_ops->InitiatorName)
615 rb += sprintf(page+rb, "InitiatorName: %s\n",
616 sess->sess_ops->InitiatorName);
617 if (sess->sess_ops->InitiatorAlias)
618 rb += sprintf(page+rb, "InitiatorAlias: %s\n",
619 sess->sess_ops->InitiatorAlias);
620
621 rb += sprintf(page+rb, "LIO Session ID: %u "
622 "ISID: 0x%02x %02x %02x %02x %02x %02x "
623 "TSIH: %hu ", sess->sid,
624 sess->isid[0], sess->isid[1], sess->isid[2],
625 sess->isid[3], sess->isid[4], sess->isid[5],
626 sess->tsih);
627 rb += sprintf(page+rb, "SessionType: %s\n",
628 (sess->sess_ops->SessionType) ?
629 "Discovery" : "Normal");
630 rb += sprintf(page+rb, "Session State: ");
631 switch (sess->session_state) {
632 case TARG_SESS_STATE_FREE:
633 rb += sprintf(page+rb, "TARG_SESS_FREE\n");
634 break;
635 case TARG_SESS_STATE_ACTIVE:
636 rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
637 break;
638 case TARG_SESS_STATE_LOGGED_IN:
639 rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
640 break;
641 case TARG_SESS_STATE_FAILED:
642 rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
643 break;
644 case TARG_SESS_STATE_IN_CONTINUE:
645 rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
646 break;
647 default:
648 rb += sprintf(page+rb, "ERROR: Unknown Session"
649 " State!\n");
650 break;
651 }
652
653 rb += sprintf(page+rb, "---------------------[iSCSI Session"
654 " Values]-----------------------\n");
655 rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
656 " : MaxCmdSN : ITT : TTT\n");
657 rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
658 " 0x%08x 0x%08x\n",
659 sess->cmdsn_window,
660 (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
661 sess->exp_cmd_sn, sess->max_cmd_sn,
662 sess->init_task_tag, sess->targ_xfer_tag);
663 rb += sprintf(page+rb, "----------------------[iSCSI"
664 " Connections]-------------------------\n");
665
666 spin_lock(&sess->conn_lock);
667 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
668 rb += sprintf(page+rb, "CID: %hu Connection"
669 " State: ", conn->cid);
670 switch (conn->conn_state) {
671 case TARG_CONN_STATE_FREE:
672 rb += sprintf(page+rb,
673 "TARG_CONN_STATE_FREE\n");
674 break;
675 case TARG_CONN_STATE_XPT_UP:
676 rb += sprintf(page+rb,
677 "TARG_CONN_STATE_XPT_UP\n");
678 break;
679 case TARG_CONN_STATE_IN_LOGIN:
680 rb += sprintf(page+rb,
681 "TARG_CONN_STATE_IN_LOGIN\n");
682 break;
683 case TARG_CONN_STATE_LOGGED_IN:
684 rb += sprintf(page+rb,
685 "TARG_CONN_STATE_LOGGED_IN\n");
686 break;
687 case TARG_CONN_STATE_IN_LOGOUT:
688 rb += sprintf(page+rb,
689 "TARG_CONN_STATE_IN_LOGOUT\n");
690 break;
691 case TARG_CONN_STATE_LOGOUT_REQUESTED:
692 rb += sprintf(page+rb,
693 "TARG_CONN_STATE_LOGOUT_REQUESTED\n");
694 break;
695 case TARG_CONN_STATE_CLEANUP_WAIT:
696 rb += sprintf(page+rb,
697 "TARG_CONN_STATE_CLEANUP_WAIT\n");
698 break;
699 default:
700 rb += sprintf(page+rb,
701 "ERROR: Unknown Connection State!\n");
702 break;
703 }
704
705 rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
706 (conn->network_transport == ISCSI_TCP) ?
707 "TCP" : "SCTP");
708 rb += sprintf(page+rb, " StatSN: 0x%08x\n",
709 conn->stat_sn);
710 }
711 spin_unlock(&sess->conn_lock);
712 }
713 spin_unlock_bh(&se_nacl->nacl_sess_lock);
714
715 return rb;
716}
717
718TF_NACL_BASE_ATTR_RO(lio_target, info);
719
720static ssize_t lio_target_nacl_show_cmdsn_depth(
721 struct se_node_acl *se_nacl,
722 char *page)
723{
724 return sprintf(page, "%u\n", se_nacl->queue_depth);
725}
726
727static ssize_t lio_target_nacl_store_cmdsn_depth(
728 struct se_node_acl *se_nacl,
729 const char *page,
730 size_t count)
731{
732 struct se_portal_group *se_tpg = se_nacl->se_tpg;
733 struct iscsi_portal_group *tpg = container_of(se_tpg,
734 struct iscsi_portal_group, tpg_se_tpg);
735 struct config_item *acl_ci, *tpg_ci, *wwn_ci;
736 char *endptr;
737 u32 cmdsn_depth = 0;
738 int ret;
739
740 cmdsn_depth = simple_strtoul(page, &endptr, 0);
741 if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
742 pr_err("Passed cmdsn_depth: %u exceeds"
743 " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
744 TA_DEFAULT_CMDSN_DEPTH_MAX);
745 return -EINVAL;
746 }
747 acl_ci = &se_nacl->acl_group.cg_item;
748 if (!acl_ci) {
749 pr_err("Unable to locatel acl_ci\n");
750 return -EINVAL;
751 }
752 tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
753 if (!tpg_ci) {
754 pr_err("Unable to locate tpg_ci\n");
755 return -EINVAL;
756 }
757 wwn_ci = &tpg_ci->ci_group->cg_item;
758 if (!wwn_ci) {
759 pr_err("Unable to locate config_item wwn_ci\n");
760 return -EINVAL;
761 }
762
763 if (iscsit_get_tpg(tpg) < 0)
764 return -EINVAL;
765 /*
766 * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
767 */
768 ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
769 config_item_name(acl_ci), cmdsn_depth, 1);
770
771 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
772 "InitiatorName: %s\n", config_item_name(wwn_ci),
773 config_item_name(tpg_ci), cmdsn_depth,
774 config_item_name(acl_ci));
775
776 iscsit_put_tpg(tpg);
777 return (!ret) ? count : (ssize_t)ret;
778}
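/*
 * Illustration only (configfs path assumed, not taken from this file): the
 * per-initiator queue depth can be tuned at runtime with something like
 *
 *   echo 64 > $IQN/$TPG/acls/$INITIATOR_IQN/cmdsn_depth
 *
 * where the value must not exceed TA_DEFAULT_CMDSN_DEPTH_MAX (512).
 */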
779
780TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
781
782static struct configfs_attribute *lio_target_initiator_attrs[] = {
783 &lio_target_nacl_info.attr,
784 &lio_target_nacl_cmdsn_depth.attr,
785 NULL,
786};
787
788static struct se_node_acl *lio_tpg_alloc_fabric_acl(
789 struct se_portal_group *se_tpg)
790{
791 struct iscsi_node_acl *acl;
792
793 acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
794 if (!acl) {
795 pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
796 return NULL;
797 }
798
799 return &acl->se_node_acl;
800}
801
802static struct se_node_acl *lio_target_make_nodeacl(
803 struct se_portal_group *se_tpg,
804 struct config_group *group,
805 const char *name)
806{
807 struct config_group *stats_cg;
808 struct iscsi_node_acl *acl;
809 struct se_node_acl *se_nacl_new, *se_nacl;
810 struct iscsi_portal_group *tpg = container_of(se_tpg,
811 struct iscsi_portal_group, tpg_se_tpg);
812 u32 cmdsn_depth;
813
814 se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
815 if (!se_nacl_new)
816 return ERR_PTR(-ENOMEM);
817
818 acl = container_of(se_nacl_new, struct iscsi_node_acl,
819 se_node_acl);
820
821 cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
822 /*
823 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
824	 * when converting a NodeACL from demo mode -> explicit
825 */
826 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
827 name, cmdsn_depth);
828 if (IS_ERR(se_nacl))
829 return se_nacl;
830
831 stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
832
833 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
834 GFP_KERNEL);
835 if (!stats_cg->default_groups) {
836 pr_err("Unable to allocate memory for"
837 " stats_cg->default_groups\n");
838 core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
839 kfree(acl);
840 return ERR_PTR(-ENOMEM);
841 }
842
843 stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
844 stats_cg->default_groups[1] = NULL;
845 config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
846 "iscsi_sess_stats", &iscsi_stat_sess_cit);
847
848 return se_nacl;
849}
850
851static void lio_target_drop_nodeacl(
852 struct se_node_acl *se_nacl)
853{
854 struct se_portal_group *se_tpg = se_nacl->se_tpg;
855 struct iscsi_node_acl *acl = container_of(se_nacl,
856 struct iscsi_node_acl, se_node_acl);
857 struct config_item *df_item;
858 struct config_group *stats_cg;
859 int i;
860
861 stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
862 for (i = 0; stats_cg->default_groups[i]; i++) {
863 df_item = &stats_cg->default_groups[i]->cg_item;
864 stats_cg->default_groups[i] = NULL;
865 config_item_put(df_item);
866 }
867 kfree(stats_cg->default_groups);
868
869 core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
870 kfree(acl);
871}
872
873/* End items for lio_target_acl_cit */
874
875/* Start items for lio_target_tpg_attrib_cit */
876
877#define DEF_TPG_ATTRIB(name) \
878 \
879static ssize_t iscsi_tpg_attrib_show_##name( \
880 struct se_portal_group *se_tpg, \
881 char *page) \
882{ \
883 struct iscsi_portal_group *tpg = container_of(se_tpg, \
884 struct iscsi_portal_group, tpg_se_tpg); \
885 ssize_t rb; \
886 \
887 if (iscsit_get_tpg(tpg) < 0) \
888 return -EINVAL; \
889 \
890 rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
891 iscsit_put_tpg(tpg); \
892 return rb; \
893} \
894 \
895static ssize_t iscsi_tpg_attrib_store_##name( \
896 struct se_portal_group *se_tpg, \
897 const char *page, \
898 size_t count) \
899{ \
900 struct iscsi_portal_group *tpg = container_of(se_tpg, \
901 struct iscsi_portal_group, tpg_se_tpg); \
902 char *endptr; \
903 u32 val; \
904 int ret; \
905 \
906 if (iscsit_get_tpg(tpg) < 0) \
907 return -EINVAL; \
908 \
909 val = simple_strtoul(page, &endptr, 0); \
910 ret = iscsit_ta_##name(tpg, val); \
911 if (ret < 0) \
912 goto out; \
913 \
914 iscsit_put_tpg(tpg); \
915 return count; \
916out: \
917 iscsit_put_tpg(tpg); \
918 return ret; \
919}
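/*
 * For reference, DEF_TPG_ATTRIB(authentication) generates
 * iscsi_tpg_attrib_show_authentication()/iscsi_tpg_attrib_store_authentication();
 * the store side hands the parsed value to iscsit_ta_authentication() while
 * holding a TPG reference.
 */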
920
921#define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
922
923/*
924 * Define iscsi_tpg_attrib_s_authentication
925 */
926DEF_TPG_ATTRIB(authentication);
927TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
928/*
929 * Define iscsi_tpg_attrib_s_login_timeout
930 */
931DEF_TPG_ATTRIB(login_timeout);
932TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
933/*
934 * Define iscsi_tpg_attrib_s_netif_timeout
935 */
936DEF_TPG_ATTRIB(netif_timeout);
937TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
938/*
939 * Define iscsi_tpg_attrib_s_generate_node_acls
940 */
941DEF_TPG_ATTRIB(generate_node_acls);
942TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
943/*
944 * Define iscsi_tpg_attrib_s_default_cmdsn_depth
945 */
946DEF_TPG_ATTRIB(default_cmdsn_depth);
947TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
948/*
949 * Define iscsi_tpg_attrib_s_cache_dynamic_acls
950 */
951DEF_TPG_ATTRIB(cache_dynamic_acls);
952TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
953/*
954 * Define iscsi_tpg_attrib_s_demo_mode_write_protect
955 */
956DEF_TPG_ATTRIB(demo_mode_write_protect);
957TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
958/*
959 * Define iscsi_tpg_attrib_s_prod_mode_write_protect
960 */
961DEF_TPG_ATTRIB(prod_mode_write_protect);
962TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
963
964static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
965 &iscsi_tpg_attrib_authentication.attr,
966 &iscsi_tpg_attrib_login_timeout.attr,
967 &iscsi_tpg_attrib_netif_timeout.attr,
968 &iscsi_tpg_attrib_generate_node_acls.attr,
969 &iscsi_tpg_attrib_default_cmdsn_depth.attr,
970 &iscsi_tpg_attrib_cache_dynamic_acls.attr,
971 &iscsi_tpg_attrib_demo_mode_write_protect.attr,
972 &iscsi_tpg_attrib_prod_mode_write_protect.attr,
973 NULL,
974};
975
976/* End items for lio_target_tpg_attrib_cit */
977
978/* Start items for lio_target_tpg_param_cit */
979
980#define DEF_TPG_PARAM(name) \
981static ssize_t iscsi_tpg_param_show_##name( \
982 struct se_portal_group *se_tpg, \
983 char *page) \
984{ \
985 struct iscsi_portal_group *tpg = container_of(se_tpg, \
986 struct iscsi_portal_group, tpg_se_tpg); \
987 struct iscsi_param *param; \
988 ssize_t rb; \
989 \
990 if (iscsit_get_tpg(tpg) < 0) \
991 return -EINVAL; \
992 \
993 param = iscsi_find_param_from_key(__stringify(name), \
994 tpg->param_list); \
995 if (!param) { \
996 iscsit_put_tpg(tpg); \
997 return -EINVAL; \
998 } \
999 rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
1000 \
1001 iscsit_put_tpg(tpg); \
1002 return rb; \
1003} \
1004static ssize_t iscsi_tpg_param_store_##name( \
1005 struct se_portal_group *se_tpg, \
1006 const char *page, \
1007 size_t count) \
1008{ \
1009 struct iscsi_portal_group *tpg = container_of(se_tpg, \
1010 struct iscsi_portal_group, tpg_se_tpg); \
1011 char *buf; \
1012 int ret; \
1013 \
1014 buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
1015 if (!buf) \
1016 return -ENOMEM; \
1017 snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
1018 buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
1019 \
1020 if (iscsit_get_tpg(tpg) < 0) { \
1021 kfree(buf); \
1022 return -EINVAL; \
1023 } \
1024 \
1025 ret = iscsi_change_param_value(buf, tpg->param_list, 1); \
1026 if (ret < 0) \
1027 goto out; \
1028 \
1029 kfree(buf); \
1030 iscsit_put_tpg(tpg); \
1031 return count; \
1032out: \
1033 kfree(buf); \
1034 iscsit_put_tpg(tpg); \
1035 return -EINVAL; \
1036}
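/*
 * The store side above rebuilds the "Key=Value" text expected by
 * iscsi_change_param_value(); for illustration (paths and values assumed,
 * not taken from this file):
 *
 *   echo CRC32C,None > $IQN/$TPG/param/HeaderDigest
 *   echo No > $IQN/$TPG/param/InitialR2T
 */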
1037
1038#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
1039
1040DEF_TPG_PARAM(AuthMethod);
1041TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
1042
1043DEF_TPG_PARAM(HeaderDigest);
1044TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
1045
1046DEF_TPG_PARAM(DataDigest);
1047TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
1048
1049DEF_TPG_PARAM(MaxConnections);
1050TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
1051
1052DEF_TPG_PARAM(TargetAlias);
1053TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
1054
1055DEF_TPG_PARAM(InitialR2T);
1056TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
1057
1058DEF_TPG_PARAM(ImmediateData);
1059TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
1060
1061DEF_TPG_PARAM(MaxRecvDataSegmentLength);
1062TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
1063
1064DEF_TPG_PARAM(MaxBurstLength);
1065TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
1066
1067DEF_TPG_PARAM(FirstBurstLength);
1068TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
1069
1070DEF_TPG_PARAM(DefaultTime2Wait);
1071TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
1072
1073DEF_TPG_PARAM(DefaultTime2Retain);
1074TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
1075
1076DEF_TPG_PARAM(MaxOutstandingR2T);
1077TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
1078
1079DEF_TPG_PARAM(DataPDUInOrder);
1080TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
1081
1082DEF_TPG_PARAM(DataSequenceInOrder);
1083TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
1084
1085DEF_TPG_PARAM(ErrorRecoveryLevel);
1086TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
1087
1088DEF_TPG_PARAM(IFMarker);
1089TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
1090
1091DEF_TPG_PARAM(OFMarker);
1092TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
1093
1094DEF_TPG_PARAM(IFMarkInt);
1095TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
1096
1097DEF_TPG_PARAM(OFMarkInt);
1098TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
1099
1100static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
1101 &iscsi_tpg_param_AuthMethod.attr,
1102 &iscsi_tpg_param_HeaderDigest.attr,
1103 &iscsi_tpg_param_DataDigest.attr,
1104 &iscsi_tpg_param_MaxConnections.attr,
1105 &iscsi_tpg_param_TargetAlias.attr,
1106 &iscsi_tpg_param_InitialR2T.attr,
1107 &iscsi_tpg_param_ImmediateData.attr,
1108 &iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
1109 &iscsi_tpg_param_MaxBurstLength.attr,
1110 &iscsi_tpg_param_FirstBurstLength.attr,
1111 &iscsi_tpg_param_DefaultTime2Wait.attr,
1112 &iscsi_tpg_param_DefaultTime2Retain.attr,
1113 &iscsi_tpg_param_MaxOutstandingR2T.attr,
1114 &iscsi_tpg_param_DataPDUInOrder.attr,
1115 &iscsi_tpg_param_DataSequenceInOrder.attr,
1116 &iscsi_tpg_param_ErrorRecoveryLevel.attr,
1117 &iscsi_tpg_param_IFMarker.attr,
1118 &iscsi_tpg_param_OFMarker.attr,
1119 &iscsi_tpg_param_IFMarkInt.attr,
1120 &iscsi_tpg_param_OFMarkInt.attr,
1121 NULL,
1122};
1123
1124/* End items for lio_target_tpg_param_cit */
1125
1126/* Start items for lio_target_tpg_cit */
1127
1128static ssize_t lio_target_tpg_show_enable(
1129 struct se_portal_group *se_tpg,
1130 char *page)
1131{
1132 struct iscsi_portal_group *tpg = container_of(se_tpg,
1133 struct iscsi_portal_group, tpg_se_tpg);
1134 ssize_t len;
1135
1136 spin_lock(&tpg->tpg_state_lock);
1137 len = sprintf(page, "%d\n",
1138 (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
1139 spin_unlock(&tpg->tpg_state_lock);
1140
1141 return len;
1142}
1143
1144static ssize_t lio_target_tpg_store_enable(
1145 struct se_portal_group *se_tpg,
1146 const char *page,
1147 size_t count)
1148{
1149 struct iscsi_portal_group *tpg = container_of(se_tpg,
1150 struct iscsi_portal_group, tpg_se_tpg);
1151 char *endptr;
1152 u32 op;
1153 int ret = 0;
1154
1155 op = simple_strtoul(page, &endptr, 0);
1156 if ((op != 1) && (op != 0)) {
1157 pr_err("Illegal value for tpg_enable: %u\n", op);
1158 return -EINVAL;
1159 }
1160
1161 ret = iscsit_get_tpg(tpg);
1162 if (ret < 0)
1163 return -EINVAL;
1164
1165 if (op) {
1166 ret = iscsit_tpg_enable_portal_group(tpg);
1167 if (ret < 0)
1168 goto out;
1169 } else {
1170 /*
1171 * iscsit_tpg_disable_portal_group() assumes force=1
1172 */
1173 ret = iscsit_tpg_disable_portal_group(tpg, 1);
1174 if (ret < 0)
1175 goto out;
1176 }
1177
1178 iscsit_put_tpg(tpg);
1179 return count;
1180out:
1181 iscsit_put_tpg(tpg);
1182 return -EINVAL;
1183}
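/*
 * Illustration only: a TPG is brought up or torn down by writing its
 * "enable" attribute, e.g.
 *
 *   echo 1 > $IQN/tpgt_1/enable
 *   echo 0 > $IQN/tpgt_1/enable
 */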
1184
1185TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
1186
1187static struct configfs_attribute *lio_target_tpg_attrs[] = {
1188 &lio_target_tpg_enable.attr,
1189 NULL,
1190};
1191
1192/* End items for lio_target_tpg_cit */
1193
1194/* Start items for lio_target_tiqn_cit */
1195
1196struct se_portal_group *lio_target_tiqn_addtpg(
1197 struct se_wwn *wwn,
1198 struct config_group *group,
1199 const char *name)
1200{
1201 struct iscsi_portal_group *tpg;
1202 struct iscsi_tiqn *tiqn;
1203 char *tpgt_str, *end_ptr;
1204 int ret = 0;
1205 unsigned short int tpgt;
1206
1207 tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1208 /*
1209 * Only tpgt_# directory groups can be created below
1210 * target/iscsi/iqn.superturodiskarry/
1211 */
1212 tpgt_str = strstr(name, "tpgt_");
1213 if (!tpgt_str) {
1214 pr_err("Unable to locate \"tpgt_#\" directory"
1215 " group\n");
1216 return NULL;
1217 }
1218 tpgt_str += 5; /* Skip ahead of "tpgt_" */
1219 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
1220
1221 tpg = iscsit_alloc_portal_group(tiqn, tpgt);
1222 if (!tpg)
1223 return NULL;
1224
1225 ret = core_tpg_register(
1226 &lio_target_fabric_configfs->tf_ops,
1227 wwn, &tpg->tpg_se_tpg, (void *)tpg,
1228 TRANSPORT_TPG_TYPE_NORMAL);
1229 if (ret < 0)
1230 return NULL;
1231
1232 ret = iscsit_tpg_add_portal_group(tiqn, tpg);
1233 if (ret != 0)
1234 goto out;
1235
1236 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1237 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
1238 name);
1239 return &tpg->tpg_se_tpg;
1240out:
1241 core_tpg_deregister(&tpg->tpg_se_tpg);
1242 kfree(tpg);
1243 return NULL;
1244}
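/*
 * Illustration only: TPG directories are created below an IQN group and
 * must be named "tpgt_<n>", e.g.
 *
 *   mkdir -p /sys/kernel/config/target/iscsi/$IQN/tpgt_1
 *
 * (the exact mount point is an assumption; only the "tpgt_" prefix is
 * enforced by the code above).
 */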
1245
1246void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
1247{
1248 struct iscsi_portal_group *tpg;
1249 struct iscsi_tiqn *tiqn;
1250
1251 tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
1252 tiqn = tpg->tpg_tiqn;
1253 /*
1254 * iscsit_tpg_del_portal_group() assumes force=1
1255 */
1256 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
1257 iscsit_tpg_del_portal_group(tiqn, tpg, 1);
1258}
1259
1260/* End items for lio_target_tiqn_cit */
1261
1262/* Start LIO-Target TIQN struct config_item lio_target_cit */
1263
1264static ssize_t lio_target_wwn_show_attr_lio_version(
1265 struct target_fabric_configfs *tf,
1266 char *page)
1267{
1268 return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
1269}
1270
1271TF_WWN_ATTR_RO(lio_target, lio_version);
1272
1273static struct configfs_attribute *lio_target_wwn_attrs[] = {
1274 &lio_target_wwn_lio_version.attr,
1275 NULL,
1276};
1277
1278struct se_wwn *lio_target_call_coreaddtiqn(
1279 struct target_fabric_configfs *tf,
1280 struct config_group *group,
1281 const char *name)
1282{
1283 struct config_group *stats_cg;
1284 struct iscsi_tiqn *tiqn;
1285
1286 tiqn = iscsit_add_tiqn((unsigned char *)name);
1287 if (IS_ERR(tiqn))
1288 return ERR_PTR(PTR_ERR(tiqn));
1289 /*
1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
1291 */
1292 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
1293
1294 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
1295 GFP_KERNEL);
1296 if (!stats_cg->default_groups) {
1297 pr_err("Unable to allocate memory for"
1298 " stats_cg->default_groups\n");
1299 iscsit_del_tiqn(tiqn);
1300 return ERR_PTR(-ENOMEM);
1301 }
1302
1303 stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
1304 stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
1305 stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
1306 stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
1307 stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
1308 stats_cg->default_groups[5] = NULL;
1309 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
1310 "iscsi_instance", &iscsi_stat_instance_cit);
1311 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
1312 "iscsi_sess_err", &iscsi_stat_sess_err_cit);
1313 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
1314 "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
1315 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
1316 "iscsi_login_stats", &iscsi_stat_login_cit);
1317 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
1318 "iscsi_logout_stats", &iscsi_stat_logout_cit);
1319
1320 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1321 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
1322 " %s\n", name);
1323 return &tiqn->tiqn_wwn;
1324}
1325
1326void lio_target_call_coredeltiqn(
1327 struct se_wwn *wwn)
1328{
1329 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1330 struct config_item *df_item;
1331 struct config_group *stats_cg;
1332 int i;
1333
1334 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
1335 for (i = 0; stats_cg->default_groups[i]; i++) {
1336 df_item = &stats_cg->default_groups[i]->cg_item;
1337 stats_cg->default_groups[i] = NULL;
1338 config_item_put(df_item);
1339 }
1340 kfree(stats_cg->default_groups);
1341
1342 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
1343 tiqn->tiqn);
1344 iscsit_del_tiqn(tiqn);
1345}
1346
1347/* End LIO-Target TIQN struct config_item lio_target_cit */
1348
1349/* Start lio_target_discovery_auth_cit */
1350
1351#define DEF_DISC_AUTH_STR(name, flags) \
1352 __DEF_NACL_AUTH_STR(disc, name, flags) \
1353static ssize_t iscsi_disc_show_##name( \
1354 struct target_fabric_configfs *tf, \
1355 char *page) \
1356{ \
1357 return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
1358 page); \
1359} \
1360static ssize_t iscsi_disc_store_##name( \
1361 struct target_fabric_configfs *tf, \
1362 const char *page, \
1363 size_t count) \
1364{ \
1365 return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
1366 page, count); \
1367}
1368
1369#define DEF_DISC_AUTH_INT(name) \
1370 __DEF_NACL_AUTH_INT(disc, name) \
1371static ssize_t iscsi_disc_show_##name( \
1372 struct target_fabric_configfs *tf, \
1373 char *page) \
1374{ \
1375 return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
1376 page); \
1377}
1378
1379#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
1380#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
1381
1382/*
1383 * One-way authentication userid
1384 */
1385DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
1386DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
1387/*
1388 * One-way authentication password
1389 */
1390DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
1391DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
1392/*
1393 * Enforce mutual authentication
1394 */
1395DEF_DISC_AUTH_INT(authenticate_target);
1396DISC_AUTH_ATTR_RO(authenticate_target);
1397/*
1398 * Mutual authentication userid
1399 */
1400DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
1401DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
1402/*
1403 * Mutual authentication password
1404 */
1405DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
1406DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
1407
1408/*
1409 * enforce_discovery_auth
1410 */
1411static ssize_t iscsi_disc_show_enforce_discovery_auth(
1412 struct target_fabric_configfs *tf,
1413 char *page)
1414{
1415 struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
1416
1417 return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
1418}
1419
1420static ssize_t iscsi_disc_store_enforce_discovery_auth(
1421 struct target_fabric_configfs *tf,
1422 const char *page,
1423 size_t count)
1424{
1425 struct iscsi_param *param;
1426 struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
1427 char *endptr;
1428 u32 op;
1429
1430 op = simple_strtoul(page, &endptr, 0);
1431 if ((op != 1) && (op != 0)) {
1432 pr_err("Illegal value for enforce_discovery_auth:"
1433 " %u\n", op);
1434 return -EINVAL;
1435 }
1436
1437 if (!discovery_tpg) {
1438 pr_err("iscsit_global->discovery_tpg is NULL\n");
1439 return -EINVAL;
1440 }
1441
1442 param = iscsi_find_param_from_key(AUTHMETHOD,
1443 discovery_tpg->param_list);
1444 if (!param)
1445 return -EINVAL;
1446
1447 if (op) {
1448 /*
1449 * Reset the AuthMethod key to CHAP.
1450 */
1451 if (iscsi_update_param_value(param, CHAP) < 0)
1452 return -EINVAL;
1453
1454 discovery_tpg->tpg_attrib.authentication = 1;
1455 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
1456 pr_debug("LIO-CORE[0] Successfully enabled"
1457 " authentication enforcement for iSCSI"
1458 " Discovery TPG\n");
1459 } else {
1460 /*
1461 * Reset the AuthMethod key to CHAP,None
1462 */
1463 if (iscsi_update_param_value(param, "CHAP,None") < 0)
1464 return -EINVAL;
1465
1466 discovery_tpg->tpg_attrib.authentication = 0;
1467 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
1468 pr_debug("LIO-CORE[0] Successfully disabled"
1469 " authentication enforcement for iSCSI"
1470 " Discovery TPG\n");
1471 }
1472
1473 return count;
1474}
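/*
 * Illustration only: enabling enforcement switches the discovery TPG's
 * AuthMethod back to plain CHAP, e.g.
 *
 *   echo 1 > discovery_auth/enforce_discovery_auth
 *
 * while writing 0 restores "CHAP,None" and allows unauthenticated
 * discovery sessions again.
 */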
1475
1476DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
1477
1478static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
1479 &iscsi_disc_userid.attr,
1480 &iscsi_disc_password.attr,
1481 &iscsi_disc_authenticate_target.attr,
1482 &iscsi_disc_userid_mutual.attr,
1483 &iscsi_disc_password_mutual.attr,
1484 &iscsi_disc_enforce_discovery_auth.attr,
1485 NULL,
1486};
1487
1488/* End lio_target_discovery_auth_cit */
1489
1490/* Start functions for target_core_fabric_ops */
1491
1492static char *iscsi_get_fabric_name(void)
1493{
1494 return "iSCSI";
1495}
1496
1497static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
1498{
1499 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1500
1501 return cmd->init_task_tag;
1502}
1503
1504static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
1505{
1506 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1507
1508 return cmd->i_state;
1509}
1510
1511static int iscsi_is_state_remove(struct se_cmd *se_cmd)
1512{
1513 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1514
1515 return (cmd->i_state == ISTATE_REMOVE);
1516}
1517
1518static int lio_sess_logged_in(struct se_session *se_sess)
1519{
1520 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1521 int ret;
1522 /*
1523 * Called with spin_lock_bh(&tpg_lock); and
1524 * spin_lock(&se_tpg->session_lock); held.
1525 */
1526 spin_lock(&sess->conn_lock);
1527 ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
1528 spin_unlock(&sess->conn_lock);
1529
1530 return ret;
1531}
1532
1533static u32 lio_sess_get_index(struct se_session *se_sess)
1534{
1535 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1536
1537 return sess->session_index;
1538}
1539
1540static u32 lio_sess_get_initiator_sid(
1541 struct se_session *se_sess,
1542 unsigned char *buf,
1543 u32 size)
1544{
1545 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1546 /*
1547 * iSCSI Initiator Session Identifier from RFC-3720.
1548 */
1549 return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
1550 sess->isid[0], sess->isid[1], sess->isid[2],
1551 sess->isid[3], sess->isid[4], sess->isid[5]);
1552}
1553
1554static int lio_queue_data_in(struct se_cmd *se_cmd)
1555{
1556 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1557
1558 cmd->i_state = ISTATE_SEND_DATAIN;
1559 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1560 return 0;
1561}
1562
1563static int lio_write_pending(struct se_cmd *se_cmd)
1564{
1565 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1566
1567 if (!cmd->immediate_data && !cmd->unsolicited_data)
1568 return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
1569
1570 return 0;
1571}
1572
1573static int lio_write_pending_status(struct se_cmd *se_cmd)
1574{
1575 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1576 int ret;
1577
1578 spin_lock_bh(&cmd->istate_lock);
1579 ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
1580 spin_unlock_bh(&cmd->istate_lock);
1581
1582 return ret;
1583}
1584
1585static int lio_queue_status(struct se_cmd *se_cmd)
1586{
1587 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1588
1589 cmd->i_state = ISTATE_SEND_STATUS;
1590 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1591 return 0;
1592}
1593
1594static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
1595{
1596 unsigned char *buffer = se_cmd->sense_buffer;
1597 /*
1598 * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
1599 * 16-bit SenseLength.
1600 */
1601 buffer[0] = ((sense_length >> 8) & 0xff);
1602 buffer[1] = (sense_length & 0xff);
1603 /*
1604 * Return two byte offset into allocated sense_buffer.
1605 */
1606 return 2;
1607}
1608
1609static u16 lio_get_fabric_sense_len(void)
1610{
1611 /*
1612 * Return two byte offset into allocated sense_buffer.
1613 */
1614 return 2;
1615}
1616
1617static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
1618{
1619 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1620
1621 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
1622 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1623 return 0;
1624}
1625
1626static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
1627{
1628 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1629
1630 return &tpg->tpg_tiqn->tiqn[0];
1631}
1632
1633static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
1634{
1635 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1636
1637 return tpg->tpgt;
1638}
1639
1640static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
1641{
1642 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1643
1644 return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
1645}
1646
1647static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
1648{
1649 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1650
1651 return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
1652}
1653
1654static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
1655{
1656 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1657
1658 return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
1659}
1660
1661static int lio_tpg_check_demo_mode_write_protect(
1662 struct se_portal_group *se_tpg)
1663{
1664 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1665
1666 return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
1667}
1668
1669static int lio_tpg_check_prod_mode_write_protect(
1670 struct se_portal_group *se_tpg)
1671{
1672 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1673
1674 return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
1675}
1676
1677static void lio_tpg_release_fabric_acl(
1678 struct se_portal_group *se_tpg,
1679 struct se_node_acl *se_acl)
1680{
1681 struct iscsi_node_acl *acl = container_of(se_acl,
1682 struct iscsi_node_acl, se_node_acl);
1683 kfree(acl);
1684}
1685
1686/*
1687 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
1688 *
1689 * Also, this function calls iscsit_inc_session_usage_count() on the
1690 * struct iscsi_session in question.
1691 */
1692static int lio_tpg_shutdown_session(struct se_session *se_sess)
1693{
1694 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1695
1696 spin_lock(&sess->conn_lock);
1697 if (atomic_read(&sess->session_fall_back_to_erl0) ||
1698 atomic_read(&sess->session_logout) ||
1699 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
1700 spin_unlock(&sess->conn_lock);
1701 return 0;
1702 }
1703 atomic_set(&sess->session_reinstatement, 1);
1704 spin_unlock(&sess->conn_lock);
1705
1706 iscsit_inc_session_usage_count(sess);
1707 iscsit_stop_time2retain_timer(sess);
1708
1709 return 1;
1710}
1711
1712/*
1713 * Calls iscsit_dec_session_usage_count() as inverse of
1714 * lio_tpg_shutdown_session()
1715 */
1716static void lio_tpg_close_session(struct se_session *se_sess)
1717{
1718 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1719 /*
1720 * If the iSCSI Session for the iSCSI Initiator Node exists,
1721	 * forcefully shut down the iSCSI NEXUS.
1722 */
1723 iscsit_stop_session(sess, 1, 1);
1724 iscsit_dec_session_usage_count(sess);
1725 iscsit_close_session(sess);
1726}
1727
1728static void lio_tpg_stop_session(
1729 struct se_session *se_sess,
1730 int sess_sleep,
1731 int conn_sleep)
1732{
1733 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1734
1735 iscsit_stop_session(sess, sess_sleep, conn_sleep);
1736}
1737
1738static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
1739{
1740 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1741
1742 iscsit_fall_back_to_erl0(sess);
1743}
1744
1745static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
1746{
1747 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1748
1749 return tpg->tpg_tiqn->tiqn_index;
1750}
1751
1752static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
1753{
1754 struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
1755 se_node_acl);
1756
1757 ISCSI_NODE_ATTRIB(acl)->nacl = acl;
1758 iscsit_set_default_node_attribues(acl);
1759}
1760
1761static void lio_release_cmd(struct se_cmd *se_cmd)
1762{
1763 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1764
1765 iscsit_release_cmd(cmd);
1766}
1767
1768/* End functions for target_core_fabric_ops */
1769
1770int iscsi_target_register_configfs(void)
1771{
1772 struct target_fabric_configfs *fabric;
1773 int ret;
1774
1775 lio_target_fabric_configfs = NULL;
1776 fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
1777 if (IS_ERR(fabric)) {
1778 pr_err("target_fabric_configfs_init() for"
1779 " LIO-Target failed!\n");
1780 return PTR_ERR(fabric);
1781 }
1782 /*
1783	 * Setup the fabric API of function pointers used by target_core_mod.
1784 */
1785 fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
1786 fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
1787 fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
1788 fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
1789 fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
1790 fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
1791 fabric->tf_ops.tpg_get_pr_transport_id_len =
1792 &iscsi_get_pr_transport_id_len;
1793 fabric->tf_ops.tpg_parse_pr_out_transport_id =
1794 &iscsi_parse_pr_out_transport_id;
1795 fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
1796 fabric->tf_ops.tpg_check_demo_mode_cache =
1797 &lio_tpg_check_demo_mode_cache;
1798 fabric->tf_ops.tpg_check_demo_mode_write_protect =
1799 &lio_tpg_check_demo_mode_write_protect;
1800 fabric->tf_ops.tpg_check_prod_mode_write_protect =
1801 &lio_tpg_check_prod_mode_write_protect;
1802 fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
1803 fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
1804 fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
1805 fabric->tf_ops.release_cmd = &lio_release_cmd;
1806 fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
1807 fabric->tf_ops.close_session = &lio_tpg_close_session;
1808 fabric->tf_ops.stop_session = &lio_tpg_stop_session;
1809 fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
1810 fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
1811 fabric->tf_ops.sess_get_index = &lio_sess_get_index;
1812 fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
1813 fabric->tf_ops.write_pending = &lio_write_pending;
1814 fabric->tf_ops.write_pending_status = &lio_write_pending_status;
1815 fabric->tf_ops.set_default_node_attributes =
1816 &lio_set_default_node_attributes;
1817 fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
1818 fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
1819 fabric->tf_ops.queue_data_in = &lio_queue_data_in;
1820 fabric->tf_ops.queue_status = &lio_queue_status;
1821 fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
1822 fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
1823 fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
1824 fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
1825 /*
1826 * Setup function pointers for generic logic in target_core_fabric_configfs.c
1827 */
1828 fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
1829 fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
1830 fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
1831 fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
1832 fabric->tf_ops.fabric_post_link = NULL;
1833 fabric->tf_ops.fabric_pre_unlink = NULL;
1834 fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
1835 fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
1836 fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
1837 fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
1838 /*
1839 * Setup default attribute lists for various fabric->tf_cit_tmpl
1840	 * struct config_item_type's
1841 */
1842 TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
1843 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
1844 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
1845 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
1846 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
1847 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
1848 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
1849 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
1850 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
1851 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
1852
1853 ret = target_fabric_configfs_register(fabric);
1854 if (ret < 0) {
1855 pr_err("target_fabric_configfs_register() for"
1856 " LIO-Target failed!\n");
1857 target_fabric_configfs_free(fabric);
1858 return ret;
1859 }
1860
1861 lio_target_fabric_configfs = fabric;
1862 pr_debug("LIO_TARGET[0] - Set fabric ->"
1863 " lio_target_fabric_configfs\n");
1864 return 0;
1865}
1866
1867
1868void iscsi_target_deregister_configfs(void)
1869{
1870 if (!lio_target_fabric_configfs)
1871 return;
1872 /*
1873	 * Shut down discovery sessions and disable discovery TPG
1874 */
1875 if (iscsit_global->discovery_tpg)
1876 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
1877
1878 target_fabric_configfs_deregister(lio_target_fabric_configfs);
1879 lio_target_fabric_configfs = NULL;
1880 pr_debug("LIO_TARGET[0] - Cleared"
1881 " lio_target_fabric_configfs\n");
1882}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
new file mode 100644
index 000000000000..8cd5a63c4edc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.h
@@ -0,0 +1,7 @@
1#ifndef ISCSI_TARGET_CONFIGFS_H
2#define ISCSI_TARGET_CONFIGFS_H
3
4extern int iscsi_target_register_configfs(void);
5extern void iscsi_target_deregister_configfs(void);
6
7#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
new file mode 100644
index 000000000000..470ed551eeb5
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,859 @@
1#ifndef ISCSI_TARGET_CORE_H
2#define ISCSI_TARGET_CORE_H
3
4#include <linux/in.h>
5#include <linux/configfs.h>
6#include <net/sock.h>
7#include <net/tcp.h>
8#include <scsi/scsi_cmnd.h>
9#include <scsi/iscsi_proto.h>
10#include <target/target_core_base.h>
11
12#define ISCSIT_VERSION "v4.1.0-rc1"
13#define ISCSI_MAX_DATASN_MISSING_COUNT 16
14#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
15#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
16#define SECONDS_FOR_ASYNC_LOGOUT 10
17#define SECONDS_FOR_ASYNC_TEXT 10
18#define SECONDS_FOR_LOGOUT_COMP 15
19#define WHITE_SPACE " \t\v\f\n\r"
20
21/* struct iscsi_node_attrib sanity values */
22#define NA_DATAOUT_TIMEOUT 3
23#define NA_DATAOUT_TIMEOUT_MAX 60
24#define NA_DATAOUT_TIMEOUT_MIX 2
25#define NA_DATAOUT_TIMEOUT_RETRIES 5
26#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
27#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
28#define NA_NOPIN_TIMEOUT 5
29#define NA_NOPIN_TIMEOUT_MAX 60
30#define NA_NOPIN_TIMEOUT_MIN 3
31#define NA_NOPIN_RESPONSE_TIMEOUT 5
32#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
33#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
34#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
35#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
36#define NA_RANDOM_R2T_OFFSETS 0
37#define NA_DEFAULT_ERL 0
38#define NA_DEFAULT_ERL_MAX 2
39#define NA_DEFAULT_ERL_MIN 0
40
41/* struct iscsi_tpg_attrib sanity values */
42#define TA_AUTHENTICATION 1
43#define TA_LOGIN_TIMEOUT 15
44#define TA_LOGIN_TIMEOUT_MAX 30
45#define TA_LOGIN_TIMEOUT_MIN 5
46#define TA_NETIF_TIMEOUT 2
47#define TA_NETIF_TIMEOUT_MAX 15
48#define TA_NETIF_TIMEOUT_MIN 2
49#define TA_GENERATE_NODE_ACLS 0
50#define TA_DEFAULT_CMDSN_DEPTH 16
51#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
52#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
53#define TA_CACHE_DYNAMIC_ACLS 0
54/* Enabled by default in demo mode (generate_node_acls=1) */
55#define TA_DEMO_MODE_WRITE_PROTECT 1
56/* Disabled by default in production mode w/ explicit ACLs */
57#define TA_PROD_MODE_WRITE_PROTECT 0
58#define TA_CACHE_CORE_NPS 0
59
60enum tpg_np_network_transport_table {
61 ISCSI_TCP = 0,
62 ISCSI_SCTP_TCP = 1,
63 ISCSI_SCTP_UDP = 2,
64 ISCSI_IWARP_TCP = 3,
65 ISCSI_IWARP_SCTP = 4,
66 ISCSI_INFINIBAND = 5,
67};
68
69/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
70enum target_conn_state_table {
71 TARG_CONN_STATE_FREE = 0x1,
72 TARG_CONN_STATE_XPT_UP = 0x3,
73 TARG_CONN_STATE_IN_LOGIN = 0x4,
74 TARG_CONN_STATE_LOGGED_IN = 0x5,
75 TARG_CONN_STATE_IN_LOGOUT = 0x6,
76 TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
77 TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
78};
79
80/* RFC-3720 7.3.2 Session State Diagram for a Target */
81enum target_sess_state_table {
82 TARG_SESS_STATE_FREE = 0x1,
83 TARG_SESS_STATE_ACTIVE = 0x2,
84 TARG_SESS_STATE_LOGGED_IN = 0x3,
85 TARG_SESS_STATE_FAILED = 0x4,
86 TARG_SESS_STATE_IN_CONTINUE = 0x5,
87};
88
89/* struct iscsi_data_count->type */
90enum data_count_type {
91 ISCSI_RX_DATA = 1,
92 ISCSI_TX_DATA = 2,
93};
94
95/* struct iscsi_datain_req->dr_complete */
96enum datain_req_comp_table {
97 DATAIN_COMPLETE_NORMAL = 1,
98 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
99 DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
100};
101
102/* struct iscsi_datain_req->recovery */
103enum datain_req_rec_table {
104 DATAIN_WITHIN_COMMAND_RECOVERY = 1,
105 DATAIN_CONNECTION_RECOVERY = 2,
106};
107
108/* struct iscsi_portal_group->state */
109enum tpg_state_table {
110 TPG_STATE_FREE = 0,
111 TPG_STATE_ACTIVE = 1,
112 TPG_STATE_INACTIVE = 2,
113 TPG_STATE_COLD_RESET = 3,
114};
115
116/* struct iscsi_tiqn->tiqn_state */
117enum tiqn_state_table {
118 TIQN_STATE_ACTIVE = 1,
119 TIQN_STATE_SHUTDOWN = 2,
120};
121
122/* struct iscsi_cmd->cmd_flags */
123enum cmd_flags_table {
124 ICF_GOT_LAST_DATAOUT = 0x00000001,
125 ICF_GOT_DATACK_SNACK = 0x00000002,
126 ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
127 ICF_SENT_LAST_R2T = 0x00000008,
128 ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
129 ICF_CONTIG_MEMORY = 0x00000020,
130 ICF_ATTACHED_TO_RQUEUE = 0x00000040,
131 ICF_OOO_CMDSN = 0x00000080,
132 ICF_REJECT_FAIL_CONN = 0x00000100,
133};
134
135/* struct iscsi_cmd->i_state */
136enum cmd_i_state_table {
137 ISTATE_NO_STATE = 0,
138 ISTATE_NEW_CMD = 1,
139 ISTATE_DEFERRED_CMD = 2,
140 ISTATE_UNSOLICITED_DATA = 3,
141 ISTATE_RECEIVE_DATAOUT = 4,
142 ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
143 ISTATE_RECEIVED_LAST_DATAOUT = 6,
144 ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
145 ISTATE_IN_CONNECTION_RECOVERY = 8,
146 ISTATE_RECEIVED_TASKMGT = 9,
147 ISTATE_SEND_ASYNCMSG = 10,
148 ISTATE_SENT_ASYNCMSG = 11,
149 ISTATE_SEND_DATAIN = 12,
150 ISTATE_SEND_LAST_DATAIN = 13,
151 ISTATE_SENT_LAST_DATAIN = 14,
152 ISTATE_SEND_LOGOUTRSP = 15,
153 ISTATE_SENT_LOGOUTRSP = 16,
154 ISTATE_SEND_NOPIN = 17,
155 ISTATE_SENT_NOPIN = 18,
156 ISTATE_SEND_REJECT = 19,
157 ISTATE_SENT_REJECT = 20,
158 ISTATE_SEND_R2T = 21,
159 ISTATE_SENT_R2T = 22,
160 ISTATE_SEND_R2T_RECOVERY = 23,
161 ISTATE_SENT_R2T_RECOVERY = 24,
162 ISTATE_SEND_LAST_R2T = 25,
163 ISTATE_SENT_LAST_R2T = 26,
164 ISTATE_SEND_LAST_R2T_RECOVERY = 27,
165 ISTATE_SENT_LAST_R2T_RECOVERY = 28,
166 ISTATE_SEND_STATUS = 29,
167 ISTATE_SEND_STATUS_BROKEN_PC = 30,
168 ISTATE_SENT_STATUS = 31,
169 ISTATE_SEND_STATUS_RECOVERY = 32,
170 ISTATE_SENT_STATUS_RECOVERY = 33,
171 ISTATE_SEND_TASKMGTRSP = 34,
172 ISTATE_SENT_TASKMGTRSP = 35,
173 ISTATE_SEND_TEXTRSP = 36,
174 ISTATE_SENT_TEXTRSP = 37,
175 ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
176 ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
177 ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
178 ISTATE_REMOVE = 41,
179 ISTATE_FREE = 42,
180};
181
182/* Used for iscsi_recover_cmdsn() return values */
183enum recover_cmdsn_ret_table {
184 CMDSN_ERROR_CANNOT_RECOVER = -1,
185 CMDSN_NORMAL_OPERATION = 0,
186 CMDSN_LOWER_THAN_EXP = 1,
187 CMDSN_HIGHER_THAN_EXP = 2,
188};
189
190/* Used for iscsi_handle_immediate_data() return values */
191enum immedate_data_ret_table {
192 IMMEDIATE_DATA_CANNOT_RECOVER = -1,
193 IMMEDIATE_DATA_NORMAL_OPERATION = 0,
194 IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
195};
196
197/* Used for iscsi_decide_dataout_action() return values */
198enum dataout_action_ret_table {
199 DATAOUT_CANNOT_RECOVER = -1,
200 DATAOUT_NORMAL = 0,
201 DATAOUT_SEND_R2T = 1,
202 DATAOUT_SEND_TO_TRANSPORT = 2,
203 DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
204};
205
206/* Used for struct iscsi_node_auth->naf_flags */
207enum naf_flags_table {
208 NAF_USERID_SET = 0x01,
209 NAF_PASSWORD_SET = 0x02,
210 NAF_USERID_IN_SET = 0x04,
211 NAF_PASSWORD_IN_SET = 0x08,
212};
213
214/* Used by various struct timer_list to manage iSCSI specific state */
215enum iscsi_timer_flags_table {
216 ISCSI_TF_RUNNING = 0x01,
217 ISCSI_TF_STOP = 0x02,
218 ISCSI_TF_EXPIRED = 0x04,
219};
220
221/* Used for struct iscsi_np->np_flags */
222enum np_flags_table {
223 NPF_IP_NETWORK = 0x00,
224 NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
225};
226
227/* Used for struct iscsi_np->np_thread_state */
228enum np_thread_state_table {
229 ISCSI_NP_THREAD_ACTIVE = 1,
230 ISCSI_NP_THREAD_INACTIVE = 2,
231 ISCSI_NP_THREAD_RESET = 3,
232 ISCSI_NP_THREAD_SHUTDOWN = 4,
233 ISCSI_NP_THREAD_EXIT = 5,
234};
235
236struct iscsi_conn_ops {
237 u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
238 u8 DataDigest; /* [0,1] == [None,CRC32C] */
239 u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
240 u8 OFMarker; /* [0,1] == [No,Yes] */
241 u8 IFMarker; /* [0,1] == [No,Yes] */
242 u32 OFMarkInt; /* [1..65535] */
243 u32 IFMarkInt; /* [1..65535] */
244};
245
246struct iscsi_sess_ops {
247 char InitiatorName[224];
248 char InitiatorAlias[256];
249 char TargetName[224];
250 char TargetAlias[256];
251 char TargetAddress[256];
252 u16 TargetPortalGroupTag; /* [0..65535] */
253 u16 MaxConnections; /* [1..65535] */
254 u8 InitialR2T; /* [0,1] == [No,Yes] */
255 u8 ImmediateData; /* [0,1] == [No,Yes] */
256 u32 MaxBurstLength; /* [512..2**24-1] */
257 u32 FirstBurstLength; /* [512..2**24-1] */
258 u16 DefaultTime2Wait; /* [0..3600] */
259 u16 DefaultTime2Retain; /* [0..3600] */
260 u16 MaxOutstandingR2T; /* [1..65535] */
261 u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
262 u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
263 u8 ErrorRecoveryLevel; /* [0..2] */
264 u8 SessionType; /* [0,1] == [Normal,Discovery]*/
265};
266
267struct iscsi_queue_req {
268 int state;
269 struct iscsi_cmd *cmd;
270 struct list_head qr_list;
271};
272
273struct iscsi_data_count {
274 int data_length;
275 int sync_and_steering;
276 enum data_count_type type;
277 u32 iov_count;
278 u32 ss_iov_count;
279 u32 ss_marker_count;
280 struct kvec *iov;
281};
282
283struct iscsi_param_list {
284 struct list_head param_list;
285 struct list_head extra_response_list;
286};
287
288struct iscsi_datain_req {
289 enum datain_req_comp_table dr_complete;
290 int generate_recovery_values;
291 enum datain_req_rec_table recovery;
292 u32 begrun;
293 u32 runlength;
294 u32 data_length;
295 u32 data_offset;
296 u32 data_offset_end;
297 u32 data_sn;
298 u32 next_burst_len;
299 u32 read_data_done;
300 u32 seq_send_order;
301 struct list_head dr_list;
302} ____cacheline_aligned;
303
304struct iscsi_ooo_cmdsn {
305 u16 cid;
306 u32 batch_count;
307 u32 cmdsn;
308 u32 exp_cmdsn;
309 struct iscsi_cmd *cmd;
310 struct list_head ooo_list;
311} ____cacheline_aligned;
312
313struct iscsi_datain {
314 u8 flags;
315 u32 data_sn;
316 u32 length;
317 u32 offset;
318} ____cacheline_aligned;
319
320struct iscsi_r2t {
321 int seq_complete;
322 int recovery_r2t;
323 int sent_r2t;
324 u32 r2t_sn;
325 u32 offset;
326 u32 targ_xfer_tag;
327 u32 xfer_len;
328 struct list_head r2t_list;
329} ____cacheline_aligned;
330
331struct iscsi_cmd {
332 enum iscsi_timer_flags_table dataout_timer_flags;
333 /* DataOUT timeout retries */
334 u8 dataout_timeout_retries;
335 /* Within command recovery count */
336 u8 error_recovery_count;
337 /* iSCSI dependent state for out of order CmdSNs */
338 enum cmd_i_state_table deferred_i_state;
339 /* iSCSI dependent state */
340 enum cmd_i_state_table i_state;
341 /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
342 u8 immediate_cmd;
343 /* Immediate data present */
344 u8 immediate_data;
345 /* iSCSI Opcode */
346 u8 iscsi_opcode;
347 /* iSCSI Response Code */
348 u8 iscsi_response;
349 /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
350 u8 logout_reason;
351 /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
352 u8 logout_response;
353 /* MaxCmdSN has been incremented */
354 u8 maxcmdsn_inc;
355 /* Immediate Unsolicited Dataout */
356 u8 unsolicited_data;
357 /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
358 u16 logout_cid;
359 /* Command flags */
360 enum cmd_flags_table cmd_flags;
361 /* Initiator Task Tag assigned from Initiator */
362 u32 init_task_tag;
363 /* Target Transfer Tag assigned from Target */
364 u32 targ_xfer_tag;
365 /* CmdSN assigned from Initiator */
366 u32 cmd_sn;
367 /* ExpStatSN assigned from Initiator */
368 u32 exp_stat_sn;
369 /* StatSN assigned to this ITT */
370 u32 stat_sn;
371 /* DataSN Counter */
372 u32 data_sn;
373 /* R2TSN Counter */
374 u32 r2t_sn;
375 /* Last DataSN acknowledged via DataAck SNACK */
376 u32 acked_data_sn;
377 /* Used for echoing NOPOUT ping data */
378 u32 buf_ptr_size;
379 /* Used to store DataDigest */
380 u32 data_crc;
381 /* Total size in bytes associated with command */
382 u32 data_length;
383 /* Counter for MaxOutstandingR2T */
384 u32 outstanding_r2ts;
385 /* Next R2T Offset when DataSequenceInOrder=Yes */
386 u32 r2t_offset;
387 /* Iovec current and orig count for iscsi_cmd->iov_data */
388 u32 iov_data_count;
389 u32 orig_iov_data_count;
390 /* Number of miscellaneous iovecs used for IP stack calls */
391 u32 iov_misc_count;
392 /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
393 u32 pdu_count;
394 /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
395 u32 pdu_send_order;
396 /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
397 u32 pdu_start;
398 u32 residual_count;
399 /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
400 u32 seq_send_order;
401 /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
402 u32 seq_count;
403 /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
404 u32 seq_no;
405 /* Lowest offset in current DataOUT sequence */
406 u32 seq_start_offset;
407 /* Highest offset in current DataOUT sequence */
408 u32 seq_end_offset;
409 /* Total size in bytes received so far of READ data */
410 u32 read_data_done;
411 /* Total size in bytes received so far of WRITE data */
412 u32 write_data_done;
413 /* Counter for FirstBurstLength key */
414 u32 first_burst_len;
415 /* Counter for MaxBurstLength key */
416 u32 next_burst_len;
417 /* Transfer size used for IP stack calls */
418 u32 tx_size;
419 /* Buffer used for various purposes */
420 void *buf_ptr;
421 /* See include/linux/dma-mapping.h */
422 enum dma_data_direction data_direction;
423 /* iSCSI PDU Header + CRC */
424 unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
425 /* Number of times struct iscsi_cmd is present in immediate queue */
426 atomic_t immed_queue_count;
427 atomic_t response_queue_count;
428 atomic_t transport_sent;
429 spinlock_t datain_lock;
430 spinlock_t dataout_timeout_lock;
431 /* spinlock for protecting struct iscsi_cmd->i_state */
432 spinlock_t istate_lock;
433 /* spinlock for adding within command recovery entries */
434 spinlock_t error_lock;
435 /* spinlock for adding R2Ts */
436 spinlock_t r2t_lock;
437 /* DataIN List */
438 struct list_head datain_list;
439 /* R2T List */
440 struct list_head cmd_r2t_list;
441 struct completion reject_comp;
442 /* Timer for DataOUT */
443 struct timer_list dataout_timer;
444 /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
445 struct kvec *iov_data;
446 /* Iovecs for miscellaneous purposes */
447#define ISCSI_MISC_IOVECS 5
448 struct kvec iov_misc[ISCSI_MISC_IOVECS];
449 /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
450 struct iscsi_pdu *pdu_list;
451 /* Current struct iscsi_pdu used for DataPDUInOrder=No */
452 struct iscsi_pdu *pdu_ptr;
453 /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
454 struct iscsi_seq *seq_list;
455 /* Current struct iscsi_seq used for DataSequenceInOrder=No */
456 struct iscsi_seq *seq_ptr;
457 /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
458 struct iscsi_tmr_req *tmr_req;
459 /* Connection this command is allegiant to */
460 struct iscsi_conn *conn;
461 /* Pointer to connection recovery entry */
462 struct iscsi_conn_recovery *cr;
463 /* Session the command is part of, used for connection recovery */
464 struct iscsi_session *sess;
465 /* list_head for connection list */
466 struct list_head i_list;
467 /* The TCM I/O descriptor that is accessed via container_of() */
468 struct se_cmd se_cmd;
469 /* Sense buffer that will be mapped into outgoing status */
470#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
471 unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
472
473 struct scatterlist *t_mem_sg;
474 u32 t_mem_sg_nents;
475
476 u32 padding;
477 u8 pad_bytes[4];
478
479 struct scatterlist *first_data_sg;
480 u32 first_data_sg_off;
481 u32 kmapped_nents;
482
483} ____cacheline_aligned;
484
485struct iscsi_tmr_req {
486 bool task_reassign:1;
487 u32 ref_cmd_sn;
488 u32 exp_data_sn;
489 struct iscsi_conn_recovery *conn_recovery;
490 struct se_tmr_req *se_tmr_req;
491};
492
493struct iscsi_conn {
494 /* Authentication Successful for this connection */
495 u8 auth_complete;
496 /* State connection is currently in */
497 u8 conn_state;
498 u8 conn_logout_reason;
499 u8 network_transport;
500 enum iscsi_timer_flags_table nopin_timer_flags;
501 enum iscsi_timer_flags_table nopin_response_timer_flags;
502 u8 tx_immediate_queue;
503 u8 tx_response_queue;
504 /* Used to know what thread encountered a transport failure */
505 u8 which_thread;
506 /* connection id assigned by the Initiator */
507 u16 cid;
508 /* Remote TCP Port */
509 u16 login_port;
510 int net_size;
511 u32 auth_id;
512#define CONNFLAG_SCTP_STRUCT_FILE 0x01
513 u32 conn_flags;
514 /* Used for iscsi_tx_login_rsp() */
515 u32 login_itt;
516 u32 exp_statsn;
517 /* Per connection status sequence number */
518 u32 stat_sn;
519 /* IFMarkInt's Current Value */
520 u32 if_marker;
521 /* OFMarkInt's Current Value */
522 u32 of_marker;
523 /* Used for calculating OFMarker offset to next PDU */
524 u32 of_marker_offset;
525 /* Complete Bad PDU for sending reject */
526 unsigned char bad_hdr[ISCSI_HDR_LEN];
527#define IPV6_ADDRESS_SPACE 48
528 unsigned char login_ip[IPV6_ADDRESS_SPACE];
529 int conn_usage_count;
530 int conn_waiting_on_uc;
531 atomic_t check_immediate_queue;
532 atomic_t conn_logout_remove;
533 atomic_t connection_exit;
534 atomic_t connection_recovery;
535 atomic_t connection_reinstatement;
536 atomic_t connection_wait;
537 atomic_t connection_wait_rcfr;
538 atomic_t sleep_on_conn_wait_comp;
539 atomic_t transport_failed;
540 struct completion conn_post_wait_comp;
541 struct completion conn_wait_comp;
542 struct completion conn_wait_rcfr_comp;
543 struct completion conn_waiting_on_uc_comp;
544 struct completion conn_logout_comp;
545 struct completion tx_half_close_comp;
546 struct completion rx_half_close_comp;
547 /* socket used by this connection */
548 struct socket *sock;
549 struct timer_list nopin_timer;
550 struct timer_list nopin_response_timer;
551 struct timer_list transport_timer;
552 /* Spinlock used for add/deleting cmd's from conn_cmd_list */
553 spinlock_t cmd_lock;
554 spinlock_t conn_usage_lock;
555 spinlock_t immed_queue_lock;
556 spinlock_t nopin_timer_lock;
557 spinlock_t response_queue_lock;
558 spinlock_t state_lock;
559 /* libcrypto RX and TX contexts for crc32c */
560 struct hash_desc conn_rx_hash;
561 struct hash_desc conn_tx_hash;
562 /* Used for scheduling TX and RX connection kthreads */
563 cpumask_var_t conn_cpumask;
564 int conn_rx_reset_cpumask:1;
565 int conn_tx_reset_cpumask:1;
566 /* list_head of struct iscsi_cmd for this connection */
567 struct list_head conn_cmd_list;
568 struct list_head immed_queue_list;
569 struct list_head response_queue_list;
570 struct iscsi_conn_ops *conn_ops;
571 struct iscsi_param_list *param_list;
572 /* Used for per connection auth state machine */
573 void *auth_protocol;
574 struct iscsi_login_thread_s *login_thread;
575 struct iscsi_portal_group *tpg;
576 /* Pointer to parent session */
577 struct iscsi_session *sess;
578 /* Pointer to thread_set in use for this conn's threads */
579 struct iscsi_thread_set *thread_set;
580 /* list_head for session connection list */
581 struct list_head conn_list;
582} ____cacheline_aligned;
583
584struct iscsi_conn_recovery {
585 u16 cid;
586 u32 cmd_count;
587 u32 maxrecvdatasegmentlength;
588 int ready_for_reallegiance;
589 struct list_head conn_recovery_cmd_list;
590 spinlock_t conn_recovery_cmd_lock;
591 struct timer_list time2retain_timer;
592 struct iscsi_session *sess;
593 struct list_head cr_list;
594} ____cacheline_aligned;
595
596struct iscsi_session {
597 u8 initiator_vendor;
598 u8 isid[6];
599 enum iscsi_timer_flags_table time2retain_timer_flags;
600 u8 version_active;
601 u16 cid_called;
602 u16 conn_recovery_count;
603 u16 tsih;
604 /* state session is currently in */
605 u32 session_state;
606 /* session wide counter: initiator assigned task tag */
607 u32 init_task_tag;
608 /* session wide counter: target assigned task tag */
609 u32 targ_xfer_tag;
610 u32 cmdsn_window;
611
612 /* protects cmdsn values */
613 struct mutex cmdsn_mutex;
614 /* session wide counter: expected command sequence number */
615 u32 exp_cmd_sn;
616 /* session wide counter: maximum allowed command sequence number */
617 u32 max_cmd_sn;
618 struct list_head sess_ooo_cmdsn_list;
619
620 /* LIO specific session ID */
621 u32 sid;
622 char auth_type[8];
623 /* unique within the target */
624 int session_index;
625 /* Used for session reference counting */
626 int session_usage_count;
627 int session_waiting_on_uc;
628 u32 cmd_pdus;
629 u32 rsp_pdus;
630 u64 tx_data_octets;
631 u64 rx_data_octets;
632 u32 conn_digest_errors;
633 u32 conn_timeout_errors;
634 u64 creation_time;
635 spinlock_t session_stats_lock;
636 /* Number of active connections */
637 atomic_t nconn;
638 atomic_t session_continuation;
639 atomic_t session_fall_back_to_erl0;
640 atomic_t session_logout;
641 atomic_t session_reinstatement;
642 atomic_t session_stop_active;
643 atomic_t sleep_on_sess_wait_comp;
644 atomic_t transport_wait_cmds;
645 /* connection list */
646 struct list_head sess_conn_list;
647 struct list_head cr_active_list;
648 struct list_head cr_inactive_list;
649 spinlock_t conn_lock;
650 spinlock_t cr_a_lock;
651 spinlock_t cr_i_lock;
652 spinlock_t session_usage_lock;
653 spinlock_t ttt_lock;
654 struct completion async_msg_comp;
655 struct completion reinstatement_comp;
656 struct completion session_wait_comp;
657 struct completion session_waiting_on_uc_comp;
658 struct timer_list time2retain_timer;
659 struct iscsi_sess_ops *sess_ops;
660 struct se_session *se_sess;
661 struct iscsi_portal_group *tpg;
662} ____cacheline_aligned;
663
664struct iscsi_login {
665 u8 auth_complete;
666 u8 checked_for_existing;
667 u8 current_stage;
668 u8 leading_connection;
669 u8 first_request;
670 u8 version_min;
671 u8 version_max;
672 char isid[6];
673 u32 cmd_sn;
674 u32 init_task_tag;
675 u32 initial_exp_statsn;
676 u32 rsp_length;
677 u16 cid;
678 u16 tsih;
679 char *req;
680 char *rsp;
681 char *req_buf;
682 char *rsp_buf;
683} ____cacheline_aligned;
684
685struct iscsi_node_attrib {
686 u32 dataout_timeout;
687 u32 dataout_timeout_retries;
688 u32 default_erl;
689 u32 nopin_timeout;
690 u32 nopin_response_timeout;
691 u32 random_datain_pdu_offsets;
692 u32 random_datain_seq_offsets;
693 u32 random_r2t_offsets;
694 u32 tmr_cold_reset;
695 u32 tmr_warm_reset;
696 struct iscsi_node_acl *nacl;
697};
698
699struct se_dev_entry_s;
700
701struct iscsi_node_auth {
702 enum naf_flags_table naf_flags;
703 int authenticate_target;
704 /* Used for iscsit_global->discovery_auth,
705 * set to zero (auth disabled) by default */
706 int enforce_discovery_auth;
707#define MAX_USER_LEN 256
708#define MAX_PASS_LEN 256
709 char userid[MAX_USER_LEN];
710 char password[MAX_PASS_LEN];
711 char userid_mutual[MAX_USER_LEN];
712 char password_mutual[MAX_PASS_LEN];
713};
714
715#include "iscsi_target_stat.h"
716
717struct iscsi_node_stat_grps {
718 struct config_group iscsi_sess_stats_group;
719 struct config_group iscsi_conn_stats_group;
720};
721
722struct iscsi_node_acl {
723 struct iscsi_node_attrib node_attrib;
724 struct iscsi_node_auth node_auth;
725 struct iscsi_node_stat_grps node_stat_grps;
726 struct se_node_acl se_node_acl;
727};
728
729#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
730
731#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
732#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
733
734struct iscsi_tpg_attrib {
735 u32 authentication;
736 u32 login_timeout;
737 u32 netif_timeout;
738 u32 generate_node_acls;
739 u32 cache_dynamic_acls;
740 u32 default_cmdsn_depth;
741 u32 demo_mode_write_protect;
742 u32 prod_mode_write_protect;
743 struct iscsi_portal_group *tpg;
744};
745
746struct iscsi_np {
747 int np_network_transport;
748 int np_ip_proto;
749 int np_sock_type;
750 enum np_thread_state_table np_thread_state;
751 enum iscsi_timer_flags_table np_login_timer_flags;
752 u32 np_exports;
753 enum np_flags_table np_flags;
754 unsigned char np_ip[IPV6_ADDRESS_SPACE];
755 u16 np_port;
756 spinlock_t np_thread_lock;
757 struct completion np_restart_comp;
758 struct socket *np_socket;
759 struct __kernel_sockaddr_storage np_sockaddr;
760 struct task_struct *np_thread;
761 struct timer_list np_login_timer;
762 struct iscsi_portal_group *np_login_tpg;
763 struct list_head np_list;
764} ____cacheline_aligned;
765
766struct iscsi_tpg_np {
767 struct iscsi_np *tpg_np;
768 struct iscsi_portal_group *tpg;
769 struct iscsi_tpg_np *tpg_np_parent;
770 struct list_head tpg_np_list;
771 struct list_head tpg_np_child_list;
772 struct list_head tpg_np_parent_list;
773 struct se_tpg_np se_tpg_np;
774 spinlock_t tpg_np_parent_lock;
775};
776
777struct iscsi_portal_group {
778 unsigned char tpg_chap_id;
779 /* TPG State */
780 enum tpg_state_table tpg_state;
781 /* Target Portal Group Tag */
782 u16 tpgt;
783 /* Id assigned to target sessions */
784 u16 ntsih;
785 /* Number of active sessions */
786 u32 nsessions;
787 /* Number of Network Portals available for this TPG */
788 u32 num_tpg_nps;
789 /* Per TPG LIO specific session ID. */
790 u32 sid;
791 /* Spinlock for adding/removing Network Portals */
792 spinlock_t tpg_np_lock;
793 spinlock_t tpg_state_lock;
794 struct se_portal_group tpg_se_tpg;
795 struct mutex tpg_access_lock;
796 struct mutex np_login_lock;
797 struct iscsi_tpg_attrib tpg_attrib;
798 /* Pointer to default list of iSCSI parameters for TPG */
799 struct iscsi_param_list *param_list;
800 struct iscsi_tiqn *tpg_tiqn;
801 struct list_head tpg_gnp_list;
802 struct list_head tpg_list;
803} ____cacheline_aligned;
804
805#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
806#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
807#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
808#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
809#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
810
811struct iscsi_wwn_stat_grps {
812 struct config_group iscsi_stat_group;
813 struct config_group iscsi_instance_group;
814 struct config_group iscsi_sess_err_group;
815 struct config_group iscsi_tgt_attr_group;
816 struct config_group iscsi_login_stats_group;
817 struct config_group iscsi_logout_stats_group;
818};
819
820struct iscsi_tiqn {
821#define ISCSI_IQN_LEN 224
822 unsigned char tiqn[ISCSI_IQN_LEN];
823 enum tiqn_state_table tiqn_state;
824 int tiqn_access_count;
825 u32 tiqn_active_tpgs;
826 u32 tiqn_ntpgs;
827 u32 tiqn_num_tpg_nps;
828 u32 tiqn_nsessions;
829 struct list_head tiqn_list;
830 struct list_head tiqn_tpg_list;
831 spinlock_t tiqn_state_lock;
832 spinlock_t tiqn_tpg_lock;
833 struct se_wwn tiqn_wwn;
834 struct iscsi_wwn_stat_grps tiqn_stat_grps;
835 int tiqn_index;
836 struct iscsi_sess_err_stats sess_err_stats;
837 struct iscsi_login_stats login_stats;
838 struct iscsi_logout_stats logout_stats;
839} ____cacheline_aligned;
840
841#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
842
843struct iscsit_global {
844 /* In core shutdown */
845 u32 in_shutdown;
846 u32 active_ts;
847 /* Unique identifier used for the authentication daemon */
848 u32 auth_id;
849 u32 inactive_ts;
850 /* Thread Set bitmap count */
851 int ts_bitmap_count;
852 /* Thread Set bitmap pointer */
853 unsigned long *ts_bitmap;
854 /* Used for iSCSI discovery session authentication */
855 struct iscsi_node_acl discovery_acl;
856 struct iscsi_portal_group *discovery_tpg;
857};
858
859#endif /* ISCSI_TARGET_CORE_H */
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
new file mode 100644
index 000000000000..8c0495129513
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -0,0 +1,531 @@
1/*******************************************************************************
2 * This file contains the iSCSI Target DataIN value generation functions.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <scsi/iscsi_proto.h>
22
23#include "iscsi_target_core.h"
24#include "iscsi_target_seq_pdu_list.h"
25#include "iscsi_target_erl1.h"
26#include "iscsi_target_util.h"
27#include "iscsi_target.h"
28#include "iscsi_target_datain_values.h"
29
30struct iscsi_datain_req *iscsit_allocate_datain_req(void)
31{
32 struct iscsi_datain_req *dr;
33
34 dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
35 if (!dr) {
36 pr_err("Unable to allocate memory for"
37 " struct iscsi_datain_req\n");
38 return NULL;
39 }
40 INIT_LIST_HEAD(&dr->dr_list);
41
42 return dr;
43}
44
45void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
46{
47 spin_lock(&cmd->datain_lock);
48 list_add_tail(&dr->dr_list, &cmd->datain_list);
49 spin_unlock(&cmd->datain_lock);
50}
51
52void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
53{
54 spin_lock(&cmd->datain_lock);
55 list_del(&dr->dr_list);
56 spin_unlock(&cmd->datain_lock);
57
58 kmem_cache_free(lio_dr_cache, dr);
59}
60
61void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
62{
63 struct iscsi_datain_req *dr, *dr_tmp;
64
65 spin_lock(&cmd->datain_lock);
66 list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
67 list_del(&dr->dr_list);
68 kmem_cache_free(lio_dr_cache, dr);
69 }
70 spin_unlock(&cmd->datain_lock);
71}
72
73struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
74{
75 struct iscsi_datain_req *dr;
76
77 if (list_empty(&cmd->datain_list)) {
78 pr_err("cmd->datain_list is empty for ITT:"
79 " 0x%08x\n", cmd->init_task_tag);
80 return NULL;
81 }
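	/*
	 * Return the first (oldest) request on the per-command list; the
	 * list_for_each_entry() + break idiom simply grabs the head entry.
	 */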
82 list_for_each_entry(dr, &cmd->datain_list, dr_list)
83 break;
84
85 return dr;
86}
87
88/*
89 * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
90 */
91static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
92 struct iscsi_cmd *cmd,
93 struct iscsi_datain *datain)
94{
95 u32 next_burst_len, read_data_done, read_data_left;
96 struct iscsi_conn *conn = cmd->conn;
97 struct iscsi_datain_req *dr;
98
99 dr = iscsit_get_datain_req(cmd);
100 if (!dr)
101 return NULL;
102
103 if (dr->recovery && dr->generate_recovery_values) {
104 if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
105 cmd, dr) < 0)
106 return NULL;
107
108 dr->generate_recovery_values = 0;
109 }
110
111 next_burst_len = (!dr->recovery) ?
112 cmd->next_burst_len : dr->next_burst_len;
113 read_data_done = (!dr->recovery) ?
114 cmd->read_data_done : dr->read_data_done;
115
116 read_data_left = (cmd->data_length - read_data_done);
117 if (!read_data_left) {
118 pr_err("ITT: 0x%08x read_data_left is zero!\n",
119 cmd->init_task_tag);
120 return NULL;
121 }
122
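	/*
	 * Pick the length of the next DataIN PDU: if the remaining READ
	 * data fits within both MaxRecvDataSegmentLength and what is left
	 * of the current MaxBurstLength burst, send it all as the final
	 * PDU; otherwise send a MaxRecvDataSegmentLength sized PDU and
	 * close the burst (F=1) once MaxBurstLength would be reached.
	 * For example, with MaxRecvDataSegmentLength=8192 and
	 * MaxBurstLength=65536, a 20000 byte READ goes out as PDUs of
	 * 8192, 8192 and 3616 bytes within a single burst.
	 */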
123 if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
124 (read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
125 next_burst_len))) {
126 datain->length = read_data_left;
127
128 datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
129 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
130 datain->flags |= ISCSI_FLAG_DATA_ACK;
131 } else {
132 if ((next_burst_len +
133 conn->conn_ops->MaxRecvDataSegmentLength) <
134 conn->sess->sess_ops->MaxBurstLength) {
135 datain->length =
136 conn->conn_ops->MaxRecvDataSegmentLength;
137 next_burst_len += datain->length;
138 } else {
139 datain->length = (conn->sess->sess_ops->MaxBurstLength -
140 next_burst_len);
141 next_burst_len = 0;
142
143 datain->flags |= ISCSI_FLAG_CMD_FINAL;
144 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
145 datain->flags |= ISCSI_FLAG_DATA_ACK;
146 }
147 }
148
149 datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
150 datain->offset = read_data_done;
151
152 if (!dr->recovery) {
153 cmd->next_burst_len = next_burst_len;
154 cmd->read_data_done += datain->length;
155 } else {
156 dr->next_burst_len = next_burst_len;
157 dr->read_data_done += datain->length;
158 }
159
160 if (!dr->recovery) {
161 if (datain->flags & ISCSI_FLAG_DATA_STATUS)
162 dr->dr_complete = DATAIN_COMPLETE_NORMAL;
163
164 return dr;
165 }
166
167 if (!dr->runlength) {
168 if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
169 dr->dr_complete =
170 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
171 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
172 DATAIN_COMPLETE_CONNECTION_RECOVERY;
173 }
174 } else {
175 if ((dr->begrun + dr->runlength) == dr->data_sn) {
176 dr->dr_complete =
177 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
178 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
179 DATAIN_COMPLETE_CONNECTION_RECOVERY;
180 }
181 }
182
183 return dr;
184}
185
186/*
187 * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
188 */
189static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
190 struct iscsi_cmd *cmd,
191 struct iscsi_datain *datain)
192{
193 u32 offset, read_data_done, read_data_left, seq_send_order;
194 struct iscsi_conn *conn = cmd->conn;
195 struct iscsi_datain_req *dr;
196 struct iscsi_seq *seq;
197
198 dr = iscsit_get_datain_req(cmd);
199 if (!dr)
200 return NULL;
201
202 if (dr->recovery && dr->generate_recovery_values) {
203 if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
204 cmd, dr) < 0)
205 return NULL;
206
207 dr->generate_recovery_values = 0;
208 }
209
210 read_data_done = (!dr->recovery) ?
211 cmd->read_data_done : dr->read_data_done;
212 seq_send_order = (!dr->recovery) ?
213 cmd->seq_send_order : dr->seq_send_order;
214
215 read_data_left = (cmd->data_length - read_data_done);
216 if (!read_data_left) {
217 pr_err("ITT: 0x%08x read_data_left is zero!\n",
218 cmd->init_task_tag);
219 return NULL;
220 }
221
222 seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
223 if (!seq)
224 return NULL;
225
226 seq->sent = 1;
227
228 if (!dr->recovery && !seq->next_burst_len)
229 seq->first_datasn = cmd->data_sn;
230
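	/*
	 * With DataSequenceInOrder=No the offset comes from the selected
	 * struct iscsi_seq descriptor rather than from a running
	 * read_data_done counter, since bursts may be sent out of order.
	 */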
231 offset = (seq->offset + seq->next_burst_len);
232
233 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
234 cmd->data_length) {
235 datain->length = (cmd->data_length - offset);
236 datain->offset = offset;
237
238 datain->flags |= ISCSI_FLAG_CMD_FINAL;
239 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
240 datain->flags |= ISCSI_FLAG_DATA_ACK;
241
242 seq->next_burst_len = 0;
243 seq_send_order++;
244 } else {
245 if ((seq->next_burst_len +
246 conn->conn_ops->MaxRecvDataSegmentLength) <
247 conn->sess->sess_ops->MaxBurstLength) {
248 datain->length =
249 conn->conn_ops->MaxRecvDataSegmentLength;
250 datain->offset = (seq->offset + seq->next_burst_len);
251
252 seq->next_burst_len += datain->length;
253 } else {
254 datain->length = (conn->sess->sess_ops->MaxBurstLength -
255 seq->next_burst_len);
256 datain->offset = (seq->offset + seq->next_burst_len);
257
258 datain->flags |= ISCSI_FLAG_CMD_FINAL;
259 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
260 datain->flags |= ISCSI_FLAG_DATA_ACK;
261
262 seq->next_burst_len = 0;
263 seq_send_order++;
264 }
265 }
266
267 if ((read_data_done + datain->length) == cmd->data_length)
268 datain->flags |= ISCSI_FLAG_DATA_STATUS;
269
270 datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
271 if (!dr->recovery) {
272 cmd->seq_send_order = seq_send_order;
273 cmd->read_data_done += datain->length;
274 } else {
275 dr->seq_send_order = seq_send_order;
276 dr->read_data_done += datain->length;
277 }
278
279 if (!dr->recovery) {
280 if (datain->flags & ISCSI_FLAG_CMD_FINAL)
281 seq->last_datasn = datain->data_sn;
282 if (datain->flags & ISCSI_FLAG_DATA_STATUS)
283 dr->dr_complete = DATAIN_COMPLETE_NORMAL;
284
285 return dr;
286 }
287
288 if (!dr->runlength) {
289 if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
290 dr->dr_complete =
291 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
292 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
293 DATAIN_COMPLETE_CONNECTION_RECOVERY;
294 }
295 } else {
296 if ((dr->begrun + dr->runlength) == dr->data_sn) {
297 dr->dr_complete =
298 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
299 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
300 DATAIN_COMPLETE_CONNECTION_RECOVERY;
301 }
302 }
303
304 return dr;
305}
306
307/*
308 * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
309 */
310static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
311 struct iscsi_cmd *cmd,
312 struct iscsi_datain *datain)
313{
314 u32 next_burst_len, read_data_done, read_data_left;
315 struct iscsi_conn *conn = cmd->conn;
316 struct iscsi_datain_req *dr;
317 struct iscsi_pdu *pdu;
318
319 dr = iscsit_get_datain_req(cmd);
320 if (!dr)
321 return NULL;
322
323 if (dr->recovery && dr->generate_recovery_values) {
324 if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
325 cmd, dr) < 0)
326 return NULL;
327
328 dr->generate_recovery_values = 0;
329 }
330
331 next_burst_len = (!dr->recovery) ?
332 cmd->next_burst_len : dr->next_burst_len;
333 read_data_done = (!dr->recovery) ?
334 cmd->read_data_done : dr->read_data_done;
335
336 read_data_left = (cmd->data_length - read_data_done);
337 if (!read_data_left) {
338 pr_err("ITT: 0x%08x read_data_left is zero!\n",
339 cmd->init_task_tag);
340 return dr;
341 }
342
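	/*
	 * With DataPDUInOrder=No the per-PDU offset/length pairs come from
	 * the pre-built cmd->pdu_list, so only burst accounting and flag
	 * handling are done here.
	 */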
343 pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
344 if (!pdu)
345 return dr;
346
347 if ((read_data_done + pdu->length) == cmd->data_length) {
348 pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
349 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
350 pdu->flags |= ISCSI_FLAG_DATA_ACK;
351
352 next_burst_len = 0;
353 } else {
354 if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
355 conn->sess->sess_ops->MaxBurstLength)
356 next_burst_len += pdu->length;
357 else {
358 pdu->flags |= ISCSI_FLAG_CMD_FINAL;
359 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
360 pdu->flags |= ISCSI_FLAG_DATA_ACK;
361
362 next_burst_len = 0;
363 }
364 }
365
366 pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
367 if (!dr->recovery) {
368 cmd->next_burst_len = next_burst_len;
369 cmd->read_data_done += pdu->length;
370 } else {
371 dr->next_burst_len = next_burst_len;
372 dr->read_data_done += pdu->length;
373 }
374
375 datain->flags = pdu->flags;
376 datain->length = pdu->length;
377 datain->offset = pdu->offset;
378 datain->data_sn = pdu->data_sn;
379
380 if (!dr->recovery) {
381 if (datain->flags & ISCSI_FLAG_DATA_STATUS)
382 dr->dr_complete = DATAIN_COMPLETE_NORMAL;
383
384 return dr;
385 }
386
387 if (!dr->runlength) {
388 if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
389 dr->dr_complete =
390 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
391 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
392 DATAIN_COMPLETE_CONNECTION_RECOVERY;
393 }
394 } else {
395 if ((dr->begrun + dr->runlength) == dr->data_sn) {
396 dr->dr_complete =
397 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
398 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
399 DATAIN_COMPLETE_CONNECTION_RECOVERY;
400 }
401 }
402
403 return dr;
404}
405
406/*
407 * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
408 */
409static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
410 struct iscsi_cmd *cmd,
411 struct iscsi_datain *datain)
412{
413 u32 read_data_done, read_data_left, seq_send_order;
414 struct iscsi_conn *conn = cmd->conn;
415 struct iscsi_datain_req *dr;
416 struct iscsi_pdu *pdu;
417 struct iscsi_seq *seq = NULL;
418
419 dr = iscsit_get_datain_req(cmd);
420 if (!dr)
421 return NULL;
422
423 if (dr->recovery && dr->generate_recovery_values) {
424 if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
425 cmd, dr) < 0)
426 return NULL;
427
428 dr->generate_recovery_values = 0;
429 }
430
431 read_data_done = (!dr->recovery) ?
432 cmd->read_data_done : dr->read_data_done;
433 seq_send_order = (!dr->recovery) ?
434 cmd->seq_send_order : dr->seq_send_order;
435
436 read_data_left = (cmd->data_length - read_data_done);
437 if (!read_data_left) {
438 pr_err("ITT: 0x%08x read_data_left is zero!\n",
439 cmd->init_task_tag);
440 return NULL;
441 }
442
443 seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
444 if (!seq)
445 return NULL;
446
447 seq->sent = 1;
448
449 if (!dr->recovery && !seq->next_burst_len)
450 seq->first_datasn = cmd->data_sn;
451
452 pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
453 if (!pdu)
454 return NULL;
455
456 if (seq->pdu_send_order == seq->pdu_count) {
457 pdu->flags |= ISCSI_FLAG_CMD_FINAL;
458 if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
459 pdu->flags |= ISCSI_FLAG_DATA_ACK;
460
461 seq->next_burst_len = 0;
462 seq_send_order++;
463 } else
464 seq->next_burst_len += pdu->length;
465
466 if ((read_data_done + pdu->length) == cmd->data_length)
467 pdu->flags |= ISCSI_FLAG_DATA_STATUS;
468
469 pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
470 if (!dr->recovery) {
471 cmd->seq_send_order = seq_send_order;
472 cmd->read_data_done += pdu->length;
473 } else {
474 dr->seq_send_order = seq_send_order;
475 dr->read_data_done += pdu->length;
476 }
477
478 datain->flags = pdu->flags;
479 datain->length = pdu->length;
480 datain->offset = pdu->offset;
481 datain->data_sn = pdu->data_sn;
482
483 if (!dr->recovery) {
484 if (datain->flags & ISCSI_FLAG_CMD_FINAL)
485 seq->last_datasn = datain->data_sn;
486 if (datain->flags & ISCSI_FLAG_DATA_STATUS)
487 dr->dr_complete = DATAIN_COMPLETE_NORMAL;
488
489 return dr;
490 }
491
492 if (!dr->runlength) {
493 if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
494 dr->dr_complete =
495 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
496 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
497 DATAIN_COMPLETE_CONNECTION_RECOVERY;
498 }
499 } else {
500 if ((dr->begrun + dr->runlength) == dr->data_sn) {
501 dr->dr_complete =
502 (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
503 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
504 DATAIN_COMPLETE_CONNECTION_RECOVERY;
505 }
506 }
507
508 return dr;
509}
510
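/*
 * Dispatch to one of the four DataIN generation strategies above based on
 * the negotiated DataSequenceInOrder/DataPDUInOrder combination.
 */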
511struct iscsi_datain_req *iscsit_get_datain_values(
512 struct iscsi_cmd *cmd,
513 struct iscsi_datain *datain)
514{
515 struct iscsi_conn *conn = cmd->conn;
516
517 if (conn->sess->sess_ops->DataSequenceInOrder &&
518 conn->sess->sess_ops->DataPDUInOrder)
519 return iscsit_set_datain_values_yes_and_yes(cmd, datain);
520 else if (!conn->sess->sess_ops->DataSequenceInOrder &&
521 conn->sess->sess_ops->DataPDUInOrder)
522 return iscsit_set_datain_values_no_and_yes(cmd, datain);
523 else if (conn->sess->sess_ops->DataSequenceInOrder &&
524 !conn->sess->sess_ops->DataPDUInOrder)
525 return iscsit_set_datain_values_yes_and_no(cmd, datain);
526 else if (!conn->sess->sess_ops->DataSequenceInOrder &&
527 !conn->sess->sess_ops->DataPDUInOrder)
528 return iscsit_set_datain_values_no_and_no(cmd, datain);
529
530 return NULL;
531}
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
new file mode 100644
index 000000000000..646429ac5a02
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -0,0 +1,12 @@
1#ifndef ISCSI_TARGET_DATAIN_VALUES_H
2#define ISCSI_TARGET_DATAIN_VALUES_H
3
4extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
5extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
6extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
7extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
8extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
9extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
10 struct iscsi_datain *);
11
12#endif /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
new file mode 100644
index 000000000000..a19fa5eea88e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -0,0 +1,87 @@
1/*******************************************************************************
2 * This file contains the iSCSI Virtual Device and Disk Transport
3 * agnostic related functions.
4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <scsi/scsi_device.h>
23#include <target/target_core_base.h>
24#include <target/target_core_device.h>
25#include <target/target_core_transport.h>
26
27#include "iscsi_target_core.h"
28#include "iscsi_target_device.h"
29#include "iscsi_target_tpg.h"
30#include "iscsi_target_util.h"
31
32int iscsit_get_lun_for_tmr(
33 struct iscsi_cmd *cmd,
34 u64 lun)
35{
36 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
37
38 return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
39}
40
41int iscsit_get_lun_for_cmd(
42 struct iscsi_cmd *cmd,
43 unsigned char *cdb,
44 u64 lun)
45{
46 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
47
48 return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
49}
50
51void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
52{
53 struct se_node_acl *se_nacl;
54
55 /*
56 * This is a discovery session, the single queue slot was already
57 * assigned in iscsi_login_zero_tsih(). Since only Logout and
58 * Text Opcodes are allowed during discovery we do not have to worry
59 * about the HBA's queue depth here.
60 */
61 if (sess->sess_ops->SessionType)
62 return;
63
64 se_nacl = sess->se_sess->se_node_acl;
65
66 /*
67 * This is a normal session, set the Session's CmdSN window to the
68 * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
69 * has already been validated as a legal value in
70 * core_set_queue_depth_for_node().
71 */
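	/*
	 * Widening MaxCmdSN by queue_depth - 1 on top of the single CmdSN
	 * slot already handed out at login yields a window of exactly
	 * queue_depth outstanding commands, e.g. queue_depth=32 allows the
	 * initiator to keep 32 CmdSNs in flight.
	 */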
72 sess->cmdsn_window = se_nacl->queue_depth;
73 sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
74}
75
76void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
77{
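	/*
	 * Immediate commands do not consume a CmdSN slot, and a command may
	 * only widen the window once, so skip both cases here.
	 */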
78 if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
79 return;
80
81 cmd->maxcmdsn_inc = 1;
82
83 mutex_lock(&sess->cmdsn_mutex);
84 sess->max_cmd_sn += 1;
85 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
86 mutex_unlock(&sess->cmdsn_mutex);
87}
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
new file mode 100644
index 000000000000..bef1cada15f8
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -0,0 +1,9 @@
1#ifndef ISCSI_TARGET_DEVICE_H
2#define ISCSI_TARGET_DEVICE_H
3
4extern int iscsit_get_lun_for_tmr(struct iscsi_cmd *, u64);
5extern int iscsit_get_lun_for_cmd(struct iscsi_cmd *, unsigned char *, u64);
6extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
7extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
8
9#endif /* ISCSI_TARGET_DEVICE_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
new file mode 100644
index 000000000000..b7ffc3cd40cc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -0,0 +1,1004 @@
1/******************************************************************************
2 * This file contains error recovery level zero functions used by
3 * the iSCSI Target driver.
4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h"
28#include "iscsi_target_tq.h"
29#include "iscsi_target_erl0.h"
30#include "iscsi_target_erl1.h"
31#include "iscsi_target_erl2.h"
32#include "iscsi_target_util.h"
33#include "iscsi_target.h"
34
35/*
36 * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
37 * checks against to determine a PDU's Offset+Length is within the current
38 * DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
39 */
40void iscsit_set_dataout_sequence_values(
41 struct iscsi_cmd *cmd)
42{
43 struct iscsi_conn *conn = cmd->conn;
44 /*
45 * Still set seq_start_offset and seq_end_offset for Unsolicited
46 * DataOUT, even if DataSequenceInOrder=No.
47 */
48 if (cmd->unsolicited_data) {
49 cmd->seq_start_offset = cmd->write_data_done;
50 cmd->seq_end_offset = cmd->write_data_done +
51 ((cmd->data_length >
52 conn->sess->sess_ops->FirstBurstLength) ?
53 conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
54 return;
55 }
56
57 if (!conn->sess->sess_ops->DataSequenceInOrder)
58 return;
59
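	/*
	 * Advance the solicited DataOUT window one MaxBurstLength sized
	 * sequence at a time. For example, with MaxBurstLength=65536 and a
	 * 200000 byte WRITE (no unsolicited data), successive calls cover
	 * offsets 0-65535, 65536-131071, 131072-196607 and 196608-199999.
	 */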
60 if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
61 cmd->seq_start_offset = cmd->write_data_done;
62 cmd->seq_end_offset = (cmd->data_length >
63 conn->sess->sess_ops->MaxBurstLength) ?
64 (cmd->write_data_done +
65 conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
66 } else {
67 cmd->seq_start_offset = cmd->seq_end_offset;
68 cmd->seq_end_offset = ((cmd->seq_end_offset +
69 conn->sess->sess_ops->MaxBurstLength) >=
70 cmd->data_length) ? cmd->data_length :
71 (cmd->seq_end_offset +
72 conn->sess->sess_ops->MaxBurstLength);
73 }
74}
75
76static int iscsit_dataout_within_command_recovery_check(
77 struct iscsi_cmd *cmd,
78 unsigned char *buf)
79{
80 struct iscsi_conn *conn = cmd->conn;
81 struct iscsi_data *hdr = (struct iscsi_data *) buf;
82 u32 payload_length = ntoh24(hdr->dlength);
83
84 /*
85 * We do the within-command recovery checks here as it is
86 * the first function called in iscsi_check_pre_dataout().
87 * Basically, if we are in within-command recovery and
88 * the PDU does not contain the offset the sequence needs,
89 * dump the payload.
90 *
91 * This only applies to DataPDUInOrder=Yes, for
92 * DataPDUInOrder=No we only re-request the failed PDU
93 * and check that all PDUs in a sequence are received
94 * upon end of sequence.
95 */
96 if (conn->sess->sess_ops->DataSequenceInOrder) {
97 if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
98 (cmd->write_data_done != hdr->offset))
99 goto dump;
100
101 cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
102 } else {
103 struct iscsi_seq *seq;
104
105 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
106 if (!seq)
107 return DATAOUT_CANNOT_RECOVER;
108 /*
109 * Set the struct iscsi_seq pointer to reuse later.
110 */
111 cmd->seq_ptr = seq;
112
113 if (conn->sess->sess_ops->DataPDUInOrder) {
114 if ((seq->status ==
115 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
116 ((seq->offset != hdr->offset) ||
117 (seq->data_sn != hdr->datasn)))
118 goto dump;
119 } else {
120 if ((seq->status ==
121 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
122 (seq->data_sn != hdr->datasn))
123 goto dump;
124 }
125
126 if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
127 goto dump;
128
129 if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
130 seq->status = 0;
131 }
132
133 return DATAOUT_NORMAL;
134
135dump:
136 pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
137 " 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
138 return iscsit_dump_data_payload(conn, payload_length, 1);
139}
140
141static int iscsit_dataout_check_unsolicited_sequence(
142 struct iscsi_cmd *cmd,
143 unsigned char *buf)
144{
145 u32 first_burst_len;
146 struct iscsi_conn *conn = cmd->conn;
147 struct iscsi_data *hdr = (struct iscsi_data *) buf;
148 u32 payload_length = ntoh24(hdr->dlength);
149
150
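	/*
	 * Unsolicited DataOUT must fall inside the window set up by
	 * iscsit_set_dataout_sequence_values() and may never exceed the
	 * negotiated FirstBurstLength for the command.
	 */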
151 if ((hdr->offset < cmd->seq_start_offset) ||
152 ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
153 pr_err("Command ITT: 0x%08x with Offset: %u,"
154 " Length: %u outside of Unsolicited Sequence %u:%u while"
155 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
156 hdr->offset, payload_length, cmd->seq_start_offset,
157 cmd->seq_end_offset);
158 return DATAOUT_CANNOT_RECOVER;
159 }
160
161 first_burst_len = (cmd->first_burst_len + payload_length);
162
163 if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
164 pr_err("Total %u bytes exceeds FirstBurstLength: %u"
165 " for this Unsolicited DataOut Burst.\n",
166 first_burst_len, conn->sess->sess_ops->FirstBurstLength);
167 transport_send_check_condition_and_sense(&cmd->se_cmd,
168 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
169 return DATAOUT_CANNOT_RECOVER;
170 }
171
172 /*
173 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
174 * checks for the current Unsolicited DataOUT Sequence.
175 */
176 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
177 /*
178 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
179 * sequence checks are handled in
180 * iscsit_dataout_datapduinorder_no_fbit().
181 */
182 if (!conn->sess->sess_ops->DataPDUInOrder)
183 goto out;
184
185 if ((first_burst_len != cmd->data_length) &&
186 (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
187 pr_err("Unsolicited non-immediate data"
188 " received %u does not equal FirstBurstLength: %u, and"
189 " does not equal ExpXferLen %u.\n", first_burst_len,
190 conn->sess->sess_ops->FirstBurstLength,
191 cmd->data_length);
192 transport_send_check_condition_and_sense(&cmd->se_cmd,
193 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
194 return DATAOUT_CANNOT_RECOVER;
195 }
196 } else {
197 if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
198 pr_err("Command ITT: 0x%08x reached"
199 " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
200 " error.\n", cmd->init_task_tag,
201 conn->sess->sess_ops->FirstBurstLength);
202 return DATAOUT_CANNOT_RECOVER;
203 }
204 if (first_burst_len == cmd->data_length) {
205 pr_err("Command ITT: 0x%08x reached"
206 " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
207 " error.\n", cmd->init_task_tag, cmd->data_length);
208 return DATAOUT_CANNOT_RECOVER;
209 }
210 }
211
212out:
213 return DATAOUT_NORMAL;
214}
215
216static int iscsit_dataout_check_sequence(
217 struct iscsi_cmd *cmd,
218 unsigned char *buf)
219{
220 u32 next_burst_len;
221 struct iscsi_conn *conn = cmd->conn;
222 struct iscsi_seq *seq = NULL;
223 struct iscsi_data *hdr = (struct iscsi_data *) buf;
224 u32 payload_length = ntoh24(hdr->dlength);
225
226 /*
227 * For DataSequenceInOrder=Yes: Check that the offset and offset+length
228 * is within range as defined by iscsi_set_dataout_sequence_values().
229 *
230 * For DataSequenceInOrder=No: Check that an struct iscsi_seq exists for
231 * offset+length tuple.
232 */
233 if (conn->sess->sess_ops->DataSequenceInOrder) {
234 /*
235 * Due to the possibility of recovery DataOUT being sent by the
236 * initiator fulfilling a Recovery R2T, it's best to just dump the
237 * payload here, instead of erroring out.
238 */
239 if ((hdr->offset < cmd->seq_start_offset) ||
240 ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
241 pr_err("Command ITT: 0x%08x with Offset: %u,"
242 " Length: %u outside of Sequence %u:%u while"
243 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
244 hdr->offset, payload_length, cmd->seq_start_offset,
245 cmd->seq_end_offset);
246
247 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
248 return DATAOUT_CANNOT_RECOVER;
249 return DATAOUT_WITHIN_COMMAND_RECOVERY;
250 }
251
252 next_burst_len = (cmd->next_burst_len + payload_length);
253 } else {
254 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
255 if (!seq)
256 return DATAOUT_CANNOT_RECOVER;
257 /*
258 * Set the struct iscsi_seq pointer to reuse later.
259 */
260 cmd->seq_ptr = seq;
261
262 if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
263 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
264 return DATAOUT_CANNOT_RECOVER;
265 return DATAOUT_WITHIN_COMMAND_RECOVERY;
266 }
267
268 next_burst_len = (seq->next_burst_len + payload_length);
269 }
270
271 if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
272 pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
273 " Length: %u exceeds MaxBurstLength: %u. protocol"
274 " error.\n", cmd->init_task_tag,
275 (next_burst_len - payload_length),
276 payload_length, conn->sess->sess_ops->MaxBurstLength);
277 return DATAOUT_CANNOT_RECOVER;
278 }
279
280 /*
281 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
282 * checks for the current DataOUT Sequence.
283 */
284 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
285 /*
286 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
287 * sequence checks are handled in
288 * iscsit_dataout_datapduinorder_no_fbit().
289 */
290 if (!conn->sess->sess_ops->DataPDUInOrder)
291 goto out;
292
293 if (conn->sess->sess_ops->DataSequenceInOrder) {
294 if ((next_burst_len <
295 conn->sess->sess_ops->MaxBurstLength) &&
296 ((cmd->write_data_done + payload_length) <
297 cmd->data_length)) {
298 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
299 " before end of DataOUT sequence, protocol"
300 " error.\n", cmd->init_task_tag);
301 return DATAOUT_CANNOT_RECOVER;
302 }
303 } else {
304 if (next_burst_len < seq->xfer_len) {
305 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
306 " before end of DataOUT sequence, protocol"
307 " error.\n", cmd->init_task_tag);
308 return DATAOUT_CANNOT_RECOVER;
309 }
310 }
311 } else {
312 if (conn->sess->sess_ops->DataSequenceInOrder) {
313 if (next_burst_len ==
314 conn->sess->sess_ops->MaxBurstLength) {
315 pr_err("Command ITT: 0x%08x reached"
316 " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
317 " not set, protocol error.", cmd->init_task_tag,
318 conn->sess->sess_ops->MaxBurstLength);
319 return DATAOUT_CANNOT_RECOVER;
320 }
321 if ((cmd->write_data_done + payload_length) ==
322 cmd->data_length) {
323 pr_err("Command ITT: 0x%08x reached"
324 " last DataOUT PDU in sequence but ISCSI_FLAG_"
325 "CMD_FINAL is not set, protocol error.\n",
326 cmd->init_task_tag);
327 return DATAOUT_CANNOT_RECOVER;
328 }
329 } else {
330 if (next_burst_len == seq->xfer_len) {
331 pr_err("Command ITT: 0x%08x reached"
332 " last DataOUT PDU in sequence but ISCSI_FLAG_"
333 "CMD_FINAL is not set, protocol error.\n",
334 cmd->init_task_tag);
335 return DATAOUT_CANNOT_RECOVER;
336 }
337 }
338 }
339
340out:
341 return DATAOUT_NORMAL;
342}
343
344static int iscsit_dataout_check_datasn(
345 struct iscsi_cmd *cmd,
346 unsigned char *buf)
347{
348 int dump = 0, recovery = 0;
349 u32 data_sn = 0;
350 struct iscsi_conn *conn = cmd->conn;
351 struct iscsi_data *hdr = (struct iscsi_data *) buf;
352 u32 payload_length = ntoh24(hdr->dlength);
353
354 /*
355 * Considering the target has no method of re-requesting DataOUT
356	 * by DataSN, if we receive a greater DataSN than expected we
357 * assume the functions for DataPDUInOrder=[Yes,No] below will
358 * handle it.
359 *
360 * If the DataSN is less than expected, dump the payload.
361 */
362 if (conn->sess->sess_ops->DataSequenceInOrder)
363 data_sn = cmd->data_sn;
364 else {
365 struct iscsi_seq *seq = cmd->seq_ptr;
366 data_sn = seq->data_sn;
367 }
368
369 if (hdr->datasn > data_sn) {
370 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
371 " higher than expected 0x%08x.\n", cmd->init_task_tag,
372 hdr->datasn, data_sn);
373 recovery = 1;
374 goto recover;
375 } else if (hdr->datasn < data_sn) {
376 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
377 " lower than expected 0x%08x, discarding payload.\n",
378 cmd->init_task_tag, hdr->datasn, data_sn);
379 dump = 1;
380 goto dump;
381 }
382
383 return DATAOUT_NORMAL;
384
385recover:
386 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
387 pr_err("Unable to perform within-command recovery"
388 " while ERL=0.\n");
389 return DATAOUT_CANNOT_RECOVER;
390 }
391dump:
392 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
393 return DATAOUT_CANNOT_RECOVER;
394
395 return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
396 DATAOUT_NORMAL;
397}
398
399static int iscsit_dataout_pre_datapduinorder_yes(
400 struct iscsi_cmd *cmd,
401 unsigned char *buf)
402{
403 int dump = 0, recovery = 0;
404 struct iscsi_conn *conn = cmd->conn;
405 struct iscsi_data *hdr = (struct iscsi_data *) buf;
406 u32 payload_length = ntoh24(hdr->dlength);
407
408 /*
409 * For DataSequenceInOrder=Yes: If the offset is greater than the global
410	 * DataPDUInOrder=Yes offset counter in struct iscsi_cmd, a protocol error
411	 * has occurred; fail the connection.
412	 *
413	 * For DataSequenceInOrder=No: If the offset is greater than the per
414	 * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a protocol
415	 * error has occurred; fail the connection.
416 */
417 if (conn->sess->sess_ops->DataSequenceInOrder) {
418 if (hdr->offset != cmd->write_data_done) {
419 pr_err("Command ITT: 0x%08x, received offset"
420 " %u different than expected %u.\n", cmd->init_task_tag,
421 hdr->offset, cmd->write_data_done);
422 recovery = 1;
423 goto recover;
424 }
425 } else {
426 struct iscsi_seq *seq = cmd->seq_ptr;
427
428 if (hdr->offset > seq->offset) {
429 pr_err("Command ITT: 0x%08x, received offset"
430 " %u greater than expected %u.\n", cmd->init_task_tag,
431 hdr->offset, seq->offset);
432 recovery = 1;
433 goto recover;
434 } else if (hdr->offset < seq->offset) {
435 pr_err("Command ITT: 0x%08x, received offset"
436 " %u less than expected %u, discarding payload.\n",
437 cmd->init_task_tag, hdr->offset, seq->offset);
438 dump = 1;
439 goto dump;
440 }
441 }
442
443 return DATAOUT_NORMAL;
444
445recover:
446 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
447 pr_err("Unable to perform within-command recovery"
448 " while ERL=0.\n");
449 return DATAOUT_CANNOT_RECOVER;
450 }
451dump:
452 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
453 return DATAOUT_CANNOT_RECOVER;
454
455 return (recovery) ? iscsit_recover_dataout_sequence(cmd,
456 hdr->offset, payload_length) :
457 (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
458}
459
460static int iscsit_dataout_pre_datapduinorder_no(
461 struct iscsi_cmd *cmd,
462 unsigned char *buf)
463{
464 struct iscsi_pdu *pdu;
465 struct iscsi_data *hdr = (struct iscsi_data *) buf;
466 u32 payload_length = ntoh24(hdr->dlength);
467
468 pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
469 if (!pdu)
470 return DATAOUT_CANNOT_RECOVER;
471
472 cmd->pdu_ptr = pdu;
473
474 switch (pdu->status) {
475 case ISCSI_PDU_NOT_RECEIVED:
476 case ISCSI_PDU_CRC_FAILED:
477 case ISCSI_PDU_TIMED_OUT:
478 break;
479 case ISCSI_PDU_RECEIVED_OK:
480		pr_err("Command ITT: 0x%08x received duplicate DataOUT for"
481 " Offset: %u, Length: %u\n", cmd->init_task_tag,
482 hdr->offset, payload_length);
483 return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
484 default:
485 return DATAOUT_CANNOT_RECOVER;
486 }
487
488 return DATAOUT_NORMAL;
489}
490
491static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
492{
493 struct iscsi_r2t *r2t;
494
495 if (cmd->unsolicited_data)
496 return 0;
497
498 r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
499 if (!r2t)
500 return -1;
501
502 spin_lock_bh(&cmd->r2t_lock);
503 r2t->seq_complete = 1;
504 cmd->outstanding_r2ts--;
505 spin_unlock_bh(&cmd->r2t_lock);
506
507 return 0;
508}
509
510static int iscsit_dataout_update_datapduinorder_no(
511 struct iscsi_cmd *cmd,
512 u32 data_sn,
513 int f_bit)
514{
515 int ret = 0;
516 struct iscsi_pdu *pdu = cmd->pdu_ptr;
517
518 pdu->data_sn = data_sn;
519
520 switch (pdu->status) {
521 case ISCSI_PDU_NOT_RECEIVED:
522 pdu->status = ISCSI_PDU_RECEIVED_OK;
523 break;
524 case ISCSI_PDU_CRC_FAILED:
525 pdu->status = ISCSI_PDU_RECEIVED_OK;
526 break;
527 case ISCSI_PDU_TIMED_OUT:
528 pdu->status = ISCSI_PDU_RECEIVED_OK;
529 break;
530 default:
531 return DATAOUT_CANNOT_RECOVER;
532 }
533
534 if (f_bit) {
535 ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
536 if (ret == DATAOUT_CANNOT_RECOVER)
537 return ret;
538 }
539
540 return DATAOUT_NORMAL;
541}
542
543static int iscsit_dataout_post_crc_passed(
544 struct iscsi_cmd *cmd,
545 unsigned char *buf)
546{
547 int ret, send_r2t = 0;
548 struct iscsi_conn *conn = cmd->conn;
549 struct iscsi_seq *seq = NULL;
550 struct iscsi_data *hdr = (struct iscsi_data *) buf;
551 u32 payload_length = ntoh24(hdr->dlength);
552
553 if (cmd->unsolicited_data) {
554 if ((cmd->first_burst_len + payload_length) ==
555 conn->sess->sess_ops->FirstBurstLength) {
556 if (iscsit_dataout_update_r2t(cmd, hdr->offset,
557 payload_length) < 0)
558 return DATAOUT_CANNOT_RECOVER;
559 send_r2t = 1;
560 }
561
562 if (!conn->sess->sess_ops->DataPDUInOrder) {
563 ret = iscsit_dataout_update_datapduinorder_no(cmd,
564 hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
565 if (ret == DATAOUT_CANNOT_RECOVER)
566 return ret;
567 }
568
569 cmd->first_burst_len += payload_length;
570
571 if (conn->sess->sess_ops->DataSequenceInOrder)
572 cmd->data_sn++;
573 else {
574 seq = cmd->seq_ptr;
575 seq->data_sn++;
576 seq->offset += payload_length;
577 }
578
579 if (send_r2t) {
580 if (seq)
581 seq->status = DATAOUT_SEQUENCE_COMPLETE;
582 cmd->first_burst_len = 0;
583 cmd->unsolicited_data = 0;
584 }
585 } else {
586 if (conn->sess->sess_ops->DataSequenceInOrder) {
587 if ((cmd->next_burst_len + payload_length) ==
588 conn->sess->sess_ops->MaxBurstLength) {
589 if (iscsit_dataout_update_r2t(cmd, hdr->offset,
590 payload_length) < 0)
591 return DATAOUT_CANNOT_RECOVER;
592 send_r2t = 1;
593 }
594
595 if (!conn->sess->sess_ops->DataPDUInOrder) {
596 ret = iscsit_dataout_update_datapduinorder_no(
597 cmd, hdr->datasn,
598 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
599 if (ret == DATAOUT_CANNOT_RECOVER)
600 return ret;
601 }
602
603 cmd->next_burst_len += payload_length;
604 cmd->data_sn++;
605
606 if (send_r2t)
607 cmd->next_burst_len = 0;
608 } else {
609 seq = cmd->seq_ptr;
610
611 if ((seq->next_burst_len + payload_length) ==
612 seq->xfer_len) {
613 if (iscsit_dataout_update_r2t(cmd, hdr->offset,
614 payload_length) < 0)
615 return DATAOUT_CANNOT_RECOVER;
616 send_r2t = 1;
617 }
618
619 if (!conn->sess->sess_ops->DataPDUInOrder) {
620 ret = iscsit_dataout_update_datapduinorder_no(
621 cmd, hdr->datasn,
622 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
623 if (ret == DATAOUT_CANNOT_RECOVER)
624 return ret;
625 }
626
627 seq->data_sn++;
628 seq->offset += payload_length;
629 seq->next_burst_len += payload_length;
630
631 if (send_r2t) {
632 seq->next_burst_len = 0;
633 seq->status = DATAOUT_SEQUENCE_COMPLETE;
634 }
635 }
636 }
637
638 if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
639 cmd->data_sn = 0;
640
641 cmd->write_data_done += payload_length;
642
643 return (cmd->write_data_done == cmd->data_length) ?
644 DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
645 DATAOUT_SEND_R2T : DATAOUT_NORMAL;
646}
647
648static int iscsit_dataout_post_crc_failed(
649 struct iscsi_cmd *cmd,
650 unsigned char *buf)
651{
652 struct iscsi_conn *conn = cmd->conn;
653 struct iscsi_pdu *pdu;
654 struct iscsi_data *hdr = (struct iscsi_data *) buf;
655 u32 payload_length = ntoh24(hdr->dlength);
656
657 if (conn->sess->sess_ops->DataPDUInOrder)
658 goto recover;
659 /*
660	 * The rest of this function only runs when DataPDUInOrder=No.
661 */
662 pdu = cmd->pdu_ptr;
663
664 switch (pdu->status) {
665 case ISCSI_PDU_NOT_RECEIVED:
666 pdu->status = ISCSI_PDU_CRC_FAILED;
667 break;
668 case ISCSI_PDU_CRC_FAILED:
669 break;
670 case ISCSI_PDU_TIMED_OUT:
671 pdu->status = ISCSI_PDU_CRC_FAILED;
672 break;
673 default:
674 return DATAOUT_CANNOT_RECOVER;
675 }
676
677recover:
678 return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
679}
680
681/*
682 * Called from iscsit_handle_data_out() before DataOUT Payload is received
683 * and CRC computed.
684 */
685extern int iscsit_check_pre_dataout(
686 struct iscsi_cmd *cmd,
687 unsigned char *buf)
688{
689 int ret;
690 struct iscsi_conn *conn = cmd->conn;
691
692 ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
693 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
694 (ret == DATAOUT_CANNOT_RECOVER))
695 return ret;
696
697 ret = iscsit_dataout_check_datasn(cmd, buf);
698 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
699 (ret == DATAOUT_CANNOT_RECOVER))
700 return ret;
701
702 if (cmd->unsolicited_data) {
703 ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
704 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
705 (ret == DATAOUT_CANNOT_RECOVER))
706 return ret;
707 } else {
708 ret = iscsit_dataout_check_sequence(cmd, buf);
709 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
710 (ret == DATAOUT_CANNOT_RECOVER))
711 return ret;
712 }
713
714 return (conn->sess->sess_ops->DataPDUInOrder) ?
715 iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
716 iscsit_dataout_pre_datapduinorder_no(cmd, buf);
717}
718
719/*
720 * Called from iscsit_handle_data_out() after DataOUT Payload is received
721 * and CRC computed.
722 */
723int iscsit_check_post_dataout(
724 struct iscsi_cmd *cmd,
725 unsigned char *buf,
726 u8 data_crc_failed)
727{
728 struct iscsi_conn *conn = cmd->conn;
729
730 cmd->dataout_timeout_retries = 0;
731
732 if (!data_crc_failed)
733 return iscsit_dataout_post_crc_passed(cmd, buf);
734 else {
735 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
736 pr_err("Unable to recover from DataOUT CRC"
737 " failure while ERL=0, closing session.\n");
738 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
739 1, 0, buf, cmd);
740 return DATAOUT_CANNOT_RECOVER;
741 }
742
743 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
744 0, 0, buf, cmd);
745 return iscsit_dataout_post_crc_failed(cmd, buf);
746 }
747}
748
749static void iscsit_handle_time2retain_timeout(unsigned long data)
750{
751 struct iscsi_session *sess = (struct iscsi_session *) data;
752 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
753 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
754
755 spin_lock_bh(&se_tpg->session_lock);
756 if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
757 spin_unlock_bh(&se_tpg->session_lock);
758 return;
759 }
760 if (atomic_read(&sess->session_reinstatement)) {
761 pr_err("Exiting Time2Retain handler because"
762 " session_reinstatement=1\n");
763 spin_unlock_bh(&se_tpg->session_lock);
764 return;
765 }
766 sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
767
768 pr_err("Time2Retain timer expired for SID: %u, cleaning up"
769 " iSCSI session.\n", sess->sid);
770 {
771 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
772
773 if (tiqn) {
774 spin_lock(&tiqn->sess_err_stats.lock);
775 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
776 (void *)sess->sess_ops->InitiatorName);
777 tiqn->sess_err_stats.last_sess_failure_type =
778 ISCSI_SESS_ERR_CXN_TIMEOUT;
779 tiqn->sess_err_stats.cxn_timeout_errors++;
780 sess->conn_timeout_errors++;
781 spin_unlock(&tiqn->sess_err_stats.lock);
782 }
783 }
784
785 spin_unlock_bh(&se_tpg->session_lock);
786 iscsit_close_session(sess);
787}
788
789extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
790{
791 int tpg_active;
792 /*
793	 * Only start the Time2Retain timer when the associated TPG is still in
794	 * an ACTIVE (e.g. not disabled or shutdown) state.
795 */
796 spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
797 tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
798 spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
799
800 if (!tpg_active)
801 return;
802
803 if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
804 return;
805
806 pr_debug("Starting Time2Retain timer for %u seconds on"
807 " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
808
809 init_timer(&sess->time2retain_timer);
810 sess->time2retain_timer.expires =
811 (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
812 sess->time2retain_timer.data = (unsigned long)sess;
813 sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
814 sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
815 sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
816 add_timer(&sess->time2retain_timer);
817}
818
819/*
820 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
821 */
822extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
823{
824 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
825 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
826
827 if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
828 return -1;
829
830 if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
831 return 0;
832
833 sess->time2retain_timer_flags |= ISCSI_TF_STOP;
834 spin_unlock_bh(&se_tpg->session_lock);
835
836 del_timer_sync(&sess->time2retain_timer);
837
838 spin_lock_bh(&se_tpg->session_lock);
839 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
840 pr_debug("Stopped Time2Retain Timer for SID: %u\n",
841 sess->sid);
842 return 0;
843}
844
845void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
846{
847 spin_lock_bh(&conn->state_lock);
848 if (atomic_read(&conn->connection_exit)) {
849 spin_unlock_bh(&conn->state_lock);
850 goto sleep;
851 }
852
853 if (atomic_read(&conn->transport_failed)) {
854 spin_unlock_bh(&conn->state_lock);
855 goto sleep;
856 }
857 spin_unlock_bh(&conn->state_lock);
858
859 iscsi_thread_set_force_reinstatement(conn);
860
861sleep:
862 wait_for_completion(&conn->conn_wait_rcfr_comp);
863 complete(&conn->conn_post_wait_comp);
864}
865
866void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
867{
868 spin_lock_bh(&conn->state_lock);
869 if (atomic_read(&conn->connection_exit)) {
870 spin_unlock_bh(&conn->state_lock);
871 return;
872 }
873
874 if (atomic_read(&conn->transport_failed)) {
875 spin_unlock_bh(&conn->state_lock);
876 return;
877 }
878
879 if (atomic_read(&conn->connection_reinstatement)) {
880 spin_unlock_bh(&conn->state_lock);
881 return;
882 }
883
884 if (iscsi_thread_set_force_reinstatement(conn) < 0) {
885 spin_unlock_bh(&conn->state_lock);
886 return;
887 }
888
889 atomic_set(&conn->connection_reinstatement, 1);
890 if (!sleep) {
891 spin_unlock_bh(&conn->state_lock);
892 return;
893 }
894
895 atomic_set(&conn->sleep_on_conn_wait_comp, 1);
896 spin_unlock_bh(&conn->state_lock);
897
898 wait_for_completion(&conn->conn_wait_comp);
899 complete(&conn->conn_post_wait_comp);
900}
901
902void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
903{
904 pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
905 " %u\n", sess->sid);
906
907 atomic_set(&sess->session_fall_back_to_erl0, 1);
908}
909
910static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
911{
912 struct iscsi_session *sess = conn->sess;
913
914 if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
915 !atomic_read(&sess->session_reinstatement) &&
916 !atomic_read(&sess->session_fall_back_to_erl0))
917 iscsit_connection_recovery_transport_reset(conn);
918 else {
919 pr_debug("Performing cleanup for failed iSCSI"
920 " Connection ID: %hu from %s\n", conn->cid,
921 sess->sess_ops->InitiatorName);
922 iscsit_close_connection(conn);
923 }
924}
925
926extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
927{
928 spin_lock_bh(&conn->state_lock);
929 if (atomic_read(&conn->connection_exit)) {
930 spin_unlock_bh(&conn->state_lock);
931 return;
932 }
933 atomic_set(&conn->connection_exit, 1);
934
935 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
936 spin_unlock_bh(&conn->state_lock);
937 iscsit_close_connection(conn);
938 return;
939 }
940
941 if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
942 spin_unlock_bh(&conn->state_lock);
943 return;
944 }
945
946 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
947 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
948 spin_unlock_bh(&conn->state_lock);
949
950 iscsit_handle_connection_cleanup(conn);
951}
952
953/*
954 * This is the simple function that makes the magic of
955 * sync and steering happen in the following, paradoxical order:
956 *
957 * 0) Receive conn->of_marker (bytes left until next OFMarker)
958 * bytes into an offload buffer. When we pass the exact number
959 * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
960 * rx_data() will automatically receive the identical u32 marker
961 * values and store it in conn->of_marker_offset;
962 * 1) Now conn->of_marker_offset will contain the offset to the start
963 * of the next iSCSI PDU. Dump these remaining bytes into another
964 * offload buffer.
965 * 2) We are done!
966 * Next byte in the TCP stream will contain the next iSCSI PDU!
967 * Cool Huh?!
968 */
969int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
970{
971 /*
972	 * Make sure the number of remaining bytes to the next marker is sane.
973 */
974 if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
975 pr_err("Remaining bytes to OFMarker: %u exceeds"
976 " OFMarkInt bytes: %u.\n", conn->of_marker,
977 conn->conn_ops->OFMarkInt * 4);
978 return -1;
979 }
980
981 pr_debug("Advancing %u bytes in TCP stream to get to the"
982 " next OFMarker.\n", conn->of_marker);
983
984 if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
985 return -1;
986
987 /*
988	 * Make sure the offset marker we retrieved is a valid value.
989 */
990 if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
991 conn->conn_ops->MaxRecvDataSegmentLength)) {
992 pr_err("OfMarker offset value: %u exceeds limit.\n",
993 conn->of_marker_offset);
994 return -1;
995 }
996
997 pr_debug("Discarding %u bytes of TCP stream to get to the"
998 " next iSCSI Opcode.\n", conn->of_marker_offset);
999
1000 if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
1001 return -1;
1002
1003 return 0;
1004}
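
For readers new to iSCSI sync-and-steering, the recovery above can be replayed on a plain byte counter: skip of_marker bytes to reach the marker, then skip the offset the marker carried to land on the next PDU header. The user-space sketch below does exactly that; OFMarkInt, of_marker and of_marker_offset values are invented, and the snippet is illustrative only.

#include <stdio.h>

/*
 * Illustrative only: the two skips performed by the recovery path above,
 * replayed on a byte counter instead of a live TCP stream.
 */
int main(void)
{
	unsigned int of_mark_int = 2048;	/* OFMarkInt, in 4-byte words */
	unsigned int of_marker = 1500;		/* bytes left until the marker */
	unsigned int of_marker_offset = 312;	/* offset read from the marker */
	unsigned int stream_pos = 0;

	/* Step 0: sanity check, then skip to the marker itself. */
	if (of_marker > of_mark_int * 4) {
		fprintf(stderr, "of_marker exceeds OFMarkInt bytes\n");
		return 1;
	}
	stream_pos += of_marker;

	/* Step 1: the marker told us how far away the next PDU header is. */
	stream_pos += of_marker_offset;

	printf("next iSCSI PDU starts %u bytes into the stream\n", stream_pos);
	return 0;
}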
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
new file mode 100644
index 000000000000..21acc9a06376
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -0,0 +1,15 @@
1#ifndef ISCSI_TARGET_ERL0_H
2#define ISCSI_TARGET_ERL0_H
3
4extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
5extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
6extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
7extern void iscsit_start_time2retain_handler(struct iscsi_session *);
8extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
9extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
10extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
11extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
12extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
13extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
14
15#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
new file mode 100644
index 000000000000..980650792cf6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -0,0 +1,1299 @@
1/*******************************************************************************
2 * This file contains error recovery level one used by the iSCSI Target driver.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/list.h>
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h"
28#include "iscsi_target_datain_values.h"
29#include "iscsi_target_device.h"
30#include "iscsi_target_tpg.h"
31#include "iscsi_target_util.h"
32#include "iscsi_target_erl0.h"
33#include "iscsi_target_erl1.h"
34#include "iscsi_target_erl2.h"
35#include "iscsi_target.h"
36
37#define OFFLOAD_BUF_SIZE 32768
38
39/*
40 * Used to dump excess datain payload for certain error recovery
41 * situations. Receive at most OFFLOAD_BUF_SIZE of datain per rx_data() call.
42 *
43 * dump_padding_digest denotes if padding and data digests need
44 * to be dumped.
45 */
46int iscsit_dump_data_payload(
47 struct iscsi_conn *conn,
48 u32 buf_len,
49 int dump_padding_digest)
50{
51 char *buf, pad_bytes[4];
52 int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
53 u32 length, padding, offset = 0, size;
54 struct kvec iov;
55
56 length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
57
58 buf = kzalloc(length, GFP_ATOMIC);
59 if (!buf) {
60 pr_err("Unable to allocate %u bytes for offload"
61 " buffer.\n", length);
62 return -1;
63 }
64 memset(&iov, 0, sizeof(struct kvec));
65
66 while (offset < buf_len) {
67 size = ((offset + length) > buf_len) ?
68 (buf_len - offset) : length;
69
70 iov.iov_len = size;
71 iov.iov_base = buf;
72
73 rx_got = rx_data(conn, &iov, 1, size);
74 if (rx_got != size) {
75 ret = DATAOUT_CANNOT_RECOVER;
76 goto out;
77 }
78
79 offset += size;
80 }
81
82 if (!dump_padding_digest)
83 goto out;
84
85 padding = ((-buf_len) & 3);
86 if (padding != 0) {
87 iov.iov_len = padding;
88 iov.iov_base = pad_bytes;
89
90 rx_got = rx_data(conn, &iov, 1, padding);
91 if (rx_got != padding) {
92 ret = DATAOUT_CANNOT_RECOVER;
93 goto out;
94 }
95 }
96
97 if (conn->conn_ops->DataDigest) {
98 u32 data_crc;
99
100 iov.iov_len = ISCSI_CRC_LEN;
101 iov.iov_base = &data_crc;
102
103 rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
104 if (rx_got != ISCSI_CRC_LEN) {
105 ret = DATAOUT_CANNOT_RECOVER;
106 goto out;
107 }
108 }
109
110out:
111 kfree(buf);
112 return ret;
113}
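
The dump loop above receives the payload in OFFLOAD_BUF_SIZE slices and then consumes any pad bytes, where the pad is whatever rounds the payload length up to a multiple of four. A minimal sketch of that arithmetic, with an invented payload length, looks like this (illustrative only, no driver code involved):

#include <stdio.h>

#define OFFLOAD_BUF_SIZE 32768

int main(void)
{
	unsigned int buf_len = 70003;	/* hypothetical payload length */
	unsigned int offset = 0, chunks = 0, padding;

	while (offset < buf_len) {
		unsigned int size = (buf_len - offset > OFFLOAD_BUF_SIZE) ?
					OFFLOAD_BUF_SIZE : buf_len - offset;
		offset += size;
		chunks++;
	}

	/* ((-len) & 3) gives the bytes needed to round len up to four. */
	padding = (-buf_len) & 3;

	printf("%u rx_data() calls, %u pad bytes\n", chunks, padding);
	return 0;
}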
114
115/*
116 * Used for retransmitting R2Ts from a R2T SNACK request.
117 */
118static int iscsit_send_recovery_r2t_for_snack(
119 struct iscsi_cmd *cmd,
120 struct iscsi_r2t *r2t)
121{
122 /*
123	 * If the struct iscsi_r2t has not been sent yet, we can
124	 * safely ignore retransmission of the R2TSN in
125	 * question.
126 */
127 spin_lock_bh(&cmd->r2t_lock);
128 if (!r2t->sent_r2t) {
129 spin_unlock_bh(&cmd->r2t_lock);
130 return 0;
131 }
132 r2t->sent_r2t = 0;
133 spin_unlock_bh(&cmd->r2t_lock);
134
135 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
136
137 return 0;
138}
139
140static int iscsit_handle_r2t_snack(
141 struct iscsi_cmd *cmd,
142 unsigned char *buf,
143 u32 begrun,
144 u32 runlength)
145{
146 u32 last_r2tsn;
147 struct iscsi_r2t *r2t;
148
149 /*
150 * Make sure the initiator is not requesting retransmission
151 * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
152 */
153 if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
154 (begrun <= cmd->acked_data_sn)) {
155 pr_err("ITT: 0x%08x, R2T SNACK requesting"
156 " retransmission of R2TSN: 0x%08x to 0x%08x but already"
157 " acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
158 " protocol error.\n", cmd->init_task_tag, begrun,
159 (begrun + runlength), cmd->acked_data_sn);
160
161 return iscsit_add_reject_from_cmd(
162 ISCSI_REASON_PROTOCOL_ERROR,
163 1, 0, buf, cmd);
164 }
165
166 if (runlength) {
167 if ((begrun + runlength) > cmd->r2t_sn) {
168 pr_err("Command ITT: 0x%08x received R2T SNACK"
169 " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
170 " current R2TSN: 0x%08x, protocol error.\n",
171 cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
172 return iscsit_add_reject_from_cmd(
173 ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
174 }
175 last_r2tsn = (begrun + runlength);
176 } else
177 last_r2tsn = cmd->r2t_sn;
178
179 while (begrun < last_r2tsn) {
180 r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
181 if (!r2t)
182 return -1;
183 if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
184 return -1;
185
186 begrun++;
187 }
188
189 return 0;
190}
191
192/*
193 * Generates Offsets and NextBurstLength based on Begrun and Runlength
194 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
195 *
196 * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
197 *
198 * FIXME: How is this handled for a RData SNACK?
199 */
200int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
201 struct iscsi_cmd *cmd,
202 struct iscsi_datain_req *dr)
203{
204 u32 data_sn = 0, data_sn_count = 0;
205 u32 pdu_start = 0, seq_no = 0;
206 u32 begrun = dr->begrun;
207 struct iscsi_conn *conn = cmd->conn;
208
209 while (begrun > data_sn++) {
210 data_sn_count++;
211 if ((dr->next_burst_len +
212 conn->conn_ops->MaxRecvDataSegmentLength) <
213 conn->sess->sess_ops->MaxBurstLength) {
214 dr->read_data_done +=
215 conn->conn_ops->MaxRecvDataSegmentLength;
216 dr->next_burst_len +=
217 conn->conn_ops->MaxRecvDataSegmentLength;
218 } else {
219 dr->read_data_done +=
220 (conn->sess->sess_ops->MaxBurstLength -
221 dr->next_burst_len);
222 dr->next_burst_len = 0;
223 pdu_start += data_sn_count;
224 data_sn_count = 0;
225 seq_no++;
226 }
227 }
228
229 if (!conn->sess->sess_ops->DataPDUInOrder) {
230 cmd->seq_no = seq_no;
231 cmd->pdu_start = pdu_start;
232 cmd->pdu_send_order = data_sn_count;
233 }
234
235 return 0;
236}
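
Concretely, the BegRun walk above advances a running offset one MaxRecvDataSegmentLength-sized PDU at a time and closes out a burst whenever MaxBurstLength would be reached. The sketch below replays the same loop in user space with invented negotiated values (8 KB MRDSL, 64 KB MaxBurstLength, BegRun of 10); it is illustrative only and shares no code with the driver.

#include <stdio.h>

int main(void)
{
	unsigned int mrdsl = 8192;		/* MaxRecvDataSegmentLength */
	unsigned int max_burst = 65536;		/* MaxBurstLength */
	unsigned int begrun = 10;		/* DataSN to retransmit from */
	unsigned int read_data_done = 0, next_burst_len = 0, data_sn = 0;

	while (begrun > data_sn++) {
		if (next_burst_len + mrdsl < max_burst) {
			read_data_done += mrdsl;
			next_burst_len += mrdsl;
		} else {
			/* Burst boundary: close out the current burst. */
			read_data_done += max_burst - next_burst_len;
			next_burst_len = 0;
		}
	}

	printf("restart at offset %u, next_burst_len %u\n",
	       read_data_done, next_burst_len);
	return 0;
}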
237
238/*
239 * Generates Offsets and NextBurstLength based on Begrun and Runlength
240 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
241 *
242 * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
243 *
244 * FIXME: How is this handled for a RData SNACK?
245 */
246int iscsit_create_recovery_datain_values_datasequenceinorder_no(
247 struct iscsi_cmd *cmd,
248 struct iscsi_datain_req *dr)
249{
250 int found_seq = 0, i;
251 u32 data_sn, read_data_done = 0, seq_send_order = 0;
252 u32 begrun = dr->begrun;
253 u32 runlength = dr->runlength;
254 struct iscsi_conn *conn = cmd->conn;
255 struct iscsi_seq *first_seq = NULL, *seq = NULL;
256
257 if (!cmd->seq_list) {
258 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
259 return -1;
260 }
261
262 /*
263 * Calculate read_data_done for all sequences containing a
264 * first_datasn and last_datasn less than the BegRun.
265 *
266 * Locate the struct iscsi_seq the BegRun lies within and calculate
267	 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
268 *
269 * Also use struct iscsi_seq->seq_send_order to determine where to start.
270 */
271 for (i = 0; i < cmd->seq_count; i++) {
272 seq = &cmd->seq_list[i];
273
274 if (!seq->seq_send_order)
275 first_seq = seq;
276
277 /*
278 * No data has been transferred for this DataIN sequence, so the
279 * seq->first_datasn and seq->last_datasn have not been set.
280 */
281 if (!seq->sent) {
282#if 0
283 pr_err("Ignoring non-sent sequence 0x%08x ->"
284 " 0x%08x\n\n", seq->first_datasn,
285 seq->last_datasn);
286#endif
287 continue;
288 }
289
290 /*
291		 * This DataIN sequence precedes the received BegRun; add the
292 * total xfer_len of the sequence to read_data_done and reset
293 * seq->pdu_send_order.
294 */
295 if ((seq->first_datasn < begrun) &&
296 (seq->last_datasn < begrun)) {
297#if 0
298 pr_err("Pre BegRun sequence 0x%08x ->"
299 " 0x%08x\n", seq->first_datasn,
300 seq->last_datasn);
301#endif
302 read_data_done += cmd->seq_list[i].xfer_len;
303 seq->next_burst_len = seq->pdu_send_order = 0;
304 continue;
305 }
306
307 /*
308 * The BegRun lies within this DataIN sequence.
309 */
310 if ((seq->first_datasn <= begrun) &&
311 (seq->last_datasn >= begrun)) {
312#if 0
313 pr_err("Found sequence begrun: 0x%08x in"
314 " 0x%08x -> 0x%08x\n", begrun,
315 seq->first_datasn, seq->last_datasn);
316#endif
317 seq_send_order = seq->seq_send_order;
318 data_sn = seq->first_datasn;
319 seq->next_burst_len = seq->pdu_send_order = 0;
320 found_seq = 1;
321
322 /*
323 * For DataPDUInOrder=Yes, while the first DataSN of
324 * the sequence is less than the received BegRun, add
325 * the MaxRecvDataSegmentLength to read_data_done and
326 * to the sequence's next_burst_len;
327 *
328 * For DataPDUInOrder=No, while the first DataSN of the
329 * sequence is less than the received BegRun, find the
330 * struct iscsi_pdu of the DataSN in question and add the
331 * MaxRecvDataSegmentLength to read_data_done and to the
332 * sequence's next_burst_len;
333 */
334 if (conn->sess->sess_ops->DataPDUInOrder) {
335 while (data_sn < begrun) {
336 seq->pdu_send_order++;
337 read_data_done +=
338 conn->conn_ops->MaxRecvDataSegmentLength;
339 seq->next_burst_len +=
340 conn->conn_ops->MaxRecvDataSegmentLength;
341 data_sn++;
342 }
343 } else {
344 int j;
345 struct iscsi_pdu *pdu;
346
347 while (data_sn < begrun) {
348 seq->pdu_send_order++;
349
350 for (j = 0; j < seq->pdu_count; j++) {
351 pdu = &cmd->pdu_list[
352 seq->pdu_start + j];
353 if (pdu->data_sn == data_sn) {
354 read_data_done +=
355 pdu->length;
356 seq->next_burst_len +=
357 pdu->length;
358 }
359 }
360 data_sn++;
361 }
362 }
363 continue;
364 }
365
366 /*
367 * This DataIN sequence is larger than the received BegRun,
368 * reset seq->pdu_send_order and continue.
369 */
370 if ((seq->first_datasn > begrun) ||
371 (seq->last_datasn > begrun)) {
372#if 0
373 pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
374 seq->first_datasn, seq->last_datasn);
375#endif
376 seq->next_burst_len = seq->pdu_send_order = 0;
377 continue;
378 }
379 }
380
381 if (!found_seq) {
382 if (!begrun) {
383 if (!first_seq) {
384 pr_err("ITT: 0x%08x, Begrun: 0x%08x"
385 " but first_seq is NULL\n",
386 cmd->init_task_tag, begrun);
387 return -1;
388 }
389 seq_send_order = first_seq->seq_send_order;
390			first_seq->next_burst_len = first_seq->pdu_send_order = 0;
391 goto done;
392 }
393
394 pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
395 " BegRun: 0x%08x, RunLength: 0x%08x while"
396 " DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
397 cmd->init_task_tag, begrun, runlength,
398 (conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
399 return -1;
400 }
401
402done:
403 dr->read_data_done = read_data_done;
404 dr->seq_send_order = seq_send_order;
405
406 return 0;
407}
408
409static int iscsit_handle_recovery_datain(
410 struct iscsi_cmd *cmd,
411 unsigned char *buf,
412 u32 begrun,
413 u32 runlength)
414{
415 struct iscsi_conn *conn = cmd->conn;
416 struct iscsi_datain_req *dr;
417 struct se_cmd *se_cmd = &cmd->se_cmd;
418
419 if (!atomic_read(&se_cmd->t_transport_complete)) {
420 pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
421 cmd->init_task_tag);
422 return 0;
423 }
424
425 /*
426 * Make sure the initiator is not requesting retransmission
427 * of DataSNs already acknowledged by a Data ACK SNACK.
428 */
429 if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
430 (begrun <= cmd->acked_data_sn)) {
431 pr_err("ITT: 0x%08x, Data SNACK requesting"
432 " retransmission of DataSN: 0x%08x to 0x%08x but"
433 " already acked to DataSN: 0x%08x by Data ACK SNACK,"
434 " protocol error.\n", cmd->init_task_tag, begrun,
435 (begrun + runlength), cmd->acked_data_sn);
436
437 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
438 1, 0, buf, cmd);
439 }
440
441 /*
442 * Make sure BegRun and RunLength in the Data SNACK are sane.
443 * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
444 */
445 if ((begrun + runlength) > (cmd->data_sn - 1)) {
446 pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
447 ": 0x%08x greater than maximum DataSN: 0x%08x.\n",
448 begrun, runlength, (cmd->data_sn - 1));
449 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
450 1, 0, buf, cmd);
451 }
452
453 dr = iscsit_allocate_datain_req();
454 if (!dr)
455 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
456 1, 0, buf, cmd);
457
458 dr->data_sn = dr->begrun = begrun;
459 dr->runlength = runlength;
460 dr->generate_recovery_values = 1;
461 dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
462
463 iscsit_attach_datain_req(cmd, dr);
464
465 cmd->i_state = ISTATE_SEND_DATAIN;
466 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
467
468 return 0;
469}
470
471int iscsit_handle_recovery_datain_or_r2t(
472 struct iscsi_conn *conn,
473 unsigned char *buf,
474 u32 init_task_tag,
475 u32 targ_xfer_tag,
476 u32 begrun,
477 u32 runlength)
478{
479 struct iscsi_cmd *cmd;
480
481 cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
482 if (!cmd)
483 return 0;
484
485 /*
486 * FIXME: This will not work for bidi commands.
487 */
488 switch (cmd->data_direction) {
489 case DMA_TO_DEVICE:
490 return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
491 case DMA_FROM_DEVICE:
492 return iscsit_handle_recovery_datain(cmd, buf, begrun,
493 runlength);
494 default:
495 pr_err("Unknown cmd->data_direction: 0x%02x\n",
496 cmd->data_direction);
497 return -1;
498 }
499
500 return 0;
501}
502
503/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
504int iscsit_handle_status_snack(
505 struct iscsi_conn *conn,
506 u32 init_task_tag,
507 u32 targ_xfer_tag,
508 u32 begrun,
509 u32 runlength)
510{
511 struct iscsi_cmd *cmd = NULL;
512 u32 last_statsn;
513 int found_cmd;
514
515 if (conn->exp_statsn > begrun) {
516 pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
517 " 0x%08x but already got ExpStatSN: 0x%08x on CID:"
518 " %hu.\n", begrun, runlength, conn->exp_statsn,
519 conn->cid);
520 return 0;
521 }
522
523 last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
524
525 while (begrun < last_statsn) {
526 found_cmd = 0;
527
528 spin_lock_bh(&conn->cmd_lock);
529 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
530 if (cmd->stat_sn == begrun) {
531 found_cmd = 1;
532 break;
533 }
534 }
535 spin_unlock_bh(&conn->cmd_lock);
536
537 if (!found_cmd) {
538 pr_err("Unable to find StatSN: 0x%08x for"
539 " a Status SNACK, assuming this was a"
540				" proactive SNACK for an untransmitted"
541 " StatSN, ignoring.\n", begrun);
542 begrun++;
543 continue;
544 }
545
546 spin_lock_bh(&cmd->istate_lock);
547 if (cmd->i_state == ISTATE_SEND_DATAIN) {
548 spin_unlock_bh(&cmd->istate_lock);
549 pr_err("Ignoring Status SNACK for BegRun:"
550 " 0x%08x, RunLength: 0x%08x, assuming this was"
551				" a proactive SNACK for an untransmitted"
552 " StatSN\n", begrun, runlength);
553 begrun++;
554 continue;
555 }
556 spin_unlock_bh(&cmd->istate_lock);
557
558 cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
559 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
560 begrun++;
561 }
562
563 return 0;
564}
565
566int iscsit_handle_data_ack(
567 struct iscsi_conn *conn,
568 u32 targ_xfer_tag,
569 u32 begrun,
570 u32 runlength)
571{
572 struct iscsi_cmd *cmd = NULL;
573
574 cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
575 if (!cmd) {
576 pr_err("Data ACK SNACK for TTT: 0x%08x is"
577 " invalid.\n", targ_xfer_tag);
578 return -1;
579 }
580
581 if (begrun <= cmd->acked_data_sn) {
582 pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
583 " less than the already acked DataSN: 0x%08x.\n",
584 cmd->init_task_tag, begrun, cmd->acked_data_sn);
585 return -1;
586 }
587
588 /*
589 * For Data ACK SNACK, BegRun is the next expected DataSN.
590 * (see iSCSI v19: 10.16.6)
591 */
592 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
593 cmd->acked_data_sn = (begrun - 1);
594
595 pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
596 " updated acked DataSN to 0x%08x.\n",
597 cmd->init_task_tag, cmd->acked_data_sn);
598
599 return 0;
600}
601
602static int iscsit_send_recovery_r2t(
603 struct iscsi_cmd *cmd,
604 u32 offset,
605 u32 xfer_len)
606{
607 int ret;
608
609 spin_lock_bh(&cmd->r2t_lock);
610 ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
611 spin_unlock_bh(&cmd->r2t_lock);
612
613 return ret;
614}
615
616int iscsit_dataout_datapduinorder_no_fbit(
617 struct iscsi_cmd *cmd,
618 struct iscsi_pdu *pdu)
619{
620 int i, send_recovery_r2t = 0, recovery = 0;
621 u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
622 struct iscsi_conn *conn = cmd->conn;
623 struct iscsi_pdu *first_pdu = NULL;
624
625 /*
626	 * Get a struct iscsi_pdu pointer to the first PDU, and total PDU count
627 * of the DataOUT sequence.
628 */
629 if (conn->sess->sess_ops->DataSequenceInOrder) {
630 for (i = 0; i < cmd->pdu_count; i++) {
631 if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
632 if (!first_pdu)
633 first_pdu = &cmd->pdu_list[i];
634 xfer_len += cmd->pdu_list[i].length;
635 pdu_count++;
636 } else if (pdu_count)
637 break;
638 }
639 } else {
640 struct iscsi_seq *seq = cmd->seq_ptr;
641
642 first_pdu = &cmd->pdu_list[seq->pdu_start];
643 pdu_count = seq->pdu_count;
644 }
645
646 if (!first_pdu || !pdu_count)
647 return DATAOUT_CANNOT_RECOVER;
648
649 /*
650 * Loop through the ending DataOUT Sequence checking each struct iscsi_pdu.
651 * The following ugly logic does batching of not received PDUs.
652 */
653 for (i = 0; i < pdu_count; i++) {
654 if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
655 if (!send_recovery_r2t)
656 continue;
657
658 if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
659 return DATAOUT_CANNOT_RECOVER;
660
661 send_recovery_r2t = length = offset = 0;
662 continue;
663 }
664 /*
665 * Set recovery = 1 for any missing, CRC failed, or timed
666 * out PDUs to let the DataOUT logic know that this sequence
667 * has not been completed yet.
668 *
669 * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
670 * We assume if the PDU either failed CRC or timed out
671 * that a Recovery R2T has already been sent.
672 */
673 recovery = 1;
674
675 if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
676 continue;
677
678 if (!offset)
679 offset = first_pdu[i].offset;
680 length += first_pdu[i].length;
681
682 send_recovery_r2t = 1;
683 }
684
685 if (send_recovery_r2t)
686 if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
687 return DATAOUT_CANNOT_RECOVER;
688
689 return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
690}
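
The batching above can be pictured as run-length coalescing over the PDU table: consecutive not-received PDUs collapse into a single offset/length pair, and each received PDU closes any open run. The stand-alone sketch below shows the idea with an invented PDU table; in the driver the equivalent state lives in cmd->pdu_list and the R2Ts are queued via iscsit_send_recovery_r2t().

#include <stdio.h>

enum pdu_status { PDU_RECEIVED_OK, PDU_NOT_RECEIVED };

struct pdu {
	unsigned int offset;
	unsigned int length;
	enum pdu_status status;
};

int main(void)
{
	struct pdu pdus[] = {
		{     0, 4096, PDU_RECEIVED_OK  },
		{  4096, 4096, PDU_NOT_RECEIVED },
		{  8192, 4096, PDU_NOT_RECEIVED },
		{ 12288, 4096, PDU_RECEIVED_OK  },
		{ 16384, 4096, PDU_NOT_RECEIVED },
	};
	unsigned int i, offset = 0, length = 0;
	int open = 0;

	for (i = 0; i < sizeof(pdus) / sizeof(pdus[0]); i++) {
		if (pdus[i].status == PDU_RECEIVED_OK) {
			/* A received PDU closes any open run of missing PDUs. */
			if (open)
				printf("recovery R2T: offset %u, length %u\n",
				       offset, length);
			open = 0;
			offset = length = 0;
			continue;
		}
		if (!open)
			offset = pdus[i].offset;
		length += pdus[i].length;
		open = 1;
	}
	if (open)
		printf("recovery R2T: offset %u, length %u\n", offset, length);
	return 0;
}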
691
692static int iscsit_recalculate_dataout_values(
693 struct iscsi_cmd *cmd,
694 u32 pdu_offset,
695 u32 pdu_length,
696 u32 *r2t_offset,
697 u32 *r2t_length)
698{
699 int i;
700 struct iscsi_conn *conn = cmd->conn;
701 struct iscsi_pdu *pdu = NULL;
702
703 if (conn->sess->sess_ops->DataSequenceInOrder) {
704 cmd->data_sn = 0;
705
706 if (conn->sess->sess_ops->DataPDUInOrder) {
707 *r2t_offset = cmd->write_data_done;
708 *r2t_length = (cmd->seq_end_offset -
709 cmd->write_data_done);
710 return 0;
711 }
712
713 *r2t_offset = cmd->seq_start_offset;
714 *r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
715
716 for (i = 0; i < cmd->pdu_count; i++) {
717 pdu = &cmd->pdu_list[i];
718
719 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
720 continue;
721
722 if ((pdu->offset >= cmd->seq_start_offset) &&
723 ((pdu->offset + pdu->length) <=
724 cmd->seq_end_offset)) {
725 if (!cmd->unsolicited_data)
726 cmd->next_burst_len -= pdu->length;
727 else
728 cmd->first_burst_len -= pdu->length;
729
730 cmd->write_data_done -= pdu->length;
731 pdu->status = ISCSI_PDU_NOT_RECEIVED;
732 }
733 }
734 } else {
735 struct iscsi_seq *seq = NULL;
736
737 seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
738 if (!seq)
739 return -1;
740
741 *r2t_offset = seq->orig_offset;
742 *r2t_length = seq->xfer_len;
743
744 cmd->write_data_done -= (seq->offset - seq->orig_offset);
745 if (cmd->immediate_data)
746 cmd->first_burst_len = cmd->write_data_done;
747
748 seq->data_sn = 0;
749 seq->offset = seq->orig_offset;
750 seq->next_burst_len = 0;
751 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
752
753 if (conn->sess->sess_ops->DataPDUInOrder)
754 return 0;
755
756 for (i = 0; i < seq->pdu_count; i++) {
757 pdu = &cmd->pdu_list[i+seq->pdu_start];
758
759 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
760 continue;
761
762 pdu->status = ISCSI_PDU_NOT_RECEIVED;
763 }
764 }
765
766 return 0;
767}
768
769int iscsit_recover_dataout_sequence(
770 struct iscsi_cmd *cmd,
771 u32 pdu_offset,
772 u32 pdu_length)
773{
774 u32 r2t_length = 0, r2t_offset = 0;
775
776 spin_lock_bh(&cmd->istate_lock);
777 cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
778 spin_unlock_bh(&cmd->istate_lock);
779
780 if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
781 &r2t_offset, &r2t_length) < 0)
782 return DATAOUT_CANNOT_RECOVER;
783
784 iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
785
786 return DATAOUT_WITHIN_COMMAND_RECOVERY;
787}
788
789static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
790{
791 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
792
793 ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
794 if (!ooo_cmdsn) {
795 pr_err("Unable to allocate memory for"
796 " struct iscsi_ooo_cmdsn.\n");
797 return NULL;
798 }
799 INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
800
801 return ooo_cmdsn;
802}
803
804/*
805 * Called with sess->cmdsn_mutex held.
806 */
807static int iscsit_attach_ooo_cmdsn(
808 struct iscsi_session *sess,
809 struct iscsi_ooo_cmdsn *ooo_cmdsn)
810{
811 struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
812 /*
813 * We attach the struct iscsi_ooo_cmdsn entry to the out of order
814 * list in increasing CmdSN order.
815 * This allows iscsi_execute_ooo_cmdsns() to detect any
816 * additional CmdSN holes while performing delayed execution.
817 */
818 if (list_empty(&sess->sess_ooo_cmdsn_list))
819 list_add_tail(&ooo_cmdsn->ooo_list,
820 &sess->sess_ooo_cmdsn_list);
821 else {
822 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
823 typeof(*ooo_tail), ooo_list);
824 /*
825 * CmdSN is greater than the tail of the list.
826 */
827 if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
828 list_add_tail(&ooo_cmdsn->ooo_list,
829 &sess->sess_ooo_cmdsn_list);
830 else {
831 /*
832 * CmdSN is either lower than the head, or somewhere
833 * in the middle.
834 */
835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
836 ooo_list) {
837				if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
838					continue;
839				/* Insert before the first larger CmdSN. */
840				list_add_tail(&ooo_cmdsn->ooo_list,
841						&ooo_tmp->ooo_list);
842 break;
843 }
844 }
845 }
846
847 return 0;
848}
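
The same sorted-insert idea, stripped of the kernel list_head machinery, is easy to see in a plain singly linked list: walk past smaller CmdSNs and splice in before the first larger one, so a later front-to-back walk can detect remaining holes. The sketch below is illustrative only; names and values are invented.

#include <stdio.h>
#include <stdlib.h>

struct ooo {
	unsigned int cmdsn;
	struct ooo *next;
};

static void ooo_insert(struct ooo **head, unsigned int cmdsn)
{
	struct ooo *n = malloc(sizeof(*n));
	struct ooo **pp = head;

	if (!n)
		return;
	n->cmdsn = cmdsn;
	/* Walk past smaller CmdSNs, then splice in before the first larger. */
	while (*pp && (*pp)->cmdsn < cmdsn)
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;
}

int main(void)
{
	struct ooo *head = NULL, *p, *next;

	ooo_insert(&head, 7);
	ooo_insert(&head, 5);
	ooo_insert(&head, 9);

	for (p = head; p; p = p->next)
		printf("CmdSN 0x%08x\n", p->cmdsn);

	for (p = head; p; p = next) {
		next = p->next;
		free(p);
	}
	return 0;
}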
849
850/*
851 * Removes a struct iscsi_ooo_cmdsn from a session's list,
852 * called with struct iscsi_session->cmdsn_mutex held.
853 */
854void iscsit_remove_ooo_cmdsn(
855 struct iscsi_session *sess,
856 struct iscsi_ooo_cmdsn *ooo_cmdsn)
857{
858 list_del(&ooo_cmdsn->ooo_list);
859 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
860}
861
862void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
863{
864 struct iscsi_ooo_cmdsn *ooo_cmdsn;
865 struct iscsi_session *sess = conn->sess;
866
867 mutex_lock(&sess->cmdsn_mutex);
868 list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
869 if (ooo_cmdsn->cid != conn->cid)
870 continue;
871
872 ooo_cmdsn->cmd = NULL;
873 }
874 mutex_unlock(&sess->cmdsn_mutex);
875}
876
877/*
878 * Called with sess->cmdsn_mutex held.
879 */
880int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
881{
882 int ooo_count = 0;
883 struct iscsi_cmd *cmd = NULL;
884 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
885
886 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
887 &sess->sess_ooo_cmdsn_list, ooo_list) {
888 if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
889 continue;
890
891 if (!ooo_cmdsn->cmd) {
892 sess->exp_cmd_sn++;
893 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
894 continue;
895 }
896
897 cmd = ooo_cmdsn->cmd;
898 cmd->i_state = cmd->deferred_i_state;
899 ooo_count++;
900 sess->exp_cmd_sn++;
901 pr_debug("Executing out of order CmdSN: 0x%08x,"
902 " incremented ExpCmdSN to 0x%08x.\n",
903 cmd->cmd_sn, sess->exp_cmd_sn);
904
905 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
906
907 if (iscsit_execute_cmd(cmd, 1) < 0)
908 return -1;
909
910 continue;
911 }
912
913 return ooo_count;
914}
915
916/*
917 * Called either:
918 *
919 * 1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns()
920 * or iscsi_check_received_cmdsn().
921 * 2. With no locks held directly from iscsi_handle_XXX_pdu() functions
922 * for immediate commands.
923 */
924int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
925{
926 struct se_cmd *se_cmd = &cmd->se_cmd;
927 int lr = 0;
928
929 spin_lock_bh(&cmd->istate_lock);
930 if (ooo)
931 cmd->cmd_flags &= ~ICF_OOO_CMDSN;
932
933 switch (cmd->iscsi_opcode) {
934 case ISCSI_OP_SCSI_CMD:
935 /*
936 * Go ahead and send the CHECK_CONDITION status for
937 * any SCSI CDB exceptions that may have occurred, also
938 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
939 */
940 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
941 if (se_cmd->se_cmd_flags &
942 SCF_SCSI_RESERVATION_CONFLICT) {
943 cmd->i_state = ISTATE_SEND_STATUS;
944 spin_unlock_bh(&cmd->istate_lock);
945 iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
946 cmd->i_state);
947 return 0;
948 }
949 spin_unlock_bh(&cmd->istate_lock);
950 /*
951 * Determine if delayed TASK_ABORTED status for WRITEs
952 * should be sent now if no unsolicited data out
953 * payloads are expected, or if the delayed status
954 * should be sent after unsolicited data out with
955 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
956 */
957 if (transport_check_aborted_status(se_cmd,
958 (cmd->unsolicited_data == 0)) != 0)
959 return 0;
960 /*
961 * Otherwise send CHECK_CONDITION and sense for
962 * exception
963 */
964 return transport_send_check_condition_and_sense(se_cmd,
965 se_cmd->scsi_sense_reason, 0);
966 }
967 /*
968 * Special case for delayed CmdSN with Immediate
969 * Data and/or Unsolicited Data Out attached.
970 */
971 if (cmd->immediate_data) {
972 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
973 spin_unlock_bh(&cmd->istate_lock);
974 return transport_generic_handle_data(
975 &cmd->se_cmd);
976 }
977 spin_unlock_bh(&cmd->istate_lock);
978
979 if (!(cmd->cmd_flags &
980 ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
981 /*
982 * Send the delayed TASK_ABORTED status for
983				 * WRITEs if no more unsolicited data is
984 * expected.
985 */
986 if (transport_check_aborted_status(se_cmd, 1)
987 != 0)
988 return 0;
989
990 iscsit_set_dataout_sequence_values(cmd);
991 iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
992 }
993 return 0;
994 }
995 /*
996 * The default handler.
997 */
998 spin_unlock_bh(&cmd->istate_lock);
999
1000 if ((cmd->data_direction == DMA_TO_DEVICE) &&
1001 !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
1002 /*
1003 * Send the delayed TASK_ABORTED status for WRITEs if
1004		 * no more unsolicited data is expected.
1005 */
1006 if (transport_check_aborted_status(se_cmd, 1) != 0)
1007 return 0;
1008
1009 iscsit_set_dataout_sequence_values(cmd);
1010 spin_lock_bh(&cmd->dataout_timeout_lock);
1011 iscsit_start_dataout_timer(cmd, cmd->conn);
1012 spin_unlock_bh(&cmd->dataout_timeout_lock);
1013 }
1014 return transport_handle_cdb_direct(&cmd->se_cmd);
1015
1016 case ISCSI_OP_NOOP_OUT:
1017 case ISCSI_OP_TEXT:
1018 spin_unlock_bh(&cmd->istate_lock);
1019 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1020 break;
1021 case ISCSI_OP_SCSI_TMFUNC:
1022 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
1023 spin_unlock_bh(&cmd->istate_lock);
1024 iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
1025 cmd->i_state);
1026 return 0;
1027 }
1028 spin_unlock_bh(&cmd->istate_lock);
1029
1030 return transport_generic_handle_tmr(&cmd->se_cmd);
1031 case ISCSI_OP_LOGOUT:
1032 spin_unlock_bh(&cmd->istate_lock);
1033 switch (cmd->logout_reason) {
1034 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
1035 lr = iscsit_logout_closesession(cmd, cmd->conn);
1036 break;
1037 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
1038 lr = iscsit_logout_closeconnection(cmd, cmd->conn);
1039 break;
1040 case ISCSI_LOGOUT_REASON_RECOVERY:
1041 lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
1042 break;
1043 default:
1044 pr_err("Unknown iSCSI Logout Request Code:"
1045 " 0x%02x\n", cmd->logout_reason);
1046 return -1;
1047 }
1048
1049 return lr;
1050 default:
1051 spin_unlock_bh(&cmd->istate_lock);
1052 pr_err("Cannot perform out of order execution for"
1053 " unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
1054 return -1;
1055 }
1056
1057 return 0;
1058}
1059
1060void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
1061{
1062 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
1063
1064 mutex_lock(&sess->cmdsn_mutex);
1065 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
1066 &sess->sess_ooo_cmdsn_list, ooo_list) {
1067
1068 list_del(&ooo_cmdsn->ooo_list);
1069 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
1070 }
1071 mutex_unlock(&sess->cmdsn_mutex);
1072}
1073
1074int iscsit_handle_ooo_cmdsn(
1075 struct iscsi_session *sess,
1076 struct iscsi_cmd *cmd,
1077 u32 cmdsn)
1078{
1079 int batch = 0;
1080 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
1081
1082 cmd->deferred_i_state = cmd->i_state;
1083 cmd->i_state = ISTATE_DEFERRED_CMD;
1084 cmd->cmd_flags |= ICF_OOO_CMDSN;
1085
1086 if (list_empty(&sess->sess_ooo_cmdsn_list))
1087 batch = 1;
1088 else {
1089 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
1090 typeof(*ooo_tail), ooo_list);
1091 if (ooo_tail->cmdsn != (cmdsn - 1))
1092 batch = 1;
1093 }
1094
1095 ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
1096 if (!ooo_cmdsn)
1097 return CMDSN_ERROR_CANNOT_RECOVER;
1098
1099 ooo_cmdsn->cmd = cmd;
1100 ooo_cmdsn->batch_count = (batch) ?
1101 (cmdsn - sess->exp_cmd_sn) : 1;
1102 ooo_cmdsn->cid = cmd->conn->cid;
1103 ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn;
1104 ooo_cmdsn->cmdsn = cmdsn;
1105
1106 if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
1107 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
1108 return CMDSN_ERROR_CANNOT_RECOVER;
1109 }
1110
1111 return CMDSN_HIGHER_THAN_EXP;
1112}
1113
1114static int iscsit_set_dataout_timeout_values(
1115 struct iscsi_cmd *cmd,
1116 u32 *offset,
1117 u32 *length)
1118{
1119 struct iscsi_conn *conn = cmd->conn;
1120 struct iscsi_r2t *r2t;
1121
1122 if (cmd->unsolicited_data) {
1123 *offset = 0;
1124 *length = (conn->sess->sess_ops->FirstBurstLength >
1125 cmd->data_length) ?
1126 cmd->data_length :
1127 conn->sess->sess_ops->FirstBurstLength;
1128 return 0;
1129 }
1130
1131 spin_lock_bh(&cmd->r2t_lock);
1132 if (list_empty(&cmd->cmd_r2t_list)) {
1133 pr_err("cmd->cmd_r2t_list is empty!\n");
1134 spin_unlock_bh(&cmd->r2t_lock);
1135 return -1;
1136 }
1137
1138 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
1139 if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
1140 *offset = r2t->offset;
1141 *length = r2t->xfer_len;
1142 spin_unlock_bh(&cmd->r2t_lock);
1143 return 0;
1144 }
1145 }
1146 spin_unlock_bh(&cmd->r2t_lock);
1147
1148 pr_err("Unable to locate any incomplete DataOUT"
1149 " sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
1150
1151 return -1;
1152}
1153
1154/*
1155 * NOTE: Called from interrupt (timer) context.
1156 */
1157static void iscsit_handle_dataout_timeout(unsigned long data)
1158{
1159 u32 pdu_length = 0, pdu_offset = 0;
1160 u32 r2t_length = 0, r2t_offset = 0;
1161 struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
1162 struct iscsi_conn *conn = cmd->conn;
1163 struct iscsi_session *sess = NULL;
1164 struct iscsi_node_attrib *na;
1165
1166 iscsit_inc_conn_usage_count(conn);
1167
1168 spin_lock_bh(&cmd->dataout_timeout_lock);
1169 if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
1170 spin_unlock_bh(&cmd->dataout_timeout_lock);
1171 iscsit_dec_conn_usage_count(conn);
1172 return;
1173 }
1174 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
1175 sess = conn->sess;
1176 na = iscsit_tpg_get_node_attrib(sess);
1177
1178 if (!sess->sess_ops->ErrorRecoveryLevel) {
1179 pr_debug("Unable to recover from DataOut timeout while"
1180 " in ERL=0.\n");
1181 goto failure;
1182 }
1183
1184 if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
1185 pr_debug("Command ITT: 0x%08x exceeded max retries"
1186 " for DataOUT timeout %u, closing iSCSI connection.\n",
1187 cmd->init_task_tag, na->dataout_timeout_retries);
1188 goto failure;
1189 }
1190
1191 cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
1192
1193 if (conn->sess->sess_ops->DataSequenceInOrder) {
1194 if (conn->sess->sess_ops->DataPDUInOrder) {
1195 pdu_offset = cmd->write_data_done;
1196 if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
1197 cmd->next_burst_len)) > cmd->data_length)
1198 pdu_length = (cmd->data_length -
1199 cmd->write_data_done);
1200 else
1201 pdu_length = (conn->sess->sess_ops->MaxBurstLength -
1202 cmd->next_burst_len);
1203 } else {
1204 pdu_offset = cmd->seq_start_offset;
1205 pdu_length = (cmd->seq_end_offset -
1206 cmd->seq_start_offset);
1207 }
1208 } else {
1209 if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
1210 &pdu_length) < 0)
1211 goto failure;
1212 }
1213
1214 if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
1215 &r2t_offset, &r2t_length) < 0)
1216 goto failure;
1217
1218 pr_debug("Command ITT: 0x%08x timed out waiting for"
1219 " completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
1220 cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
1221 "", r2t_offset, r2t_length);
1222
1223 if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
1224 goto failure;
1225
1226 iscsit_start_dataout_timer(cmd, conn);
1227 spin_unlock_bh(&cmd->dataout_timeout_lock);
1228 iscsit_dec_conn_usage_count(conn);
1229
1230 return;
1231
1232failure:
1233 spin_unlock_bh(&cmd->dataout_timeout_lock);
1234 iscsit_cause_connection_reinstatement(conn, 0);
1235 iscsit_dec_conn_usage_count(conn);
1236}
1237
1238void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
1239{
1240 struct iscsi_conn *conn = cmd->conn;
1241 struct iscsi_session *sess = conn->sess;
1242	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1243
1244 spin_lock_bh(&cmd->dataout_timeout_lock);
1245 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
1246 spin_unlock_bh(&cmd->dataout_timeout_lock);
1247 return;
1248 }
1249
1250 mod_timer(&cmd->dataout_timer,
1251 (get_jiffies_64() + na->dataout_timeout * HZ));
1252	pr_debug("Updated DataOUT timer for ITT: 0x%08x\n",
1253 cmd->init_task_tag);
1254 spin_unlock_bh(&cmd->dataout_timeout_lock);
1255}
1256
1257/*
1258 * Called with cmd->dataout_timeout_lock held.
1259 */
1260void iscsit_start_dataout_timer(
1261 struct iscsi_cmd *cmd,
1262 struct iscsi_conn *conn)
1263{
1264 struct iscsi_session *sess = conn->sess;
1265	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1266
1267 if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
1268 return;
1269
1270 pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
1271 " CID: %hu.\n", cmd->init_task_tag, conn->cid);
1272
1273 init_timer(&cmd->dataout_timer);
1274 cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
1275 cmd->dataout_timer.data = (unsigned long)cmd;
1276 cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
1277 cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
1278 cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
1279 add_timer(&cmd->dataout_timer);
1280}
1281
1282void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
1283{
1284 spin_lock_bh(&cmd->dataout_timeout_lock);
1285 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
1286 spin_unlock_bh(&cmd->dataout_timeout_lock);
1287 return;
1288 }
1289 cmd->dataout_timer_flags |= ISCSI_TF_STOP;
1290 spin_unlock_bh(&cmd->dataout_timeout_lock);
1291
1292 del_timer_sync(&cmd->dataout_timer);
1293
1294 spin_lock_bh(&cmd->dataout_timeout_lock);
1295 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
1296 pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
1297 cmd->init_task_tag);
1298 spin_unlock_bh(&cmd->dataout_timeout_lock);
1299}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
new file mode 100644
index 000000000000..85e67e29de6b
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -0,0 +1,26 @@
1#ifndef ISCSI_TARGET_ERL1_H
2#define ISCSI_TARGET_ERL1_H
3
4extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
5extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
6 struct iscsi_cmd *, struct iscsi_datain_req *);
7extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
8 struct iscsi_cmd *, struct iscsi_datain_req *);
9extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
10 u32, u32, u32, u32);
11extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32,
12 u32, u32);
13extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
14extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
15extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
16extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
17extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
18extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
19extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
20extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
21extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
22extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
23extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
24extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
25
26#endif /* ISCSI_TARGET_ERL1_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
new file mode 100644
index 000000000000..91a4d170bda4
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -0,0 +1,474 @@
1/*******************************************************************************
2 * This file contains error recovery level two functions used by
3 * the iSCSI Target driver.
4 *
 5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_datain_values.h"
28#include "iscsi_target_util.h"
29#include "iscsi_target_erl0.h"
30#include "iscsi_target_erl1.h"
31#include "iscsi_target_erl2.h"
32#include "iscsi_target.h"
33
34/*
35 * FIXME: Does RData SNACK apply here as well?
36 */
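/*
 * Rewind the command's DataIN state to the initiator's acknowledged
 * ExpDataSN: walk forward one DataIN PDU at a time, rebuilding
 * read_data_done and next_burst_len within MaxBurstLength so that
 * transmission can resume from the first unacknowledged sequence.
 */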
37void iscsit_create_conn_recovery_datain_values(
38 struct iscsi_cmd *cmd,
39 u32 exp_data_sn)
40{
41 u32 data_sn = 0;
42 struct iscsi_conn *conn = cmd->conn;
43
44 cmd->next_burst_len = 0;
45 cmd->read_data_done = 0;
46
47 while (exp_data_sn > data_sn) {
48 if ((cmd->next_burst_len +
49 conn->conn_ops->MaxRecvDataSegmentLength) <
50 conn->sess->sess_ops->MaxBurstLength) {
51 cmd->read_data_done +=
52 conn->conn_ops->MaxRecvDataSegmentLength;
53 cmd->next_burst_len +=
54 conn->conn_ops->MaxRecvDataSegmentLength;
55 } else {
56 cmd->read_data_done +=
57 (conn->sess->sess_ops->MaxBurstLength -
58 cmd->next_burst_len);
59 cmd->next_burst_len = 0;
60 }
61 data_sn++;
62 }
63}
64
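/*
 * Round write_data_done down to the last completed MaxBurstLength
 * boundary and reset data_sn/next_burst_len, so DataOUT for the
 * recovered command restarts at the beginning of the partial burst.
 */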
65void iscsit_create_conn_recovery_dataout_values(
66 struct iscsi_cmd *cmd)
67{
68 u32 write_data_done = 0;
69 struct iscsi_conn *conn = cmd->conn;
70
71 cmd->data_sn = 0;
72 cmd->next_burst_len = 0;
73
74 while (cmd->write_data_done > write_data_done) {
75 if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
76 cmd->write_data_done)
77 write_data_done += conn->sess->sess_ops->MaxBurstLength;
78 else
79 break;
80 }
81
82 cmd->write_data_done = write_data_done;
83}
84
85static int iscsit_attach_active_connection_recovery_entry(
86 struct iscsi_session *sess,
87 struct iscsi_conn_recovery *cr)
88{
89 spin_lock(&sess->cr_a_lock);
90 list_add_tail(&cr->cr_list, &sess->cr_active_list);
91 spin_unlock(&sess->cr_a_lock);
92
93 return 0;
94}
95
96static int iscsit_attach_inactive_connection_recovery_entry(
97 struct iscsi_session *sess,
98 struct iscsi_conn_recovery *cr)
99{
100 spin_lock(&sess->cr_i_lock);
101 list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
102
103 sess->conn_recovery_count++;
104 pr_debug("Incremented connection recovery count to %u for"
105 " SID: %u\n", sess->conn_recovery_count, sess->sid);
106 spin_unlock(&sess->cr_i_lock);
107
108 return 0;
109}
110
111struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
112 struct iscsi_session *sess,
113 u16 cid)
114{
115 struct iscsi_conn_recovery *cr;
116
117 spin_lock(&sess->cr_i_lock);
118 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
119 if (cr->cid == cid) {
120 spin_unlock(&sess->cr_i_lock);
121 return cr;
122 }
123 }
124 spin_unlock(&sess->cr_i_lock);
125
126 return NULL;
127}
128
129void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
130{
131 struct iscsi_cmd *cmd, *cmd_tmp;
132 struct iscsi_conn_recovery *cr, *cr_tmp;
133
134 spin_lock(&sess->cr_a_lock);
135 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
136 list_del(&cr->cr_list);
137 spin_unlock(&sess->cr_a_lock);
138
139 spin_lock(&cr->conn_recovery_cmd_lock);
140 list_for_each_entry_safe(cmd, cmd_tmp,
141 &cr->conn_recovery_cmd_list, i_list) {
142
143 list_del(&cmd->i_list);
144 cmd->conn = NULL;
145 spin_unlock(&cr->conn_recovery_cmd_lock);
146 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
147 !(cmd->se_cmd.transport_wait_for_tasks))
148 iscsit_release_cmd(cmd);
149 else
150 cmd->se_cmd.transport_wait_for_tasks(
151 &cmd->se_cmd, 1, 1);
152 spin_lock(&cr->conn_recovery_cmd_lock);
153 }
154 spin_unlock(&cr->conn_recovery_cmd_lock);
155 spin_lock(&sess->cr_a_lock);
156
157 kfree(cr);
158 }
159 spin_unlock(&sess->cr_a_lock);
160
161 spin_lock(&sess->cr_i_lock);
162 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
163 list_del(&cr->cr_list);
164 spin_unlock(&sess->cr_i_lock);
165
166 spin_lock(&cr->conn_recovery_cmd_lock);
167 list_for_each_entry_safe(cmd, cmd_tmp,
168 &cr->conn_recovery_cmd_list, i_list) {
169
170 list_del(&cmd->i_list);
171 cmd->conn = NULL;
172 spin_unlock(&cr->conn_recovery_cmd_lock);
173 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
174 !(cmd->se_cmd.transport_wait_for_tasks))
175 iscsit_release_cmd(cmd);
176 else
177 cmd->se_cmd.transport_wait_for_tasks(
178 &cmd->se_cmd, 1, 1);
179 spin_lock(&cr->conn_recovery_cmd_lock);
180 }
181 spin_unlock(&cr->conn_recovery_cmd_lock);
182 spin_lock(&sess->cr_i_lock);
183
184 kfree(cr);
185 }
186 spin_unlock(&sess->cr_i_lock);
187}
188
189int iscsit_remove_active_connection_recovery_entry(
190 struct iscsi_conn_recovery *cr,
191 struct iscsi_session *sess)
192{
193 spin_lock(&sess->cr_a_lock);
194 list_del(&cr->cr_list);
195
196 sess->conn_recovery_count--;
197 pr_debug("Decremented connection recovery count to %u for"
198 " SID: %u\n", sess->conn_recovery_count, sess->sid);
199 spin_unlock(&sess->cr_a_lock);
200
201 kfree(cr);
202
203 return 0;
204}
205
206int iscsit_remove_inactive_connection_recovery_entry(
207 struct iscsi_conn_recovery *cr,
208 struct iscsi_session *sess)
209{
210 spin_lock(&sess->cr_i_lock);
211 list_del(&cr->cr_list);
212 spin_unlock(&sess->cr_i_lock);
213
214 return 0;
215}
216
217/*
 218 * Called with cr->conn_recovery_cmd_lock held.
219 */
220int iscsit_remove_cmd_from_connection_recovery(
221 struct iscsi_cmd *cmd,
222 struct iscsi_session *sess)
223{
224 struct iscsi_conn_recovery *cr;
225
226 if (!cmd->cr) {
227 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
228 " is NULL!\n", cmd->init_task_tag);
229 BUG();
230 }
231 cr = cmd->cr;
232
233 list_del(&cmd->i_list);
234 return --cr->cmd_count;
235}
236
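/*
 * Drop commands parked on a connection recovery entry whose status
 * has already been acknowledged by the ExpStatSN received on relogin.
 * If no commands remain to be reassigned, the recovery entry itself
 * is released.
 */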
237void iscsit_discard_cr_cmds_by_expstatsn(
238 struct iscsi_conn_recovery *cr,
239 u32 exp_statsn)
240{
241 u32 dropped_count = 0;
242 struct iscsi_cmd *cmd, *cmd_tmp;
243 struct iscsi_session *sess = cr->sess;
244
245 spin_lock(&cr->conn_recovery_cmd_lock);
246 list_for_each_entry_safe(cmd, cmd_tmp,
247 &cr->conn_recovery_cmd_list, i_list) {
248
249 if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
250 (cmd->deferred_i_state != ISTATE_REMOVE)) ||
251 (cmd->stat_sn >= exp_statsn)) {
252 continue;
253 }
254
255 dropped_count++;
256 pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
257 " 0x%08x, CID: %hu.\n", cmd->init_task_tag,
258 cmd->stat_sn, cr->cid);
259
260 iscsit_remove_cmd_from_connection_recovery(cmd, sess);
261
262 spin_unlock(&cr->conn_recovery_cmd_lock);
263 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
264 !(cmd->se_cmd.transport_wait_for_tasks))
265 iscsit_release_cmd(cmd);
266 else
267 cmd->se_cmd.transport_wait_for_tasks(
268 &cmd->se_cmd, 1, 0);
269 spin_lock(&cr->conn_recovery_cmd_lock);
270 }
271 spin_unlock(&cr->conn_recovery_cmd_lock);
272
273 pr_debug("Dropped %u total acknowledged commands on"
274 " CID: %hu less than old ExpStatSN: 0x%08x\n",
275 dropped_count, cr->cid, exp_statsn);
276
277 if (!cr->cmd_count) {
278 pr_debug("No commands to be reassigned for failed"
279 " connection CID: %hu on SID: %u\n",
280 cr->cid, sess->sid);
281 iscsit_remove_inactive_connection_recovery_entry(cr, sess);
282 iscsit_attach_active_connection_recovery_entry(sess, cr);
283 pr_debug("iSCSI connection recovery successful for CID:"
284 " %hu on SID: %u\n", cr->cid, sess->sid);
285 iscsit_remove_active_connection_recovery_entry(cr, sess);
286 } else {
287 iscsit_remove_inactive_connection_recovery_entry(cr, sess);
288 iscsit_attach_active_connection_recovery_entry(sess, cr);
289 }
290}
291
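/*
 * Discard out-of-order CmdSN entries belonging to the failed
 * connection, then release any commands on the connection's list
 * still flagged ICF_OOO_CMDSN.
 */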
292int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
293{
294 u32 dropped_count = 0;
295 struct iscsi_cmd *cmd, *cmd_tmp;
296 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
297 struct iscsi_session *sess = conn->sess;
298
299 mutex_lock(&sess->cmdsn_mutex);
300 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
301 &sess->sess_ooo_cmdsn_list, ooo_list) {
302
303 if (ooo_cmdsn->cid != conn->cid)
304 continue;
305
306 dropped_count++;
307 pr_debug("Dropping unacknowledged CmdSN:"
308 " 0x%08x during connection recovery on CID: %hu\n",
309 ooo_cmdsn->cmdsn, conn->cid);
310 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
311 }
312 mutex_unlock(&sess->cmdsn_mutex);
313
314 spin_lock_bh(&conn->cmd_lock);
315 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
316 if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
317 continue;
318
319 list_del(&cmd->i_list);
320
321 spin_unlock_bh(&conn->cmd_lock);
322 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
323 !(cmd->se_cmd.transport_wait_for_tasks))
324 iscsit_release_cmd(cmd);
325 else
326 cmd->se_cmd.transport_wait_for_tasks(
327 &cmd->se_cmd, 1, 1);
328 spin_lock_bh(&conn->cmd_lock);
329 }
330 spin_unlock_bh(&conn->cmd_lock);
331
332 pr_debug("Dropped %u total unacknowledged commands on CID:"
333 " %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
334 sess->exp_cmd_sn);
335 return 0;
336}
337
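/*
 * Move the failed connection's outstanding SCSI and NOP-OUT commands
 * onto a newly allocated struct iscsi_conn_recovery so a reinstated
 * connection can later reassign them; all other commands on the
 * connection's list are released here.
 */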
338int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
339{
340 u32 cmd_count = 0;
341 struct iscsi_cmd *cmd, *cmd_tmp;
342 struct iscsi_conn_recovery *cr;
343
344 /*
 345	 * Allocate a struct iscsi_conn_recovery for this connection.
 346	 * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
347 * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
348 * connection's command list for connection recovery.
349 */
350 cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
351 if (!cr) {
352 pr_err("Unable to allocate memory for"
353 " struct iscsi_conn_recovery.\n");
354 return -1;
355 }
356 INIT_LIST_HEAD(&cr->cr_list);
357 INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
358 spin_lock_init(&cr->conn_recovery_cmd_lock);
359 /*
360 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
361 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
362 * list_del(&cmd->i_list); to release the command to the
363 * session pool and remove it from the connection's list.
364 *
365 * Also stop the DataOUT timer, which will be restarted after
366 * sending the TMR response.
367 */
368 spin_lock_bh(&conn->cmd_lock);
369 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
370
371 if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
372 (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
 373			pr_debug("Not performing reallegiance on"
374 " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
375 " CID: %hu\n", cmd->iscsi_opcode,
376 cmd->init_task_tag, cmd->cmd_sn, conn->cid);
377
378 list_del(&cmd->i_list);
379 spin_unlock_bh(&conn->cmd_lock);
380
381 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
382 !(cmd->se_cmd.transport_wait_for_tasks))
383 iscsit_release_cmd(cmd);
384 else
385 cmd->se_cmd.transport_wait_for_tasks(
386 &cmd->se_cmd, 1, 0);
387 spin_lock_bh(&conn->cmd_lock);
388 continue;
389 }
390
391 /*
392 * Special case where commands greater than or equal to
393 * the session's ExpCmdSN are attached to the connection
394 * list but not to the out of order CmdSN list. The one
395 * obvious case is when a command with immediate data
396 * attached must only check the CmdSN against ExpCmdSN
397 * after the data is received. The special case below
398 * is when the connection fails before data is received,
399 * but also may apply to other PDUs, so it has been
400 * made generic here.
401 */
402 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
403 (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
404 list_del(&cmd->i_list);
405 spin_unlock_bh(&conn->cmd_lock);
406
407 if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
408 !(cmd->se_cmd.transport_wait_for_tasks))
409 iscsit_release_cmd(cmd);
410 else
411 cmd->se_cmd.transport_wait_for_tasks(
412 &cmd->se_cmd, 1, 1);
413 spin_lock_bh(&conn->cmd_lock);
414 continue;
415 }
416
417 cmd_count++;
418 pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
419 " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
 420			" reallegiance.\n", cmd->iscsi_opcode,
421 cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
422 conn->cid);
423
424 cmd->deferred_i_state = cmd->i_state;
425 cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
426
427 if (cmd->data_direction == DMA_TO_DEVICE)
428 iscsit_stop_dataout_timer(cmd);
429
430 cmd->sess = conn->sess;
431
432 list_del(&cmd->i_list);
433 spin_unlock_bh(&conn->cmd_lock);
434
435 iscsit_free_all_datain_reqs(cmd);
436
437 if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
438 cmd->se_cmd.transport_wait_for_tasks)
439 cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
440 0, 0);
441 /*
442 * Add the struct iscsi_cmd to the connection recovery cmd list
443 */
444 spin_lock(&cr->conn_recovery_cmd_lock);
445 list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
446 spin_unlock(&cr->conn_recovery_cmd_lock);
447
448 spin_lock_bh(&conn->cmd_lock);
449 cmd->cr = cr;
450 cmd->conn = NULL;
451 }
452 spin_unlock_bh(&conn->cmd_lock);
453 /*
454 * Fill in the various values in the preallocated struct iscsi_conn_recovery.
455 */
456 cr->cid = conn->cid;
457 cr->cmd_count = cmd_count;
458 cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
459 cr->sess = conn->sess;
460
461 iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
462
463 return 0;
464}
465
466int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
467{
468 atomic_set(&conn->connection_recovery, 1);
469
470 if (iscsit_close_connection(conn) < 0)
471 return -1;
472
473 return 0;
474}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
new file mode 100644
index 000000000000..22f8d24780a6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -0,0 +1,18 @@
1#ifndef ISCSI_TARGET_ERL2_H
2#define ISCSI_TARGET_ERL2_H
3
4extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32);
5extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
6extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
7 struct iscsi_session *, u16);
8extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
9extern int iscsit_remove_active_connection_recovery_entry(
10 struct iscsi_conn_recovery *, struct iscsi_session *);
11extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
12 struct iscsi_session *);
13extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
14extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
15extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
16extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
17
18#endif /*** ISCSI_TARGET_ERL2_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
new file mode 100644
index 000000000000..bcaf82f47037
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -0,0 +1,1232 @@
1/*******************************************************************************
2 * This file contains the login functions used by the iSCSI Target driver.
3 *
 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/string.h>
22#include <linux/kthread.h>
23#include <linux/crypto.h>
24#include <scsi/iscsi_proto.h>
25#include <target/target_core_base.h>
26#include <target/target_core_transport.h>
27
28#include "iscsi_target_core.h"
29#include "iscsi_target_tq.h"
30#include "iscsi_target_device.h"
31#include "iscsi_target_nego.h"
32#include "iscsi_target_erl0.h"
33#include "iscsi_target_erl2.h"
34#include "iscsi_target_login.h"
35#include "iscsi_target_stat.h"
36#include "iscsi_target_tpg.h"
37#include "iscsi_target_util.h"
38#include "iscsi_target.h"
39#include "iscsi_target_parameters.h"
40
41extern struct idr sess_idr;
42extern struct mutex auth_id_lock;
43extern spinlock_t sess_idr_lock;
44
45static int iscsi_login_init_conn(struct iscsi_conn *conn)
46{
47 INIT_LIST_HEAD(&conn->conn_list);
48 INIT_LIST_HEAD(&conn->conn_cmd_list);
49 INIT_LIST_HEAD(&conn->immed_queue_list);
50 INIT_LIST_HEAD(&conn->response_queue_list);
51 init_completion(&conn->conn_post_wait_comp);
52 init_completion(&conn->conn_wait_comp);
53 init_completion(&conn->conn_wait_rcfr_comp);
54 init_completion(&conn->conn_waiting_on_uc_comp);
55 init_completion(&conn->conn_logout_comp);
56 init_completion(&conn->rx_half_close_comp);
57 init_completion(&conn->tx_half_close_comp);
58 spin_lock_init(&conn->cmd_lock);
59 spin_lock_init(&conn->conn_usage_lock);
60 spin_lock_init(&conn->immed_queue_lock);
61 spin_lock_init(&conn->nopin_timer_lock);
62 spin_lock_init(&conn->response_queue_lock);
63 spin_lock_init(&conn->state_lock);
64
65 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
66 pr_err("Unable to allocate conn->conn_cpumask\n");
67 return -ENOMEM;
68 }
69
70 return 0;
71}
72
73/*
74 * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
75 * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
76 */
77int iscsi_login_setup_crypto(struct iscsi_conn *conn)
78{
79 /*
80 * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
81 * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
82 * to software 1x8 byte slicing from crc32c.ko
83 */
84 conn->conn_rx_hash.flags = 0;
85 conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
86 CRYPTO_ALG_ASYNC);
87 if (IS_ERR(conn->conn_rx_hash.tfm)) {
88 pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
89 return -ENOMEM;
90 }
91
92 conn->conn_tx_hash.flags = 0;
93 conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
94 CRYPTO_ALG_ASYNC);
95 if (IS_ERR(conn->conn_tx_hash.tfm)) {
96 pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
97 crypto_free_hash(conn->conn_rx_hash.tfm);
98 return -ENOMEM;
99 }
100
101 return 0;
102}
103
104static int iscsi_login_check_initiator_version(
105 struct iscsi_conn *conn,
106 u8 version_max,
107 u8 version_min)
108{
109 if ((version_max != 0x00) || (version_min != 0x00)) {
110 pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
111 " version Min/Max 0x%02x/0x%02x, rejecting login.\n",
112 version_min, version_max);
113 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
114 ISCSI_LOGIN_STATUS_NO_VERSION);
115 return -1;
116 }
117
118 return 0;
119}
120
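/*
 * Scan the portal group for an existing session matching the incoming
 * login's ISID, InitiatorName and SessionType.  If one is found that
 * has not already logged out or expired, mark it for reinstatement,
 * stop its Time2Retain timer and shut it down before the new session
 * proceeds.
 */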
121int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
122{
123 int sessiontype;
124 struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
125 struct iscsi_portal_group *tpg = conn->tpg;
126 struct iscsi_session *sess = NULL, *sess_p = NULL;
127 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
128 struct se_session *se_sess, *se_sess_tmp;
129
130 initiatorname_param = iscsi_find_param_from_key(
131 INITIATORNAME, conn->param_list);
132 if (!initiatorname_param)
133 return -1;
134
135 sessiontype_param = iscsi_find_param_from_key(
136 SESSIONTYPE, conn->param_list);
137 if (!sessiontype_param)
138 return -1;
139
140 sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
141
142 spin_lock_bh(&se_tpg->session_lock);
143 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
144 sess_list) {
145
146 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
147 spin_lock(&sess_p->conn_lock);
148 if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
149 atomic_read(&sess_p->session_logout) ||
150 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
151 spin_unlock(&sess_p->conn_lock);
152 continue;
153 }
154 if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
155 (!strcmp((void *)sess_p->sess_ops->InitiatorName,
156 (void *)initiatorname_param->value) &&
157 (sess_p->sess_ops->SessionType == sessiontype))) {
158 atomic_set(&sess_p->session_reinstatement, 1);
159 spin_unlock(&sess_p->conn_lock);
160 iscsit_inc_session_usage_count(sess_p);
161 iscsit_stop_time2retain_timer(sess_p);
162 sess = sess_p;
163 break;
164 }
165 spin_unlock(&sess_p->conn_lock);
166 }
167 spin_unlock_bh(&se_tpg->session_lock);
168 /*
169 * If the Time2Retain handler has expired, the session is already gone.
170 */
171 if (!sess)
172 return 0;
173
174 pr_debug("%s iSCSI Session SID %u is still active for %s,"
 175		" performing session reinstatement.\n", (sessiontype) ?
176 "Discovery" : "Normal", sess->sid,
177 sess->sess_ops->InitiatorName);
178
179 spin_lock_bh(&sess->conn_lock);
180 if (sess->session_state == TARG_SESS_STATE_FAILED) {
181 spin_unlock_bh(&sess->conn_lock);
182 iscsit_dec_session_usage_count(sess);
183 return iscsit_close_session(sess);
184 }
185 spin_unlock_bh(&sess->conn_lock);
186
187 iscsit_stop_session(sess, 1, 1);
188 iscsit_dec_session_usage_count(sess);
189
190 return iscsit_close_session(sess);
191}
192
193static void iscsi_login_set_conn_values(
194 struct iscsi_session *sess,
195 struct iscsi_conn *conn,
196 u16 cid)
197{
198 conn->sess = sess;
199 conn->cid = cid;
200 /*
201 * Generate a random Status sequence number (statsn) for the new
202 * iSCSI connection.
203 */
204 get_random_bytes(&conn->stat_sn, sizeof(u32));
205
206 mutex_lock(&auth_id_lock);
207 conn->auth_id = iscsit_global->auth_id++;
208 mutex_unlock(&auth_id_lock);
209}
210
211/*
212 * This is the leading connection of a new session,
213 * or session reinstatement.
214 */
215static int iscsi_login_zero_tsih_s1(
216 struct iscsi_conn *conn,
217 unsigned char *buf)
218{
219 struct iscsi_session *sess = NULL;
220 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
221
222 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
223 if (!sess) {
224 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
225 ISCSI_LOGIN_STATUS_NO_RESOURCES);
226 pr_err("Could not allocate memory for session\n");
227 return -1;
228 }
229
230 iscsi_login_set_conn_values(sess, conn, pdu->cid);
231 sess->init_task_tag = pdu->itt;
232 memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
233 sess->exp_cmd_sn = pdu->cmdsn;
234 INIT_LIST_HEAD(&sess->sess_conn_list);
235 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
236 INIT_LIST_HEAD(&sess->cr_active_list);
237 INIT_LIST_HEAD(&sess->cr_inactive_list);
238 init_completion(&sess->async_msg_comp);
239 init_completion(&sess->reinstatement_comp);
240 init_completion(&sess->session_wait_comp);
241 init_completion(&sess->session_waiting_on_uc_comp);
242 mutex_init(&sess->cmdsn_mutex);
243 spin_lock_init(&sess->conn_lock);
244 spin_lock_init(&sess->cr_a_lock);
245 spin_lock_init(&sess->cr_i_lock);
246 spin_lock_init(&sess->session_usage_lock);
247 spin_lock_init(&sess->ttt_lock);
248
249 if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
250 pr_err("idr_pre_get() for sess_idr failed\n");
251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
252 ISCSI_LOGIN_STATUS_NO_RESOURCES);
253 return -1;
254 }
255 spin_lock(&sess_idr_lock);
256 idr_get_new(&sess_idr, NULL, &sess->session_index);
257 spin_unlock(&sess_idr_lock);
258
259 sess->creation_time = get_jiffies_64();
260 spin_lock_init(&sess->session_stats_lock);
261 /*
262 * The FFP CmdSN window values will be allocated from the TPG's
263 * Initiator Node's ACL once the login has been successfully completed.
264 */
265 sess->max_cmd_sn = pdu->cmdsn;
266
267 sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
268 if (!sess->sess_ops) {
269 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
270 ISCSI_LOGIN_STATUS_NO_RESOURCES);
271 pr_err("Unable to allocate memory for"
272 " struct iscsi_sess_ops.\n");
273 return -1;
274 }
275
276 sess->se_sess = transport_init_session();
277 if (!sess->se_sess) {
278 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
279 ISCSI_LOGIN_STATUS_NO_RESOURCES);
280 return -1;
281 }
282
283 return 0;
284}
285
286static int iscsi_login_zero_tsih_s2(
287 struct iscsi_conn *conn)
288{
289 struct iscsi_node_attrib *na;
290 struct iscsi_session *sess = conn->sess;
291 unsigned char buf[32];
292
293 sess->tpg = conn->tpg;
294
295 /*
296 * Assign a new TPG Session Handle. Note this is protected with
297 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
298 */
299 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
300 if (!sess->tsih)
301 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
302
303 /*
 304	 * Create the default params from user-defined values.
305 */
306 if (iscsi_copy_param_list(&conn->param_list,
307 ISCSI_TPG_C(conn)->param_list, 1) < 0) {
308 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
309 ISCSI_LOGIN_STATUS_NO_RESOURCES);
310 return -1;
311 }
312
313 iscsi_set_keys_to_negotiate(0, conn->param_list);
314
315 if (sess->sess_ops->SessionType)
316 return iscsi_set_keys_irrelevant_for_discovery(
317 conn->param_list);
318
319 na = iscsit_tpg_get_node_attrib(sess);
320
321 /*
322 * Need to send TargetPortalGroupTag back in first login response
323 * on any iSCSI connection where the Initiator provides TargetName.
324 * See 5.3.1. Login Phase Start
325 *
326 * In our case, we have already located the struct iscsi_tiqn at this point.
327 */
328 memset(buf, 0, 32);
329 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
330 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
331 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
332 ISCSI_LOGIN_STATUS_NO_RESOURCES);
333 return -1;
334 }
335
336 /*
337 * Workaround for Initiators that have broken connection recovery logic.
338 *
339 * "We would really like to get rid of this." Linux-iSCSI.org team
340 */
341 memset(buf, 0, 32);
342 sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
343 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
344 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
345 ISCSI_LOGIN_STATUS_NO_RESOURCES);
346 return -1;
347 }
348
349 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
350 return -1;
351
352 return 0;
353}
354
355/*
356 * Remove PSTATE_NEGOTIATE for the four FIM related keys.
357 * The Initiator node will be able to enable FIM by proposing them itself.
358 */
359int iscsi_login_disable_FIM_keys(
360 struct iscsi_param_list *param_list,
361 struct iscsi_conn *conn)
362{
363 struct iscsi_param *param;
364
365 param = iscsi_find_param_from_key("OFMarker", param_list);
366 if (!param) {
367 pr_err("iscsi_find_param_from_key() for"
368 " OFMarker failed\n");
369 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
370 ISCSI_LOGIN_STATUS_NO_RESOURCES);
371 return -1;
372 }
373 param->state &= ~PSTATE_NEGOTIATE;
374
375 param = iscsi_find_param_from_key("OFMarkInt", param_list);
376 if (!param) {
377 pr_err("iscsi_find_param_from_key() for"
 378			" OFMarkInt failed\n");
379 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
380 ISCSI_LOGIN_STATUS_NO_RESOURCES);
381 return -1;
382 }
383 param->state &= ~PSTATE_NEGOTIATE;
384
385 param = iscsi_find_param_from_key("IFMarker", param_list);
386 if (!param) {
387 pr_err("iscsi_find_param_from_key() for"
388 " IFMarker failed\n");
389 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
390 ISCSI_LOGIN_STATUS_NO_RESOURCES);
391 return -1;
392 }
393 param->state &= ~PSTATE_NEGOTIATE;
394
395 param = iscsi_find_param_from_key("IFMarkInt", param_list);
396 if (!param) {
397 pr_err("iscsi_find_param_from_key() for"
 398			" IFMarkInt failed\n");
399 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
400 ISCSI_LOGIN_STATUS_NO_RESOURCES);
401 return -1;
402 }
403 param->state &= ~PSTATE_NEGOTIATE;
404
405 return 0;
406}
407
408static int iscsi_login_non_zero_tsih_s1(
409 struct iscsi_conn *conn,
410 unsigned char *buf)
411{
412 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
413
414 iscsi_login_set_conn_values(NULL, conn, pdu->cid);
415 return 0;
416}
417
418/*
419 * Add a new connection to an existing session.
420 */
421static int iscsi_login_non_zero_tsih_s2(
422 struct iscsi_conn *conn,
423 unsigned char *buf)
424{
425 struct iscsi_portal_group *tpg = conn->tpg;
426 struct iscsi_session *sess = NULL, *sess_p = NULL;
427 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
428 struct se_session *se_sess, *se_sess_tmp;
429 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
430
431 spin_lock_bh(&se_tpg->session_lock);
432 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
433 sess_list) {
434
435 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
436 if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
437 atomic_read(&sess_p->session_logout) ||
438 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
439 continue;
440 if (!memcmp((const void *)sess_p->isid,
441 (const void *)pdu->isid, 6) &&
442 (sess_p->tsih == pdu->tsih)) {
443 iscsit_inc_session_usage_count(sess_p);
444 iscsit_stop_time2retain_timer(sess_p);
445 sess = sess_p;
446 break;
447 }
448 }
449 spin_unlock_bh(&se_tpg->session_lock);
450
451 /*
452 * If the Time2Retain handler has expired, the session is already gone.
453 */
454 if (!sess) {
455 pr_err("Initiator attempting to add a connection to"
456 " a non-existent session, rejecting iSCSI Login.\n");
457 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
458 ISCSI_LOGIN_STATUS_NO_SESSION);
459 return -1;
460 }
461
462 /*
463 * Stop the Time2Retain timer if this is a failed session, we restart
464 * the timer if the login is not successful.
465 */
466 spin_lock_bh(&sess->conn_lock);
467 if (sess->session_state == TARG_SESS_STATE_FAILED)
468 atomic_set(&sess->session_continuation, 1);
469 spin_unlock_bh(&sess->conn_lock);
470
471 iscsi_login_set_conn_values(sess, conn, pdu->cid);
472
473 if (iscsi_copy_param_list(&conn->param_list,
474 ISCSI_TPG_C(conn)->param_list, 0) < 0) {
475 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
476 ISCSI_LOGIN_STATUS_NO_RESOURCES);
477 return -1;
478 }
479
480 iscsi_set_keys_to_negotiate(0, conn->param_list);
481 /*
482 * Need to send TargetPortalGroupTag back in first login response
483 * on any iSCSI connection where the Initiator provides TargetName.
484 * See 5.3.1. Login Phase Start
485 *
486 * In our case, we have already located the struct iscsi_tiqn at this point.
487 */
488 memset(buf, 0, 32);
489 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
490 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
491 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
492 ISCSI_LOGIN_STATUS_NO_RESOURCES);
493 return -1;
494 }
495
496 return iscsi_login_disable_FIM_keys(conn->param_list, conn);
497}
498
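/*
 * Post-authentication handling for a login into an existing session
 * (non-zero TSIH): perform connection reinstatement if the CID is
 * already in use, flush any ERL=2 recovery entries for that CID using
 * the initiator's ExpStatSN, and enforce MaxConnections before the
 * new connection is added.
 */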
499int iscsi_login_post_auth_non_zero_tsih(
500 struct iscsi_conn *conn,
501 u16 cid,
502 u32 exp_statsn)
503{
504 struct iscsi_conn *conn_ptr = NULL;
505 struct iscsi_conn_recovery *cr = NULL;
506 struct iscsi_session *sess = conn->sess;
507
508 /*
509 * By following item 5 in the login table, if we have found
510 * an existing ISID and a valid/existing TSIH and an existing
 511	 * CID we do connection reinstatement. Currently we do not
 512	 * support it, so we send back a non-zero status class to the
 513	 * initiator and release the new connection.
514 */
515 conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
516 if ((conn_ptr)) {
517 pr_err("Connection exists with CID %hu for %s,"
518 " performing connection reinstatement.\n",
519 conn_ptr->cid, sess->sess_ops->InitiatorName);
520
521 iscsit_connection_reinstatement_rcfr(conn_ptr);
522 iscsit_dec_conn_usage_count(conn_ptr);
523 }
524
525 /*
 526	 * Check for any connection recovery entries containing CID.
527 * We use the original ExpStatSN sent in the first login request
528 * to acknowledge commands for the failed connection.
529 *
 530	 * Also note that an explicit logout may have already been sent,
531 * but the response may not be sent due to additional connection
532 * loss.
533 */
534 if (sess->sess_ops->ErrorRecoveryLevel == 2) {
535 cr = iscsit_get_inactive_connection_recovery_entry(
536 sess, cid);
537 if ((cr)) {
538 pr_debug("Performing implicit logout"
539 " for connection recovery on CID: %hu\n",
540 conn->cid);
541 iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
542 }
543 }
544
545 /*
546 * Else we follow item 4 from the login table in that we have
 547	 * found an existing ISID, a valid/existing TSIH and a new
 548	 * CID, so we go ahead and add a new connection to the
 549	 * session.
550 */
551 pr_debug("Adding CID %hu to existing session for %s.\n",
552 cid, sess->sess_ops->InitiatorName);
553
554 if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
555 pr_err("Adding additional connection to this session"
556 " would exceed MaxConnections %d, login failed.\n",
557 sess->sess_ops->MaxConnections);
558 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
559 ISCSI_LOGIN_STATUS_ISID_ERROR);
560 return -1;
561 }
562
563 return 0;
564}
565
566static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
567{
568 struct iscsi_session *sess = conn->sess;
569
570 if (!sess->sess_ops->SessionType)
571 iscsit_start_nopin_timer(conn);
572}
573
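/*
 * Final step of a successful login: mark the connection logged in,
 * apply the negotiated connection/session parameters, start the NopIN
 * timer and the RX/TX thread set and, for a leading (zero TSIH)
 * connection, register the new session with the target core.
 */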
574static int iscsi_post_login_handler(
575 struct iscsi_np *np,
576 struct iscsi_conn *conn,
577 u8 zero_tsih)
578{
579 int stop_timer = 0;
580 struct iscsi_session *sess = conn->sess;
581 struct se_session *se_sess = sess->se_sess;
582 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
583 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
584 struct iscsi_thread_set *ts;
585
586 iscsit_inc_conn_usage_count(conn);
587
588 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
589 ISCSI_LOGIN_STATUS_ACCEPT);
590
591 pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
592 conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
593
594 iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
595 iscsit_set_sync_and_steering_values(conn);
596 /*
597 * SCSI Initiator -> SCSI Target Port Mapping
598 */
599 ts = iscsi_get_thread_set();
600 if (!zero_tsih) {
601 iscsi_set_session_parameters(sess->sess_ops,
602 conn->param_list, 0);
603 iscsi_release_param_list(conn->param_list);
604 conn->param_list = NULL;
605
606 spin_lock_bh(&sess->conn_lock);
607 atomic_set(&sess->session_continuation, 0);
608 if (sess->session_state == TARG_SESS_STATE_FAILED) {
609 pr_debug("Moving to"
610 " TARG_SESS_STATE_LOGGED_IN.\n");
611 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
612 stop_timer = 1;
613 }
614
615 pr_debug("iSCSI Login successful on CID: %hu from %s to"
616 " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
617 np->np_port, tpg->tpgt);
618
619 list_add_tail(&conn->conn_list, &sess->sess_conn_list);
620 atomic_inc(&sess->nconn);
621 pr_debug("Incremented iSCSI Connection count to %hu"
622 " from node: %s\n", atomic_read(&sess->nconn),
623 sess->sess_ops->InitiatorName);
624 spin_unlock_bh(&sess->conn_lock);
625
626 iscsi_post_login_start_timers(conn);
627 iscsi_activate_thread_set(conn, ts);
628 /*
629 * Determine CPU mask to ensure connection's RX and TX kthreads
630 * are scheduled on the same CPU.
631 */
632 iscsit_thread_get_cpumask(conn);
633 conn->conn_rx_reset_cpumask = 1;
634 conn->conn_tx_reset_cpumask = 1;
635
636 iscsit_dec_conn_usage_count(conn);
637 if (stop_timer) {
638 spin_lock_bh(&se_tpg->session_lock);
639 iscsit_stop_time2retain_timer(sess);
640 spin_unlock_bh(&se_tpg->session_lock);
641 }
642 iscsit_dec_session_usage_count(sess);
643 return 0;
644 }
645
646 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
647 iscsi_release_param_list(conn->param_list);
648 conn->param_list = NULL;
649
650 iscsit_determine_maxcmdsn(sess);
651
652 spin_lock_bh(&se_tpg->session_lock);
653 __transport_register_session(&sess->tpg->tpg_se_tpg,
654 se_sess->se_node_acl, se_sess, (void *)sess);
655 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
656 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
657
658 pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
659 conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
660
661 spin_lock_bh(&sess->conn_lock);
662 list_add_tail(&conn->conn_list, &sess->sess_conn_list);
663 atomic_inc(&sess->nconn);
664 pr_debug("Incremented iSCSI Connection count to %hu from node:"
665 " %s\n", atomic_read(&sess->nconn),
666 sess->sess_ops->InitiatorName);
667 spin_unlock_bh(&sess->conn_lock);
668
669 sess->sid = tpg->sid++;
670 if (!sess->sid)
671 sess->sid = tpg->sid++;
672 pr_debug("Established iSCSI session from node: %s\n",
673 sess->sess_ops->InitiatorName);
674
675 tpg->nsessions++;
676 if (tpg->tpg_tiqn)
677 tpg->tpg_tiqn->tiqn_nsessions++;
678
679 pr_debug("Incremented number of active iSCSI sessions to %u on"
680 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
681 spin_unlock_bh(&se_tpg->session_lock);
682
683 iscsi_post_login_start_timers(conn);
684 iscsi_activate_thread_set(conn, ts);
685 /*
686 * Determine CPU mask to ensure connection's RX and TX kthreads
687 * are scheduled on the same CPU.
688 */
689 iscsit_thread_get_cpumask(conn);
690 conn->conn_rx_reset_cpumask = 1;
691 conn->conn_tx_reset_cpumask = 1;
692
693 iscsit_dec_conn_usage_count(conn);
694
695 return 0;
696}
697
698static void iscsi_handle_login_thread_timeout(unsigned long data)
699{
700 struct iscsi_np *np = (struct iscsi_np *) data;
701
702 spin_lock_bh(&np->np_thread_lock);
703 pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
704 np->np_ip, np->np_port);
705
706 if (np->np_login_timer_flags & ISCSI_TF_STOP) {
707 spin_unlock_bh(&np->np_thread_lock);
708 return;
709 }
710
711 if (np->np_thread)
712 send_sig(SIGINT, np->np_thread, 1);
713
714 np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
715 spin_unlock_bh(&np->np_thread_lock);
716}
717
718static void iscsi_start_login_thread_timer(struct iscsi_np *np)
719{
720 /*
 721	 * This uses the TA_LOGIN_TIMEOUT constant because at this
722 * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
723 */
724 spin_lock_bh(&np->np_thread_lock);
725 init_timer(&np->np_login_timer);
726 np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
727 np->np_login_timer.data = (unsigned long)np;
728 np->np_login_timer.function = iscsi_handle_login_thread_timeout;
729 np->np_login_timer_flags &= ~ISCSI_TF_STOP;
730 np->np_login_timer_flags |= ISCSI_TF_RUNNING;
731 add_timer(&np->np_login_timer);
732
733 pr_debug("Added timeout timer to iSCSI login request for"
734 " %u seconds.\n", TA_LOGIN_TIMEOUT);
735 spin_unlock_bh(&np->np_thread_lock);
736}
737
738static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
739{
740 spin_lock_bh(&np->np_thread_lock);
741 if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
742 spin_unlock_bh(&np->np_thread_lock);
743 return;
744 }
745 np->np_login_timer_flags |= ISCSI_TF_STOP;
746 spin_unlock_bh(&np->np_thread_lock);
747
748 del_timer_sync(&np->np_login_timer);
749
750 spin_lock_bh(&np->np_thread_lock);
751 np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
752 spin_unlock_bh(&np->np_thread_lock);
753}
754
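/*
 * Create, bind and listen on the Network Portal's login socket for
 * the configured transport (TCP or SCTP), setting SO_REUSEADDR and,
 * for plain TCP, TCP_NODELAY.
 */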
755int iscsi_target_setup_login_socket(
756 struct iscsi_np *np,
757 struct __kernel_sockaddr_storage *sockaddr)
758{
759 struct socket *sock;
760 int backlog = 5, ret, opt = 0, len;
761
762 switch (np->np_network_transport) {
763 case ISCSI_TCP:
764 np->np_ip_proto = IPPROTO_TCP;
765 np->np_sock_type = SOCK_STREAM;
766 break;
767 case ISCSI_SCTP_TCP:
768 np->np_ip_proto = IPPROTO_SCTP;
769 np->np_sock_type = SOCK_STREAM;
770 break;
771 case ISCSI_SCTP_UDP:
772 np->np_ip_proto = IPPROTO_SCTP;
773 np->np_sock_type = SOCK_SEQPACKET;
774 break;
775 case ISCSI_IWARP_TCP:
776 case ISCSI_IWARP_SCTP:
777 case ISCSI_INFINIBAND:
778 default:
779 pr_err("Unsupported network_transport: %d\n",
780 np->np_network_transport);
781 return -EINVAL;
782 }
783
784 ret = sock_create(sockaddr->ss_family, np->np_sock_type,
785 np->np_ip_proto, &sock);
786 if (ret < 0) {
787 pr_err("sock_create() failed.\n");
788 return ret;
789 }
790 np->np_socket = sock;
791 /*
792 * The SCTP stack needs struct socket->file.
793 */
794 if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
795 (np->np_network_transport == ISCSI_SCTP_UDP)) {
796 if (!sock->file) {
797 sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
798 if (!sock->file) {
799 pr_err("Unable to allocate struct"
800 " file for SCTP\n");
801 ret = -ENOMEM;
802 goto fail;
803 }
804 np->np_flags |= NPF_SCTP_STRUCT_FILE;
805 }
806 }
807 /*
808 * Setup the np->np_sockaddr from the passed sockaddr setup
 809	 * in iscsi_target_configfs.c code.
810 */
811 memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
812 sizeof(struct __kernel_sockaddr_storage));
813
814 if (sockaddr->ss_family == AF_INET6)
815 len = sizeof(struct sockaddr_in6);
816 else
817 len = sizeof(struct sockaddr_in);
818 /*
 819	 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
820 */
821 opt = 1;
822 if (np->np_network_transport == ISCSI_TCP) {
823 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
824 (char *)&opt, sizeof(opt));
825 if (ret < 0) {
826 pr_err("kernel_setsockopt() for TCP_NODELAY"
827 " failed: %d\n", ret);
828 goto fail;
829 }
830 }
831
832 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
833 (char *)&opt, sizeof(opt));
834 if (ret < 0) {
835 pr_err("kernel_setsockopt() for SO_REUSEADDR"
836 " failed\n");
837 goto fail;
838 }
839
840 ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
841 if (ret < 0) {
842 pr_err("kernel_bind() failed: %d\n", ret);
843 goto fail;
844 }
845
846 ret = kernel_listen(sock, backlog);
847 if (ret != 0) {
848 pr_err("kernel_listen() failed: %d\n", ret);
849 goto fail;
850 }
851
852 return 0;
853
854fail:
855 np->np_socket = NULL;
856 if (sock) {
857 if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
858 kfree(sock->file);
859 sock->file = NULL;
860 }
861
862 sock_release(sock);
863 }
864 return ret;
865}
866
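/*
 * One pass of the login thread: accept a connection on the portal
 * socket, read and validate the initial Login Request PDU, run the
 * zero/non-zero TSIH setup paths and parameter negotiation, then hand
 * off to iscsi_post_login_handler().  Returns 1 when the caller
 * should loop for another accept, or 0 on thread shutdown.
 */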
867static int __iscsi_target_login_thread(struct iscsi_np *np)
868{
869 u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
870 int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
871 struct iscsi_conn *conn = NULL;
872 struct iscsi_login *login;
873 struct iscsi_portal_group *tpg = NULL;
874 struct socket *new_sock, *sock;
875 struct kvec iov;
876 struct iscsi_login_req *pdu;
877 struct sockaddr_in sock_in;
878 struct sockaddr_in6 sock_in6;
879
880 flush_signals(current);
881 set_sctp_conn_flag = 0;
882 sock = np->np_socket;
883 ip_proto = np->np_ip_proto;
884 sock_type = np->np_sock_type;
885
886 spin_lock_bh(&np->np_thread_lock);
887 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
888 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
889 complete(&np->np_restart_comp);
890 } else {
891 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
892 }
893 spin_unlock_bh(&np->np_thread_lock);
894
895 if (kernel_accept(sock, &new_sock, 0) < 0) {
896 spin_lock_bh(&np->np_thread_lock);
897 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
898 spin_unlock_bh(&np->np_thread_lock);
899 complete(&np->np_restart_comp);
900 /* Get another socket */
901 return 1;
902 }
903 spin_unlock_bh(&np->np_thread_lock);
904 goto out;
905 }
906 /*
907 * The SCTP stack needs struct socket->file.
908 */
909 if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
910 (np->np_network_transport == ISCSI_SCTP_UDP)) {
911 if (!new_sock->file) {
912 new_sock->file = kzalloc(
913 sizeof(struct file), GFP_KERNEL);
914 if (!new_sock->file) {
915 pr_err("Unable to allocate struct"
916 " file for SCTP\n");
917 sock_release(new_sock);
918 /* Get another socket */
919 return 1;
920 }
921 set_sctp_conn_flag = 1;
922 }
923 }
924
925 iscsi_start_login_thread_timer(np);
926
927 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
928 if (!conn) {
929 pr_err("Could not allocate memory for"
930 " new connection\n");
931 if (set_sctp_conn_flag) {
932 kfree(new_sock->file);
933 new_sock->file = NULL;
934 }
935 sock_release(new_sock);
936 /* Get another socket */
937 return 1;
938 }
939
940 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
941 conn->conn_state = TARG_CONN_STATE_FREE;
942 conn->sock = new_sock;
943
944 if (set_sctp_conn_flag)
945 conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
946
947 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
948 conn->conn_state = TARG_CONN_STATE_XPT_UP;
949
950 /*
 951	 * Allocate conn->conn_ops early, as any failure path below that
 952	 * calls iscsit_tx_login_rsp() will end up calling tx_data().
953 */
954 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
955 if (!conn->conn_ops) {
956 pr_err("Unable to allocate memory for"
957 " struct iscsi_conn_ops.\n");
958 goto new_sess_out;
959 }
960 /*
 961	 * Perform the remaining iSCSI connection initialization items.
962 */
963 if (iscsi_login_init_conn(conn) < 0)
964 goto new_sess_out;
965
966 memset(buffer, 0, ISCSI_HDR_LEN);
967 memset(&iov, 0, sizeof(struct kvec));
968 iov.iov_base = buffer;
969 iov.iov_len = ISCSI_HDR_LEN;
970
971 if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
972 pr_err("rx_data() returned an error.\n");
973 goto new_sess_out;
974 }
975
976 iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
977 if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
978 pr_err("First opcode is not login request,"
979 " failing login request.\n");
980 goto new_sess_out;
981 }
982
983 pdu = (struct iscsi_login_req *) buffer;
984 pdu->cid = be16_to_cpu(pdu->cid);
985 pdu->tsih = be16_to_cpu(pdu->tsih);
986 pdu->itt = be32_to_cpu(pdu->itt);
987 pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
988 pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
989 /*
 990	 * Used by iscsit_tx_login_rsp() for Login Response PDUs
991 * when Status-Class != 0.
992 */
993 conn->login_itt = pdu->itt;
994
995 spin_lock_bh(&np->np_thread_lock);
996 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
997 spin_unlock_bh(&np->np_thread_lock);
998 pr_err("iSCSI Network Portal on %s:%hu currently not"
999 " active.\n", np->np_ip, np->np_port);
1000 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1001 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
1002 goto new_sess_out;
1003 }
1004 spin_unlock_bh(&np->np_thread_lock);
1005
1006 if (np->np_sockaddr.ss_family == AF_INET6) {
1007 memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
1008
1009 if (conn->sock->ops->getname(conn->sock,
1010 (struct sockaddr *)&sock_in6, &err, 1) < 0) {
1011 pr_err("sock_ops->getname() failed.\n");
1012 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1013 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1014 goto new_sess_out;
1015 }
1016#if 0
1017 if (!iscsi_ntop6((const unsigned char *)
1018 &sock_in6.sin6_addr.in6_u,
1019 (char *)&conn->ipv6_login_ip[0],
1020 IPV6_ADDRESS_SPACE)) {
1021 pr_err("iscsi_ntop6() failed\n");
1022 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1023 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1024 goto new_sess_out;
1025 }
1026#else
1027 pr_debug("Skipping iscsi_ntop6()\n");
1028#endif
1029 } else {
1030 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1031
1032 if (conn->sock->ops->getname(conn->sock,
1033 (struct sockaddr *)&sock_in, &err, 1) < 0) {
1034 pr_err("sock_ops->getname() failed.\n");
1035 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1036 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1037 goto new_sess_out;
1038 }
1039 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
1040 conn->login_port = ntohs(sock_in.sin_port);
1041 }
1042
1043 conn->network_transport = np->np_network_transport;
1044
1045 pr_debug("Received iSCSI login request from %s on %s Network"
1046 " Portal %s:%hu\n", conn->login_ip,
1047 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
1048 np->np_ip, np->np_port);
1049
1050 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
1051 conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
1052
1053 if (iscsi_login_check_initiator_version(conn, pdu->max_version,
1054 pdu->min_version) < 0)
1055 goto new_sess_out;
1056
1057 zero_tsih = (pdu->tsih == 0x0000);
1058 if ((zero_tsih)) {
1059 /*
1060 * This is the leading connection of a new session.
1061 * We wait until after authentication to check for
1062 * session reinstatement.
1063 */
1064 if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
1065 goto new_sess_out;
1066 } else {
1067 /*
1068 * Add a new connection to an existing session.
 1069		 * We check for a non-existent session in
1070 * iscsi_login_non_zero_tsih_s2() below based
1071 * on ISID/TSIH, but wait until after authentication
1072 * to check for connection reinstatement, etc.
1073 */
1074 if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
1075 goto new_sess_out;
1076 }
1077
1078 /*
1079 * This will process the first login request, and call
1080 * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
1081 */
1082 login = iscsi_target_init_negotiation(np, conn, buffer);
1083 if (!login) {
1084 tpg = conn->tpg;
1085 goto new_sess_out;
1086 }
1087
1088 tpg = conn->tpg;
1089 if (!tpg) {
1090 pr_err("Unable to locate struct iscsi_conn->tpg\n");
1091 goto new_sess_out;
1092 }
1093
1094 if (zero_tsih) {
1095 if (iscsi_login_zero_tsih_s2(conn) < 0) {
1096 iscsi_target_nego_release(login, conn);
1097 goto new_sess_out;
1098 }
1099 } else {
1100 if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
1101 iscsi_target_nego_release(login, conn);
1102 goto old_sess_out;
1103 }
1104 }
1105
1106 if (iscsi_target_start_negotiation(login, conn) < 0)
1107 goto new_sess_out;
1108
1109 if (!conn->sess) {
1110 pr_err("struct iscsi_conn session pointer is NULL!\n");
1111 goto new_sess_out;
1112 }
1113
1114 iscsi_stop_login_thread_timer(np);
1115
1116 if (signal_pending(current))
1117 goto new_sess_out;
1118
1119 ret = iscsi_post_login_handler(np, conn, zero_tsih);
1120
1121 if (ret < 0)
1122 goto new_sess_out;
1123
1124 iscsit_deaccess_np(np, tpg);
1125 tpg = NULL;
1126 /* Get another socket */
1127 return 1;
1128
1129new_sess_out:
1130 pr_err("iSCSI Login negotiation failed.\n");
1131 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
1132 ISCSI_LOGIN_STATUS_INIT_ERR);
1133 if (!zero_tsih || !conn->sess)
1134 goto old_sess_out;
1135 if (conn->sess->se_sess)
1136 transport_free_session(conn->sess->se_sess);
1137 if (conn->sess->session_index != 0) {
1138 spin_lock_bh(&sess_idr_lock);
1139 idr_remove(&sess_idr, conn->sess->session_index);
1140 spin_unlock_bh(&sess_idr_lock);
1141 }
1142 if (conn->sess->sess_ops)
1143 kfree(conn->sess->sess_ops);
1144 if (conn->sess)
1145 kfree(conn->sess);
1146old_sess_out:
1147 iscsi_stop_login_thread_timer(np);
1148 /*
1149 * If login negotiation fails check if the Time2Retain timer
1150 * needs to be restarted.
1151 */
1152 if (!zero_tsih && conn->sess) {
1153 spin_lock_bh(&conn->sess->conn_lock);
1154 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
1155 struct se_portal_group *se_tpg =
1156 &ISCSI_TPG_C(conn)->tpg_se_tpg;
1157
1158 atomic_set(&conn->sess->session_continuation, 0);
1159 spin_unlock_bh(&conn->sess->conn_lock);
1160 spin_lock_bh(&se_tpg->session_lock);
1161 iscsit_start_time2retain_handler(conn->sess);
1162 spin_unlock_bh(&se_tpg->session_lock);
1163 } else
1164 spin_unlock_bh(&conn->sess->conn_lock);
1165 iscsit_dec_session_usage_count(conn->sess);
1166 }
1167
1168 if (!IS_ERR(conn->conn_rx_hash.tfm))
1169 crypto_free_hash(conn->conn_rx_hash.tfm);
1170 if (!IS_ERR(conn->conn_tx_hash.tfm))
1171 crypto_free_hash(conn->conn_tx_hash.tfm);
1172
1173 if (conn->conn_cpumask)
1174 free_cpumask_var(conn->conn_cpumask);
1175
1176 kfree(conn->conn_ops);
1177
1178 if (conn->param_list) {
1179 iscsi_release_param_list(conn->param_list);
1180 conn->param_list = NULL;
1181 }
1182 if (conn->sock) {
1183 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
1184 kfree(conn->sock->file);
1185 conn->sock->file = NULL;
1186 }
1187 sock_release(conn->sock);
1188 }
1189 kfree(conn);
1190
1191 if (tpg) {
1192 iscsit_deaccess_np(np, tpg);
1193 tpg = NULL;
1194 }
1195
1196out:
1197 stop = kthread_should_stop();
1198 if (!stop && signal_pending(current)) {
1199 spin_lock_bh(&np->np_thread_lock);
1200 stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
1201 spin_unlock_bh(&np->np_thread_lock);
1202 }
1203 /* Wait for another socket.. */
1204 if (!stop)
1205 return 1;
1206
1207 iscsi_stop_login_thread_timer(np);
1208 spin_lock_bh(&np->np_thread_lock);
1209 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
1210 spin_unlock_bh(&np->np_thread_lock);
1211 return 0;
1212}
1213
1214int iscsi_target_login_thread(void *arg)
1215{
1216 struct iscsi_np *np = (struct iscsi_np *)arg;
1217 int ret;
1218
1219 allow_signal(SIGINT);
1220
1221 while (!kthread_should_stop()) {
1222 ret = __iscsi_target_login_thread(np);
1223 /*
1224 * We break and exit here unless another sock_accept() call
1225 * is expected.
1226 */
1227 if (ret != 1)
1228 break;
1229 }
1230
1231 return 0;
1232}
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
new file mode 100644
index 000000000000..091dcae2532b
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -0,0 +1,12 @@
1#ifndef ISCSI_TARGET_LOGIN_H
2#define ISCSI_TARGET_LOGIN_H
3
4extern int iscsi_login_setup_crypto(struct iscsi_conn *);
5extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
6extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
7extern int iscsi_target_setup_login_socket(struct iscsi_np *,
8 struct __kernel_sockaddr_storage *);
9extern int iscsi_target_login_thread(void *);
10extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
11
12#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
new file mode 100644
index 000000000000..713a4d23557a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -0,0 +1,1067 @@
1/*******************************************************************************
2 * This file contains main functions related to iSCSI Parameter negotiation.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/ctype.h>
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_tpg.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_parameters.h"
28#include "iscsi_target_login.h"
29#include "iscsi_target_nego.h"
30#include "iscsi_target_tpg.h"
31#include "iscsi_target_util.h"
32#include "iscsi_target.h"
33#include "iscsi_target_auth.h"
34
35#define MAX_LOGIN_PDUS 7
36#define TEXT_LEN 4096
37
38void convert_null_to_semi(char *buf, int len)
39{
40 int i;
41
42 for (i = 0; i < len; i++)
43 if (buf[i] == '\0')
44 buf[i] = ';';
45}
46
47int strlen_semi(char *buf)
48{
49 int i = 0;
50
51 while (buf[i] != '\0') {
52 if (buf[i] == ';')
53 return i;
54 i++;
55 }
56
57 return -1;
58}
59
60int extract_param(
61 const char *in_buf,
62 const char *pattern,
63 unsigned int max_length,
64 char *out_buf,
65 unsigned char *type)
66{
67 char *ptr;
68 int len;
69
70 if (!in_buf || !pattern || !out_buf || !type)
71 return -1;
72
73 ptr = strstr(in_buf, pattern);
74 if (!ptr)
75 return -1;
76
77 ptr = strstr(ptr, "=");
78 if (!ptr)
79 return -1;
80
81 ptr += 1;
82 if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
83 ptr += 2; /* skip 0x */
84 *type = HEX;
85 } else
86 *type = DECIMAL;
87
88 len = strlen_semi(ptr);
89 if (len < 0)
90 return -1;
91
92 if (len > max_length) {
93		pr_err("Length of input: %d exceeds max_length:"
94 " %d\n", len, max_length);
95 return -1;
96 }
97 memcpy(out_buf, ptr, len);
98 out_buf[len] = '\0';
99
100 return 0;
101}
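A hedged usage sketch of the two helpers above: a caller (such as the CHAP handling code) would first rewrite the NUL separators of the received key=value text with convert_null_to_semi() and then pull individual keys out with extract_param(), which also reports whether the value was written in hex. The buffer contents below are made up for illustration:

char text[] = "CHAP_A=5;CHAP_I=1;CHAP_C=0x0102aabb;";
char value[64];
unsigned char type;

/* max_length is one less than the buffer size to leave room for '\0' */
if (extract_param(text, "CHAP_C", sizeof(value) - 1, value, &type) == 0)
	pr_debug("CHAP_C=%s (%s)\n", value,
		 (type == HEX) ? "hex" : "decimal");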
102
103static u32 iscsi_handle_authentication(
104 struct iscsi_conn *conn,
105 char *in_buf,
106 char *out_buf,
107 int in_length,
108 int *out_length,
109 unsigned char *authtype)
110{
111 struct iscsi_session *sess = conn->sess;
112 struct iscsi_node_auth *auth;
113 struct iscsi_node_acl *iscsi_nacl;
114 struct se_node_acl *se_nacl;
115
116 if (!sess->sess_ops->SessionType) {
117 /*
118 * For SessionType=Normal
119 */
120 se_nacl = conn->sess->se_sess->se_node_acl;
121 if (!se_nacl) {
122 pr_err("Unable to locate struct se_node_acl for"
123 " CHAP auth\n");
124 return -1;
125 }
126 iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
127 se_node_acl);
128 if (!iscsi_nacl) {
129 pr_err("Unable to locate struct iscsi_node_acl for"
130 " CHAP auth\n");
131 return -1;
132 }
133
134 auth = ISCSI_NODE_AUTH(iscsi_nacl);
135 } else {
136 /*
137 * For SessionType=Discovery
138 */
139 auth = &iscsit_global->discovery_acl.node_auth;
140 }
141
142 if (strstr("CHAP", authtype))
143 strcpy(conn->sess->auth_type, "CHAP");
144 else
145 strcpy(conn->sess->auth_type, NONE);
146
147 if (strstr("None", authtype))
148 return 1;
149#ifdef CANSRP
150 else if (strstr("SRP", authtype))
151 return srp_main_loop(conn, auth, in_buf, out_buf,
152 &in_length, out_length);
153#endif
154 else if (strstr("CHAP", authtype))
155 return chap_main_loop(conn, auth, in_buf, out_buf,
156 &in_length, out_length);
157 else if (strstr("SPKM1", authtype))
158 return 2;
159 else if (strstr("SPKM2", authtype))
160 return 2;
161 else if (strstr("KRB5", authtype))
162 return 2;
163 else
164 return 2;
165}
166
167static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
168{
169 kfree(conn->auth_protocol);
170}
171
172static int iscsi_target_check_login_request(
173 struct iscsi_conn *conn,
174 struct iscsi_login *login)
175{
176 int req_csg, req_nsg, rsp_csg, rsp_nsg;
177 u32 payload_length;
178 struct iscsi_login_req *login_req;
179 struct iscsi_login_rsp *login_rsp;
180
181 login_req = (struct iscsi_login_req *) login->req;
182 login_rsp = (struct iscsi_login_rsp *) login->rsp;
183 payload_length = ntoh24(login_req->dlength);
184
185 switch (login_req->opcode & ISCSI_OPCODE_MASK) {
186 case ISCSI_OP_LOGIN:
187 break;
188 default:
189 pr_err("Received unknown opcode 0x%02x.\n",
190 login_req->opcode & ISCSI_OPCODE_MASK);
191 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
192 ISCSI_LOGIN_STATUS_INIT_ERR);
193 return -1;
194 }
195
196 if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
197 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
198 pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
199 " and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
200 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
201 ISCSI_LOGIN_STATUS_INIT_ERR);
202 return -1;
203 }
204
205 req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
206 rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
207 req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
208 rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
209
210 if (req_csg != login->current_stage) {
211 pr_err("Initiator unexpectedly changed login stage"
212 " from %d to %d, login failed.\n", login->current_stage,
213 req_csg);
214 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
215 ISCSI_LOGIN_STATUS_INIT_ERR);
216 return -1;
217 }
218
219 if ((req_nsg == 2) || (req_csg >= 2) ||
220 ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
221 (req_nsg <= req_csg))) {
222 pr_err("Illegal login_req->flags Combination, CSG: %d,"
223 " NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
224 req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
225 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
226 ISCSI_LOGIN_STATUS_INIT_ERR);
227 return -1;
228 }
229
230 if ((login_req->max_version != login->version_max) ||
231 (login_req->min_version != login->version_min)) {
232		pr_err("Login request changed Version Max/Min"
233 " unexpectedly to 0x%02x/0x%02x, protocol error\n",
234 login_req->max_version, login_req->min_version);
235 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
236 ISCSI_LOGIN_STATUS_INIT_ERR);
237 return -1;
238 }
239
240 if (memcmp(login_req->isid, login->isid, 6) != 0) {
241 pr_err("Login request changed ISID unexpectedly,"
242 " protocol error.\n");
243 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
244 ISCSI_LOGIN_STATUS_INIT_ERR);
245 return -1;
246 }
247
248 if (login_req->itt != login->init_task_tag) {
249 pr_err("Login request changed ITT unexpectedly to"
250 " 0x%08x, protocol error.\n", login_req->itt);
251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
252 ISCSI_LOGIN_STATUS_INIT_ERR);
253 return -1;
254 }
255
256 if (payload_length > MAX_KEY_VALUE_PAIRS) {
257 pr_err("Login request payload exceeds default"
258 " MaxRecvDataSegmentLength: %u, protocol error.\n",
259 MAX_KEY_VALUE_PAIRS);
260 return -1;
261 }
262
263 return 0;
264}
265
266static int iscsi_target_check_first_request(
267 struct iscsi_conn *conn,
268 struct iscsi_login *login)
269{
270 struct iscsi_param *param = NULL;
271 struct se_node_acl *se_nacl;
272
273 login->first_request = 0;
274
275 list_for_each_entry(param, &conn->param_list->param_list, p_list) {
276 if (!strncmp(param->name, SESSIONTYPE, 11)) {
277 if (!IS_PSTATE_ACCEPTOR(param)) {
278 pr_err("SessionType key not received"
279 " in first login request.\n");
280 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
281 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
282 return -1;
283 }
284 if (!strncmp(param->value, DISCOVERY, 9))
285 return 0;
286 }
287
288 if (!strncmp(param->name, INITIATORNAME, 13)) {
289 if (!IS_PSTATE_ACCEPTOR(param)) {
290 if (!login->leading_connection)
291 continue;
292
293 pr_err("InitiatorName key not received"
294 " in first login request.\n");
295 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
296 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
297 return -1;
298 }
299
300 /*
301 * For non-leading connections, double check that the
302 * received InitiatorName matches the existing session's
303 * struct iscsi_node_acl.
304 */
305 if (!login->leading_connection) {
306 se_nacl = conn->sess->se_sess->se_node_acl;
307 if (!se_nacl) {
308 pr_err("Unable to locate"
309 " struct se_node_acl\n");
310 iscsit_tx_login_rsp(conn,
311 ISCSI_STATUS_CLS_INITIATOR_ERR,
312 ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
313 return -1;
314 }
315
316 if (strcmp(param->value,
317 se_nacl->initiatorname)) {
318 pr_err("Incorrect"
319 " InitiatorName: %s for this"
320 " iSCSI Initiator Node.\n",
321 param->value);
322 iscsit_tx_login_rsp(conn,
323 ISCSI_STATUS_CLS_INITIATOR_ERR,
324 ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
325 return -1;
326 }
327 }
328 }
329 }
330
331 return 0;
332}
333
334static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
335{
336 u32 padding = 0;
337 struct iscsi_session *sess = conn->sess;
338 struct iscsi_login_rsp *login_rsp;
339
340 login_rsp = (struct iscsi_login_rsp *) login->rsp;
341
342 login_rsp->opcode = ISCSI_OP_LOGIN_RSP;
343 hton24(login_rsp->dlength, login->rsp_length);
344 memcpy(login_rsp->isid, login->isid, 6);
345 login_rsp->tsih = cpu_to_be16(login->tsih);
346 login_rsp->itt = cpu_to_be32(login->init_task_tag);
347 login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
348 login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
349 login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
350
351 pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
352		" ExpCmdSN: 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
353 " %u\n", login_rsp->flags, ntohl(login_rsp->itt),
354 ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
355 ntohl(login_rsp->statsn), login->rsp_length);
356
357 padding = ((-login->rsp_length) & 3);
358
359 if (iscsi_login_tx_data(
360 conn,
361 login->rsp,
362 login->rsp_buf,
363 login->rsp_length + padding) < 0)
364 return -1;
365
366 login->rsp_length = 0;
367 login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
368 login_rsp->itt = be32_to_cpu(login_rsp->itt);
369 login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
370 mutex_lock(&sess->cmdsn_mutex);
371 login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn);
372 login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn);
373 mutex_unlock(&sess->cmdsn_mutex);
374
375 return 0;
376}
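One detail worth calling out from iscsi_target_do_tx_login_io() is the padding arithmetic: iSCSI data segments are padded to a 4-byte boundary, and ((-len) & 3) yields exactly the number of pad bytes required. A tiny worked sketch:

/* 13 -> 3, 16 -> 0, 18 -> 2; len + pad is always a multiple of 4 */
static inline u32 example_iscsi_pad(u32 len)
{
	return (-len) & 3;
}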
377
378static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
379{
380 u32 padding = 0, payload_length;
381 struct iscsi_login_req *login_req;
382
383 if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
384 return -1;
385
386 login_req = (struct iscsi_login_req *) login->req;
387 payload_length = ntoh24(login_req->dlength);
388 login_req->tsih = be16_to_cpu(login_req->tsih);
389 login_req->itt = be32_to_cpu(login_req->itt);
390 login_req->cid = be16_to_cpu(login_req->cid);
391 login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
392 login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);
393
394 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
395 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
396 login_req->flags, login_req->itt, login_req->cmdsn,
397 login_req->exp_statsn, login_req->cid, payload_length);
398
399 if (iscsi_target_check_login_request(conn, login) < 0)
400 return -1;
401
402 padding = ((-payload_length) & 3);
403 memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
404
405 if (iscsi_login_rx_data(
406 conn,
407 login->req_buf,
408 payload_length + padding) < 0)
409 return -1;
410
411 return 0;
412}
413
414static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
415{
416 if (iscsi_target_do_tx_login_io(conn, login) < 0)
417 return -1;
418
419 if (iscsi_target_do_rx_login_io(conn, login) < 0)
420 return -1;
421
422 return 0;
423}
424
425static int iscsi_target_get_initial_payload(
426 struct iscsi_conn *conn,
427 struct iscsi_login *login)
428{
429 u32 padding = 0, payload_length;
430 struct iscsi_login_req *login_req;
431
432 login_req = (struct iscsi_login_req *) login->req;
433 payload_length = ntoh24(login_req->dlength);
434
435 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
436 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
437 login_req->flags, login_req->itt, login_req->cmdsn,
438 login_req->exp_statsn, payload_length);
439
440 if (iscsi_target_check_login_request(conn, login) < 0)
441 return -1;
442
443 padding = ((-payload_length) & 3);
444
445 if (iscsi_login_rx_data(
446 conn,
447 login->req_buf,
448 payload_length + padding) < 0)
449 return -1;
450
451 return 0;
452}
453
454/*
455 * NOTE: We check for existing sessions or connections AFTER the initiator
456 * has been successfully authenticated in order to protect against faked
457 * ISID/TSIH combinations.
458 */
459static int iscsi_target_check_for_existing_instances(
460 struct iscsi_conn *conn,
461 struct iscsi_login *login)
462{
463 if (login->checked_for_existing)
464 return 0;
465
466 login->checked_for_existing = 1;
467
468 if (!login->tsih)
469 return iscsi_check_for_session_reinstatement(conn);
470 else
471 return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
472 login->initial_exp_statsn);
473}
474
475static int iscsi_target_do_authentication(
476 struct iscsi_conn *conn,
477 struct iscsi_login *login)
478{
479 int authret;
480 u32 payload_length;
481 struct iscsi_param *param;
482 struct iscsi_login_req *login_req;
483 struct iscsi_login_rsp *login_rsp;
484
485 login_req = (struct iscsi_login_req *) login->req;
486 login_rsp = (struct iscsi_login_rsp *) login->rsp;
487 payload_length = ntoh24(login_req->dlength);
488
489 param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
490 if (!param)
491 return -1;
492
493 authret = iscsi_handle_authentication(
494 conn,
495 login->req_buf,
496 login->rsp_buf,
497 payload_length,
498 &login->rsp_length,
499 param->value);
500 switch (authret) {
501 case 0:
502 pr_debug("Received OK response"
503 " from LIO Authentication, continuing.\n");
504 break;
505 case 1:
506 pr_debug("iSCSI security negotiation"
507			" completed successfully.\n");
508 login->auth_complete = 1;
509 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
510 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
511 login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
512 ISCSI_FLAG_LOGIN_TRANSIT);
513 login->current_stage = 1;
514 }
515 return iscsi_target_check_for_existing_instances(
516 conn, login);
517 case 2:
518 pr_err("Security negotiation"
519 " failed.\n");
520 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
521 ISCSI_LOGIN_STATUS_AUTH_FAILED);
522 return -1;
523 default:
524 pr_err("Received unknown error %d from LIO"
525 " Authentication\n", authret);
526 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
527 ISCSI_LOGIN_STATUS_TARGET_ERROR);
528 return -1;
529 }
530
531 return 0;
532}
533
534static int iscsi_target_handle_csg_zero(
535 struct iscsi_conn *conn,
536 struct iscsi_login *login)
537{
538 int ret;
539 u32 payload_length;
540 struct iscsi_param *param;
541 struct iscsi_login_req *login_req;
542 struct iscsi_login_rsp *login_rsp;
543
544 login_req = (struct iscsi_login_req *) login->req;
545 login_rsp = (struct iscsi_login_rsp *) login->rsp;
546 payload_length = ntoh24(login_req->dlength);
547
548 param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
549 if (!param)
550 return -1;
551
552 ret = iscsi_decode_text_input(
553 PHASE_SECURITY|PHASE_DECLARATIVE,
554 SENDER_INITIATOR|SENDER_RECEIVER,
555 login->req_buf,
556 payload_length,
557 conn->param_list);
558 if (ret < 0)
559 return -1;
560
561 if (ret > 0) {
562 if (login->auth_complete) {
563 pr_err("Initiator has already been"
564 " successfully authenticated, but is still"
565 " sending %s keys.\n", param->value);
566 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
567 ISCSI_LOGIN_STATUS_INIT_ERR);
568 return -1;
569 }
570
571 goto do_auth;
572 }
573
574 if (login->first_request)
575 if (iscsi_target_check_first_request(conn, login) < 0)
576 return -1;
577
578 ret = iscsi_encode_text_output(
579 PHASE_SECURITY|PHASE_DECLARATIVE,
580 SENDER_TARGET,
581 login->rsp_buf,
582 &login->rsp_length,
583 conn->param_list);
584 if (ret < 0)
585 return -1;
586
587 if (!iscsi_check_negotiated_keys(conn->param_list)) {
588 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
589 !strncmp(param->value, NONE, 4)) {
590 pr_err("Initiator sent AuthMethod=None but"
591 " Target is enforcing iSCSI Authentication,"
592 " login failed.\n");
593 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
594 ISCSI_LOGIN_STATUS_AUTH_FAILED);
595 return -1;
596 }
597
598 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
599 !login->auth_complete)
600 return 0;
601
602 if (strncmp(param->value, NONE, 4) && !login->auth_complete)
603 return 0;
604
605 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
606 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
607 login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
608 ISCSI_FLAG_LOGIN_TRANSIT;
609 login->current_stage = 1;
610 }
611 }
612
613 return 0;
614do_auth:
615 return iscsi_target_do_authentication(conn, login);
616}
617
618static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
619{
620 int ret;
621 u32 payload_length;
622 struct iscsi_login_req *login_req;
623 struct iscsi_login_rsp *login_rsp;
624
625 login_req = (struct iscsi_login_req *) login->req;
626 login_rsp = (struct iscsi_login_rsp *) login->rsp;
627 payload_length = ntoh24(login_req->dlength);
628
629 ret = iscsi_decode_text_input(
630 PHASE_OPERATIONAL|PHASE_DECLARATIVE,
631 SENDER_INITIATOR|SENDER_RECEIVER,
632 login->req_buf,
633 payload_length,
634 conn->param_list);
635 if (ret < 0)
636 return -1;
637
638 if (login->first_request)
639 if (iscsi_target_check_first_request(conn, login) < 0)
640 return -1;
641
642 if (iscsi_target_check_for_existing_instances(conn, login) < 0)
643 return -1;
644
645 ret = iscsi_encode_text_output(
646 PHASE_OPERATIONAL|PHASE_DECLARATIVE,
647 SENDER_TARGET,
648 login->rsp_buf,
649 &login->rsp_length,
650 conn->param_list);
651 if (ret < 0)
652 return -1;
653
654 if (!login->auth_complete &&
655 ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
656 pr_err("Initiator is requesting CSG: 1, has not been"
657 " successfully authenticated, and the Target is"
658 " enforcing iSCSI Authentication, login failed.\n");
659 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
660 ISCSI_LOGIN_STATUS_AUTH_FAILED);
661 return -1;
662 }
663
664 if (!iscsi_check_negotiated_keys(conn->param_list))
665 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
666 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
667 login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
668 ISCSI_FLAG_LOGIN_TRANSIT;
669
670 return 0;
671}
672
673static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
674{
675 int pdu_count = 0;
676 struct iscsi_login_req *login_req;
677 struct iscsi_login_rsp *login_rsp;
678
679 login_req = (struct iscsi_login_req *) login->req;
680 login_rsp = (struct iscsi_login_rsp *) login->rsp;
681
682 while (1) {
683 if (++pdu_count > MAX_LOGIN_PDUS) {
684 pr_err("MAX_LOGIN_PDUS count reached.\n");
685 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
686 ISCSI_LOGIN_STATUS_TARGET_ERROR);
687 return -1;
688 }
689
690 switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
691 case 0:
692 login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
693 if (iscsi_target_handle_csg_zero(conn, login) < 0)
694 return -1;
695 break;
696 case 1:
697 login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
698 if (iscsi_target_handle_csg_one(conn, login) < 0)
699 return -1;
700 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
701 login->tsih = conn->sess->tsih;
702 if (iscsi_target_do_tx_login_io(conn,
703 login) < 0)
704 return -1;
705 return 0;
706 }
707 break;
708 default:
709 pr_err("Illegal CSG: %d received from"
710 " Initiator, protocol error.\n",
711 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
712 >> 2);
713 break;
714 }
715
716 if (iscsi_target_do_login_io(conn, login) < 0)
717 return -1;
718
719 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
720 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
721 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
722 }
723 }
724
725 return 0;
726}
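For readers following the state machine in iscsi_target_do_login(): the CSG/NSG values carried in the login flags byte are defined by RFC 3720 as 0 = SecurityNegotiation, 1 = LoginOperationalNegotiation and 3 = FullFeaturePhase (valid only as a next stage), which is why only cases 0 and 1 are handled above. A minimal decode of the two fields, mirroring the shift used in the code:

static inline int example_login_csg(u8 flags)
{
	return (flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
}

static inline int example_login_nsg(u8 flags)
{
	return flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
}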
727
728static void iscsi_initiatorname_tolower(
729 char *param_buf)
730{
731 char *c;
732 u32 iqn_size = strlen(param_buf), i;
733
734 for (i = 0; i < iqn_size; i++) {
735 c = (char *)&param_buf[i];
736 if (!isupper(*c))
737 continue;
738
739 *c = tolower(*c);
740 }
741}
742
743/*
744 * Processes the first Login Request..
745 */
746static int iscsi_target_locate_portal(
747 struct iscsi_np *np,
748 struct iscsi_conn *conn,
749 struct iscsi_login *login)
750{
751 char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
752 char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
753 struct iscsi_session *sess = conn->sess;
754 struct iscsi_tiqn *tiqn;
755 struct iscsi_login_req *login_req;
756 struct iscsi_targ_login_rsp *login_rsp;
757 u32 payload_length;
758 int sessiontype = 0, ret = 0;
759
760 login_req = (struct iscsi_login_req *) login->req;
761 login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
762 payload_length = ntoh24(login_req->dlength);
763
764 login->first_request = 1;
765 login->leading_connection = (!login_req->tsih) ? 1 : 0;
766 login->current_stage =
767 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
768 login->version_min = login_req->min_version;
769 login->version_max = login_req->max_version;
770 memcpy(login->isid, login_req->isid, 6);
771 login->cmd_sn = login_req->cmdsn;
772 login->init_task_tag = login_req->itt;
773 login->initial_exp_statsn = login_req->exp_statsn;
774 login->cid = login_req->cid;
775 login->tsih = login_req->tsih;
776
777 if (iscsi_target_get_initial_payload(conn, login) < 0)
778 return -1;
779
780 tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
781 if (!tmpbuf) {
782 pr_err("Unable to allocate memory for tmpbuf.\n");
783 return -1;
784 }
785
786 memcpy(tmpbuf, login->req_buf, payload_length);
787 tmpbuf[payload_length] = '\0';
788 start = tmpbuf;
789 end = (start + payload_length);
790
791 /*
792 * Locate the initial keys expected from the Initiator node in
793 * the first login request in order to progress with the login phase.
794 */
795 while (start < end) {
796 if (iscsi_extract_key_value(start, &key, &value) < 0) {
797 ret = -1;
798 goto out;
799 }
800
801 if (!strncmp(key, "InitiatorName", 13))
802 i_buf = value;
803 else if (!strncmp(key, "SessionType", 11))
804 s_buf = value;
805 else if (!strncmp(key, "TargetName", 10))
806 t_buf = value;
807
808 start += strlen(key) + strlen(value) + 2;
809 }
810
811 /*
812 * See 5.3. Login Phase.
813 */
814 if (!i_buf) {
815 pr_err("InitiatorName key not received"
816 " in first login request.\n");
817 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
818 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
819 ret = -1;
820 goto out;
821 }
822 /*
823 * Convert the incoming InitiatorName to lowercase following
824 * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs
825 * are NOT case sensitive.
826 */
827 iscsi_initiatorname_tolower(i_buf);
828
829 if (!s_buf) {
830 if (!login->leading_connection)
831 goto get_target;
832
833 pr_err("SessionType key not received"
834 " in first login request.\n");
835 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
836 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
837 ret = -1;
838 goto out;
839 }
840
841 /*
842 * Use default portal group for discovery sessions.
843 */
844 sessiontype = strncmp(s_buf, DISCOVERY, 9);
845 if (!sessiontype) {
846 conn->tpg = iscsit_global->discovery_tpg;
847 if (!login->leading_connection)
848 goto get_target;
849
850 sess->sess_ops->SessionType = 1;
851 /*
852 * Setup crc32c modules from libcrypto
853 */
854 if (iscsi_login_setup_crypto(conn) < 0) {
855 pr_err("iscsi_login_setup_crypto() failed\n");
856 ret = -1;
857 goto out;
858 }
859 /*
860 * Serialize access across the discovery struct iscsi_portal_group to
861 * process login attempt.
862 */
863 if (iscsit_access_np(np, conn->tpg) < 0) {
864 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
865 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
866 ret = -1;
867 goto out;
868 }
869 ret = 0;
870 goto out;
871 }
872
873get_target:
874 if (!t_buf) {
875 pr_err("TargetName key not received"
876 " in first login request while"
877 " SessionType=Normal.\n");
878 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
879 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
880 ret = -1;
881 goto out;
882 }
883
884 /*
885 * Locate Target IQN from Storage Node.
886 */
887 tiqn = iscsit_get_tiqn_for_login(t_buf);
888 if (!tiqn) {
889 pr_err("Unable to locate Target IQN: %s in"
890 " Storage Node\n", t_buf);
891 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
892 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
893 ret = -1;
894 goto out;
895 }
896 pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
897
898 /*
899 * Locate Target Portal Group from Storage Node.
900 */
901 conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
902 if (!conn->tpg) {
903 pr_err("Unable to locate Target Portal Group"
904 " on %s\n", tiqn->tiqn);
905 iscsit_put_tiqn_for_login(tiqn);
906 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
907 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
908 ret = -1;
909 goto out;
910 }
911 pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
912 /*
913 * Setup crc32c modules from libcrypto
914 */
915 if (iscsi_login_setup_crypto(conn) < 0) {
916 pr_err("iscsi_login_setup_crypto() failed\n");
917 ret = -1;
918 goto out;
919 }
920 /*
921 * Serialize access across the struct iscsi_portal_group to
922 * process login attempt.
923 */
924 if (iscsit_access_np(np, conn->tpg) < 0) {
925 iscsit_put_tiqn_for_login(tiqn);
926 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
927 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
928 ret = -1;
929 conn->tpg = NULL;
930 goto out;
931 }
932
933 /*
934 * conn->sess->node_acl will be set when the referenced
935 * struct iscsi_session is located from received ISID+TSIH in
936 * iscsi_login_non_zero_tsih_s2().
937 */
938 if (!login->leading_connection) {
939 ret = 0;
940 goto out;
941 }
942
943 /*
944 * This value is required in iscsi_login_zero_tsih_s2()
945 */
946 sess->sess_ops->SessionType = 0;
947
948 /*
949 * Locate incoming Initiator IQN reference from Storage Node.
950 */
951 sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
952 &conn->tpg->tpg_se_tpg, i_buf);
953 if (!sess->se_sess->se_node_acl) {
954 pr_err("iSCSI Initiator Node: %s is not authorized to"
955 " access iSCSI target portal group: %hu.\n",
956 i_buf, conn->tpg->tpgt);
957 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
958 ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
959 ret = -1;
960 goto out;
961 }
962
963 ret = 0;
964out:
965 kfree(tmpbuf);
966 return ret;
967}
968
969struct iscsi_login *iscsi_target_init_negotiation(
970 struct iscsi_np *np,
971 struct iscsi_conn *conn,
972 char *login_pdu)
973{
974 struct iscsi_login *login;
975
976 login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
977 if (!login) {
978 pr_err("Unable to allocate memory for struct iscsi_login.\n");
979 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
980 ISCSI_LOGIN_STATUS_NO_RESOURCES);
982		return NULL;
982 }
983
984 login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
985 if (!login->req) {
986 pr_err("Unable to allocate memory for Login Request.\n");
987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
988 ISCSI_LOGIN_STATUS_NO_RESOURCES);
989 goto out;
990 }
991 memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
992
993 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
994 if (!login->req_buf) {
995		pr_err("Unable to allocate memory for request buffer.\n");
996 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
997 ISCSI_LOGIN_STATUS_NO_RESOURCES);
998 goto out;
999 }
1000 /*
1001 * SessionType: Discovery
1002 *
1003 * Locates Default Portal
1004 *
1005 * SessionType: Normal
1006 *
1007 * Locates Target Portal from NP -> Target IQN
1008 */
1009 if (iscsi_target_locate_portal(np, conn, login) < 0) {
1010 pr_err("iSCSI Login negotiation failed.\n");
1011 goto out;
1012 }
1013
1014 return login;
1015out:
1016 kfree(login->req);
1017 kfree(login->req_buf);
1018 kfree(login);
1019
1020 return NULL;
1021}
1022
1023int iscsi_target_start_negotiation(
1024 struct iscsi_login *login,
1025 struct iscsi_conn *conn)
1026{
1027 int ret = -1;
1028
1029 login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
1030 if (!login->rsp) {
1031 pr_err("Unable to allocate memory for"
1032 " Login Response.\n");
1033 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1034 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1035 ret = -1;
1036 goto out;
1037 }
1038
1039 login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
1040 if (!login->rsp_buf) {
1041 pr_err("Unable to allocate memory for"
1042			" response buffer.\n");
1043 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1044 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1045 ret = -1;
1046 goto out;
1047 }
1048
1049 ret = iscsi_target_do_login(conn, login);
1050out:
1051 if (ret != 0)
1052 iscsi_remove_failed_auth_entry(conn);
1053
1054 iscsi_target_nego_release(login, conn);
1055 return ret;
1056}
1057
1058void iscsi_target_nego_release(
1059 struct iscsi_login *login,
1060 struct iscsi_conn *conn)
1061{
1062 kfree(login->req);
1063 kfree(login->rsp);
1064 kfree(login->req_buf);
1065 kfree(login->rsp_buf);
1066 kfree(login);
1067}
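Taken together, the exported entry points above are expected to be driven from the login path roughly as follows (a hedged sketch; error handling and the surrounding connection setup are omitted, and login_pdu_buf stands in for the buffer holding the first received Login Request header). Note that iscsi_target_start_negotiation() releases the login structure itself via iscsi_target_nego_release(), so the caller must not free it again:

struct iscsi_login *login;

login = iscsi_target_init_negotiation(np, conn, login_pdu_buf);
if (!login)
	return -1;

/* Runs the CSG 0/1 state machine and frees the login structure. */
if (iscsi_target_start_negotiation(login, conn) < 0)
	return -1;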
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
new file mode 100644
index 000000000000..92e133a5158f
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -0,0 +1,17 @@
1#ifndef ISCSI_TARGET_NEGO_H
2#define ISCSI_TARGET_NEGO_H
3
4#define DECIMAL 0
5#define HEX 1
6
7extern void convert_null_to_semi(char *, int);
8extern int extract_param(const char *, const char *, unsigned int, char *,
9 unsigned char *);
10extern struct iscsi_login *iscsi_target_init_negotiation(
11 struct iscsi_np *, struct iscsi_conn *, char *);
12extern int iscsi_target_start_negotiation(
13 struct iscsi_login *, struct iscsi_conn *);
14extern void iscsi_target_nego_release(
15 struct iscsi_login *, struct iscsi_conn *);
16
17#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
new file mode 100644
index 000000000000..aeafbe0cd7d1
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -0,0 +1,263 @@
1/*******************************************************************************
2 * This file contains the main functions related to Initiator Node Attributes.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <target/target_core_base.h>
22#include <target/target_core_transport.h>
23
24#include "iscsi_target_core.h"
25#include "iscsi_target_device.h"
26#include "iscsi_target_tpg.h"
27#include "iscsi_target_util.h"
28#include "iscsi_target_nodeattrib.h"
29
30static inline char *iscsit_na_get_initiatorname(
31 struct iscsi_node_acl *nacl)
32{
33 struct se_node_acl *se_nacl = &nacl->se_node_acl;
34
35 return &se_nacl->initiatorname[0];
36}
37
38void iscsit_set_default_node_attribues(
39 struct iscsi_node_acl *acl)
40{
41 struct iscsi_node_attrib *a = &acl->node_attrib;
42
43 a->dataout_timeout = NA_DATAOUT_TIMEOUT;
44 a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
45 a->nopin_timeout = NA_NOPIN_TIMEOUT;
46 a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
47 a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
48 a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
49 a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
50 a->default_erl = NA_DEFAULT_ERL;
51}
52
53extern int iscsit_na_dataout_timeout(
54 struct iscsi_node_acl *acl,
55 u32 dataout_timeout)
56{
57 struct iscsi_node_attrib *a = &acl->node_attrib;
58
59 if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
60 pr_err("Requested DataOut Timeout %u larger than"
61 " maximum %u\n", dataout_timeout,
62 NA_DATAOUT_TIMEOUT_MAX);
63 return -EINVAL;
64 } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
65 pr_err("Requested DataOut Timeout %u smaller than"
66 " minimum %u\n", dataout_timeout,
67 NA_DATAOUT_TIMEOUT_MIX);
68 return -EINVAL;
69 }
70
71 a->dataout_timeout = dataout_timeout;
72 pr_debug("Set DataOut Timeout to %u for Initiator Node"
73 " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
74
75 return 0;
76}
77
78extern int iscsit_na_dataout_timeout_retries(
79 struct iscsi_node_acl *acl,
80 u32 dataout_timeout_retries)
81{
82 struct iscsi_node_attrib *a = &acl->node_attrib;
83
84 if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
85 pr_err("Requested DataOut Timeout Retries %u larger"
86			" than maximum %u\n", dataout_timeout_retries,
87 NA_DATAOUT_TIMEOUT_RETRIES_MAX);
88 return -EINVAL;
89 } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
90 pr_err("Requested DataOut Timeout Retries %u smaller"
91			" than minimum %u\n", dataout_timeout_retries,
92 NA_DATAOUT_TIMEOUT_RETRIES_MIN);
93 return -EINVAL;
94 }
95
96 a->dataout_timeout_retries = dataout_timeout_retries;
97 pr_debug("Set DataOut Timeout Retries to %u for"
98 " Initiator Node %s\n", a->dataout_timeout_retries,
99 iscsit_na_get_initiatorname(acl));
100
101 return 0;
102}
103
104extern int iscsit_na_nopin_timeout(
105 struct iscsi_node_acl *acl,
106 u32 nopin_timeout)
107{
108 struct iscsi_node_attrib *a = &acl->node_attrib;
109 struct iscsi_session *sess;
110 struct iscsi_conn *conn;
111 struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
112 struct se_session *se_sess;
113 u32 orig_nopin_timeout = a->nopin_timeout;
114
115 if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
116 pr_err("Requested NopIn Timeout %u larger than maximum"
117 " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
118 return -EINVAL;
119 } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
120 (nopin_timeout != 0)) {
121 pr_err("Requested NopIn Timeout %u smaller than"
122 " minimum %u and not 0\n", nopin_timeout,
123 NA_NOPIN_TIMEOUT_MIN);
124 return -EINVAL;
125 }
126
127 a->nopin_timeout = nopin_timeout;
128 pr_debug("Set NopIn Timeout to %u for Initiator"
129 " Node %s\n", a->nopin_timeout,
130 iscsit_na_get_initiatorname(acl));
131 /*
132 * Reenable disabled nopin_timeout timer for all iSCSI connections.
133 */
134 if (!orig_nopin_timeout) {
135 spin_lock_bh(&se_nacl->nacl_sess_lock);
136 se_sess = se_nacl->nacl_sess;
137 if (se_sess) {
138 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
139
140 spin_lock(&sess->conn_lock);
141 list_for_each_entry(conn, &sess->sess_conn_list,
142 conn_list) {
143 if (conn->conn_state !=
144 TARG_CONN_STATE_LOGGED_IN)
145 continue;
146
147 spin_lock(&conn->nopin_timer_lock);
148 __iscsit_start_nopin_timer(conn);
149 spin_unlock(&conn->nopin_timer_lock);
150 }
151 spin_unlock(&sess->conn_lock);
152 }
153 spin_unlock_bh(&se_nacl->nacl_sess_lock);
154 }
155
156 return 0;
157}
158
159extern int iscsit_na_nopin_response_timeout(
160 struct iscsi_node_acl *acl,
161 u32 nopin_response_timeout)
162{
163 struct iscsi_node_attrib *a = &acl->node_attrib;
164
165 if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
166 pr_err("Requested NopIn Response Timeout %u larger"
167 " than maximum %u\n", nopin_response_timeout,
168 NA_NOPIN_RESPONSE_TIMEOUT_MAX);
169 return -EINVAL;
170 } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
171 pr_err("Requested NopIn Response Timeout %u smaller"
172 " than minimum %u\n", nopin_response_timeout,
173 NA_NOPIN_RESPONSE_TIMEOUT_MIN);
174 return -EINVAL;
175 }
176
177 a->nopin_response_timeout = nopin_response_timeout;
178 pr_debug("Set NopIn Response Timeout to %u for"
179		" Initiator Node %s\n", a->nopin_response_timeout,
180 iscsit_na_get_initiatorname(acl));
181
182 return 0;
183}
184
185extern int iscsit_na_random_datain_pdu_offsets(
186 struct iscsi_node_acl *acl,
187 u32 random_datain_pdu_offsets)
188{
189 struct iscsi_node_attrib *a = &acl->node_attrib;
190
191 if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
192 pr_err("Requested Random DataIN PDU Offsets: %u not"
193 " 0 or 1\n", random_datain_pdu_offsets);
194 return -EINVAL;
195 }
196
197 a->random_datain_pdu_offsets = random_datain_pdu_offsets;
198 pr_debug("Set Random DataIN PDU Offsets to %u for"
199 " Initiator Node %s\n", a->random_datain_pdu_offsets,
200 iscsit_na_get_initiatorname(acl));
201
202 return 0;
203}
204
205extern int iscsit_na_random_datain_seq_offsets(
206 struct iscsi_node_acl *acl,
207 u32 random_datain_seq_offsets)
208{
209 struct iscsi_node_attrib *a = &acl->node_attrib;
210
211 if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
212 pr_err("Requested Random DataIN Sequence Offsets: %u"
213 " not 0 or 1\n", random_datain_seq_offsets);
214 return -EINVAL;
215 }
216
217 a->random_datain_seq_offsets = random_datain_seq_offsets;
218 pr_debug("Set Random DataIN Sequence Offsets to %u for"
219 " Initiator Node %s\n", a->random_datain_seq_offsets,
220 iscsit_na_get_initiatorname(acl));
221
222 return 0;
223}
224
225extern int iscsit_na_random_r2t_offsets(
226 struct iscsi_node_acl *acl,
227 u32 random_r2t_offsets)
228{
229 struct iscsi_node_attrib *a = &acl->node_attrib;
230
231 if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
232 pr_err("Requested Random R2T Offsets: %u not"
233 " 0 or 1\n", random_r2t_offsets);
234 return -EINVAL;
235 }
236
237 a->random_r2t_offsets = random_r2t_offsets;
238 pr_debug("Set Random R2T Offsets to %u for"
239 " Initiator Node %s\n", a->random_r2t_offsets,
240 iscsit_na_get_initiatorname(acl));
241
242 return 0;
243}
244
245extern int iscsit_na_default_erl(
246 struct iscsi_node_acl *acl,
247 u32 default_erl)
248{
249 struct iscsi_node_attrib *a = &acl->node_attrib;
250
251 if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
252 pr_err("Requested default ERL: %u not 0, 1, or 2\n",
253 default_erl);
254 return -EINVAL;
255 }
256
257 a->default_erl = default_erl;
258 pr_debug("Set use ERL0 flag to %u for Initiator"
259 " Node %s\n", a->default_erl,
260 iscsit_na_get_initiatorname(acl));
261
262 return 0;
263}
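Each setter above follows the same shape: validate the requested value against the NA_* bounds, store it, and log the change. A hedged sketch of how a configfs-style store handler might feed one of them (kstrtou32(), the example_* name, and the page/count parameters are illustrative, not taken from the driver):

static ssize_t example_store_dataout_timeout(struct iscsi_node_acl *acl,
					     const char *page, size_t count)
{
	u32 val;
	int rc;

	rc = kstrtou32(page, 0, &val);
	if (rc)
		return rc;

	rc = iscsit_na_dataout_timeout(acl, val);
	if (rc < 0)
		return rc;

	return count;
}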
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
new file mode 100644
index 000000000000..c970b326ef23
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -0,0 +1,14 @@
1#ifndef ISCSI_TARGET_NODEATTRIB_H
2#define ISCSI_TARGET_NODEATTRIB_H
3
4extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
5extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
6extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
7extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
8extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
9extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
10extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
11extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
12extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
13
14#endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
new file mode 100644
index 000000000000..252e246cf51e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -0,0 +1,1905 @@
1/*******************************************************************************
2 * This file contains main functions related to iSCSI Parameter negotiation.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/slab.h>
22
23#include "iscsi_target_core.h"
24#include "iscsi_target_util.h"
25#include "iscsi_target_parameters.h"
26
27int iscsi_login_rx_data(
28 struct iscsi_conn *conn,
29 char *buf,
30 int length)
31{
32 int rx_got;
33 struct kvec iov;
34
35 memset(&iov, 0, sizeof(struct kvec));
36 iov.iov_len = length;
37 iov.iov_base = buf;
38
39 /*
40 * Initial Marker-less Interval.
41 * Add the values regardless of IFMarker/OFMarker, considering
42	 * it may not be negotiated yet.
43 */
44 conn->of_marker += length;
45
46 rx_got = rx_data(conn, &iov, 1, length);
47 if (rx_got != length) {
48 pr_err("rx_data returned %d, expecting %d.\n",
49 rx_got, length);
50 return -1;
51 }
52
53	return 0;
54}
55
56int iscsi_login_tx_data(
57 struct iscsi_conn *conn,
58 char *pdu_buf,
59 char *text_buf,
60 int text_length)
61{
62 int length, tx_sent;
63 struct kvec iov[2];
64
65 length = (ISCSI_HDR_LEN + text_length);
66
67 memset(&iov[0], 0, 2 * sizeof(struct kvec));
68 iov[0].iov_len = ISCSI_HDR_LEN;
69 iov[0].iov_base = pdu_buf;
70 iov[1].iov_len = text_length;
71 iov[1].iov_base = text_buf;
72
73 /*
74 * Initial Marker-less Interval.
75 * Add the values regardless of IFMarker/OFMarker, considering
76	 * it may not be negotiated yet.
77 */
78 conn->if_marker += length;
79
80 tx_sent = tx_data(conn, &iov[0], 2, length);
81 if (tx_sent != length) {
82 pr_err("tx_data returned %d, expecting %d.\n",
83 tx_sent, length);
84 return -1;
85 }
86
87 return 0;
88}
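The send side above illustrates the two-element kvec pattern used throughout the login path: one entry for the fixed 48-byte PDU header (ISCSI_HDR_LEN) and one for the text payload, handed to the driver's internal tx_data() helper in a single call. A condensed sketch of just that pattern, using the same names as the function above:

struct kvec iov[2] = {
	{ .iov_base = pdu_buf,  .iov_len = ISCSI_HDR_LEN },
	{ .iov_base = text_buf, .iov_len = text_length   },
};
int want = ISCSI_HDR_LEN + text_length;

if (tx_data(conn, iov, 2, want) != want)
	return -1;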
89
90void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
91{
92 pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
93 "CRC32C" : "None");
94 pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
95 "CRC32C" : "None");
96 pr_debug("MaxRecvDataSegmentLength: %u\n",
97 conn_ops->MaxRecvDataSegmentLength);
98 pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
99 pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
100 if (conn_ops->OFMarker)
101 pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
102 if (conn_ops->IFMarker)
103 pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
104}
105
106void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
107{
108 pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
109 pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
110 pr_debug("TargetName: %s\n", sess_ops->TargetName);
111 pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
112 pr_debug("TargetPortalGroupTag: %hu\n",
113 sess_ops->TargetPortalGroupTag);
114 pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
115 pr_debug("InitialR2T: %s\n",
116 (sess_ops->InitialR2T) ? "Yes" : "No");
117 pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
118 "Yes" : "No");
119 pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
120 pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
121 pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
122 pr_debug("DefaultTime2Retain: %hu\n",
123 sess_ops->DefaultTime2Retain);
124 pr_debug("MaxOutstandingR2T: %hu\n",
125 sess_ops->MaxOutstandingR2T);
126 pr_debug("DataPDUInOrder: %s\n",
127 (sess_ops->DataPDUInOrder) ? "Yes" : "No");
128 pr_debug("DataSequenceInOrder: %s\n",
129 (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
130 pr_debug("ErrorRecoveryLevel: %hu\n",
131 sess_ops->ErrorRecoveryLevel);
132 pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
133 "Discovery" : "Normal");
134}
135
136void iscsi_print_params(struct iscsi_param_list *param_list)
137{
138 struct iscsi_param *param;
139
140 list_for_each_entry(param, &param_list->param_list, p_list)
141 pr_debug("%s: %s\n", param->name, param->value);
142}
143
144static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
145 char *name, char *value, u8 phase, u8 scope, u8 sender,
146 u16 type_range, u8 use)
147{
148 struct iscsi_param *param = NULL;
149
150 param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
151 if (!param) {
152 pr_err("Unable to allocate memory for parameter.\n");
153 goto out;
154 }
155 INIT_LIST_HEAD(&param->p_list);
156
157 param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
158 if (!param->name) {
159 pr_err("Unable to allocate memory for parameter name.\n");
160 goto out;
161 }
162
163 param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
164 if (!param->value) {
165 pr_err("Unable to allocate memory for parameter value.\n");
166 goto out;
167 }
168
169 memcpy(param->name, name, strlen(name));
170 param->name[strlen(name)] = '\0';
171 memcpy(param->value, value, strlen(value));
172 param->value[strlen(value)] = '\0';
173 param->phase = phase;
174 param->scope = scope;
175 param->sender = sender;
176 param->use = use;
177 param->type_range = type_range;
178
179 switch (param->type_range) {
180 case TYPERANGE_BOOL_AND:
181 param->type = TYPE_BOOL_AND;
182 break;
183 case TYPERANGE_BOOL_OR:
184 param->type = TYPE_BOOL_OR;
185 break;
186 case TYPERANGE_0_TO_2:
187 case TYPERANGE_0_TO_3600:
188 case TYPERANGE_0_TO_32767:
189 case TYPERANGE_0_TO_65535:
190 case TYPERANGE_1_TO_65535:
191 case TYPERANGE_2_TO_3600:
192 case TYPERANGE_512_TO_16777215:
193 param->type = TYPE_NUMBER;
194 break;
195 case TYPERANGE_AUTH:
196 case TYPERANGE_DIGEST:
197 param->type = TYPE_VALUE_LIST | TYPE_STRING;
198 break;
199 case TYPERANGE_MARKINT:
200 param->type = TYPE_NUMBER_RANGE;
201 param->type_range |= TYPERANGE_1_TO_65535;
202 break;
203 case TYPERANGE_ISCSINAME:
204 case TYPERANGE_SESSIONTYPE:
205 case TYPERANGE_TARGETADDRESS:
206 case TYPERANGE_UTF8:
207 param->type = TYPE_STRING;
208 break;
209 default:
210 pr_err("Unknown type_range 0x%02x\n",
211 param->type_range);
212 goto out;
213 }
214 list_add_tail(&param->p_list, &param_list->param_list);
215
216 return param;
217out:
218 if (param) {
219 kfree(param->value);
220 kfree(param->name);
221 kfree(param);
222 }
223
224 return NULL;
225}
226
227/* #warning Add extension keys */
228int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
229{
230 struct iscsi_param *param = NULL;
231 struct iscsi_param_list *pl;
232
233 pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
234 if (!pl) {
235 pr_err("Unable to allocate memory for"
236 " struct iscsi_param_list.\n");
237		return -1;
238 }
239 INIT_LIST_HEAD(&pl->param_list);
240 INIT_LIST_HEAD(&pl->extra_response_list);
241
242 /*
243 * The format for setting the initial parameter definitions are:
244 *
245 * Parameter name:
246 * Initial value:
247 * Allowable phase:
248 * Scope:
249 * Allowable senders:
250 * Typerange:
251 * Use:
252 */
253 param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
254 PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
255 TYPERANGE_AUTH, USE_INITIAL_ONLY);
256 if (!param)
257 goto out;
258
259 param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
260 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
261 TYPERANGE_DIGEST, USE_INITIAL_ONLY);
262 if (!param)
263 goto out;
264
265 param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
266 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
267 TYPERANGE_DIGEST, USE_INITIAL_ONLY);
268 if (!param)
269 goto out;
270
271 param = iscsi_set_default_param(pl, MAXCONNECTIONS,
272 INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
273 SCOPE_SESSION_WIDE, SENDER_BOTH,
274 TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
275 if (!param)
276 goto out;
277
278 param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
279 PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
280 TYPERANGE_UTF8, 0);
281 if (!param)
282 goto out;
283
284 param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
285 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
286 TYPERANGE_ISCSINAME, USE_ALL);
287 if (!param)
288 goto out;
289
290 param = iscsi_set_default_param(pl, INITIATORNAME,
291 INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
292 SCOPE_SESSION_WIDE, SENDER_INITIATOR,
293 TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
294 if (!param)
295 goto out;
296
297 param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
298 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
299 TYPERANGE_UTF8, USE_ALL);
300 if (!param)
301 goto out;
302
303 param = iscsi_set_default_param(pl, INITIATORALIAS,
304 INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
305 SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
306 USE_ALL);
307 if (!param)
308 goto out;
309
310 param = iscsi_set_default_param(pl, TARGETADDRESS,
311 INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
312 SCOPE_SESSION_WIDE, SENDER_TARGET,
313 TYPERANGE_TARGETADDRESS, USE_ALL);
314 if (!param)
315 goto out;
316
317 param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
318 INITIAL_TARGETPORTALGROUPTAG,
319 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
320 TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
321 if (!param)
322 goto out;
323
324 param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
325 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
326 TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
327 if (!param)
328 goto out;
329
330 param = iscsi_set_default_param(pl, IMMEDIATEDATA,
331 INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
332 SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
333 USE_LEADING_ONLY);
334 if (!param)
335 goto out;
336
337 param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
338 INITIAL_MAXRECVDATASEGMENTLENGTH,
339 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
340 TYPERANGE_512_TO_16777215, USE_ALL);
341 if (!param)
342 goto out;
343
344 param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
345 INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
346 SCOPE_SESSION_WIDE, SENDER_BOTH,
347 TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
348 if (!param)
349 goto out;
350
351 param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
352 INITIAL_FIRSTBURSTLENGTH,
353 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
354 TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
355 if (!param)
356 goto out;
357
358 param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
359 INITIAL_DEFAULTTIME2WAIT,
360 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
361 TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
362 if (!param)
363 goto out;
364
365 param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
366 INITIAL_DEFAULTTIME2RETAIN,
367 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
368 TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
369 if (!param)
370 goto out;
371
372 param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
373 INITIAL_MAXOUTSTANDINGR2T,
374 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
375 TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
376 if (!param)
377 goto out;
378
379 param = iscsi_set_default_param(pl, DATAPDUINORDER,
380 INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
381 SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
382 USE_LEADING_ONLY);
383 if (!param)
384 goto out;
385
386 param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
387 INITIAL_DATASEQUENCEINORDER,
388 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
389 TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
390 if (!param)
391 goto out;
392
393 param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
394 INITIAL_ERRORRECOVERYLEVEL,
395 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
396 TYPERANGE_0_TO_2, USE_LEADING_ONLY);
397 if (!param)
398 goto out;
399
400 param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
401 PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
402 TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
403 if (!param)
404 goto out;
405
406 param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
407 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
408 TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
409 if (!param)
410 goto out;
411
412 param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
413 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
414 TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
415 if (!param)
416 goto out;
417
418 param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
419 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
420 TYPERANGE_MARKINT, USE_INITIAL_ONLY);
421 if (!param)
422 goto out;
423
424 param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
425 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
426 TYPERANGE_MARKINT, USE_INITIAL_ONLY);
427 if (!param)
428 goto out;
429
430 *param_list_ptr = pl;
431 return 0;
432out:
433 iscsi_release_param_list(pl);
434 return -1;
435}
436
437int iscsi_set_keys_to_negotiate(
438 int sessiontype,
439 struct iscsi_param_list *param_list)
440{
441 struct iscsi_param *param;
442
443 list_for_each_entry(param, &param_list->param_list, p_list) {
444 param->state = 0;
445 if (!strcmp(param->name, AUTHMETHOD)) {
446 SET_PSTATE_NEGOTIATE(param);
447 } else if (!strcmp(param->name, HEADERDIGEST)) {
448 SET_PSTATE_NEGOTIATE(param);
449 } else if (!strcmp(param->name, DATADIGEST)) {
450 SET_PSTATE_NEGOTIATE(param);
451 } else if (!strcmp(param->name, MAXCONNECTIONS)) {
452 SET_PSTATE_NEGOTIATE(param);
453 } else if (!strcmp(param->name, TARGETNAME)) {
454 continue;
455 } else if (!strcmp(param->name, INITIATORNAME)) {
456 continue;
457 } else if (!strcmp(param->name, TARGETALIAS)) {
458 if (param->value)
459 SET_PSTATE_NEGOTIATE(param);
460 } else if (!strcmp(param->name, INITIATORALIAS)) {
461 continue;
462 } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
463 SET_PSTATE_NEGOTIATE(param);
464 } else if (!strcmp(param->name, INITIALR2T)) {
465 SET_PSTATE_NEGOTIATE(param);
466 } else if (!strcmp(param->name, IMMEDIATEDATA)) {
467 SET_PSTATE_NEGOTIATE(param);
468 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
469 SET_PSTATE_NEGOTIATE(param);
470 } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
471 SET_PSTATE_NEGOTIATE(param);
472 } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
473 SET_PSTATE_NEGOTIATE(param);
474 } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
475 SET_PSTATE_NEGOTIATE(param);
476 } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
477 SET_PSTATE_NEGOTIATE(param);
478 } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
479 SET_PSTATE_NEGOTIATE(param);
480 } else if (!strcmp(param->name, DATAPDUINORDER)) {
481 SET_PSTATE_NEGOTIATE(param);
482 } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
483 SET_PSTATE_NEGOTIATE(param);
484 } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
485 SET_PSTATE_NEGOTIATE(param);
486 } else if (!strcmp(param->name, SESSIONTYPE)) {
487 SET_PSTATE_NEGOTIATE(param);
488 } else if (!strcmp(param->name, IFMARKER)) {
489 SET_PSTATE_NEGOTIATE(param);
490 } else if (!strcmp(param->name, OFMARKER)) {
491 SET_PSTATE_NEGOTIATE(param);
492 } else if (!strcmp(param->name, IFMARKINT)) {
493 SET_PSTATE_NEGOTIATE(param);
494 } else if (!strcmp(param->name, OFMARKINT)) {
495 SET_PSTATE_NEGOTIATE(param);
496 }
497 }
498
499 return 0;
500}
501
502int iscsi_set_keys_irrelevant_for_discovery(
503 struct iscsi_param_list *param_list)
504{
505 struct iscsi_param *param;
506
507 list_for_each_entry(param, &param_list->param_list, p_list) {
508 if (!strcmp(param->name, MAXCONNECTIONS))
509 param->state &= ~PSTATE_NEGOTIATE;
510 else if (!strcmp(param->name, INITIALR2T))
511 param->state &= ~PSTATE_NEGOTIATE;
512 else if (!strcmp(param->name, IMMEDIATEDATA))
513 param->state &= ~PSTATE_NEGOTIATE;
514 else if (!strcmp(param->name, MAXBURSTLENGTH))
515 param->state &= ~PSTATE_NEGOTIATE;
516 else if (!strcmp(param->name, FIRSTBURSTLENGTH))
517 param->state &= ~PSTATE_NEGOTIATE;
518 else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
519 param->state &= ~PSTATE_NEGOTIATE;
520 else if (!strcmp(param->name, DATAPDUINORDER))
521 param->state &= ~PSTATE_NEGOTIATE;
522 else if (!strcmp(param->name, DATASEQUENCEINORDER))
523 param->state &= ~PSTATE_NEGOTIATE;
524 else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
525 param->state &= ~PSTATE_NEGOTIATE;
526 else if (!strcmp(param->name, DEFAULTTIME2WAIT))
527 param->state &= ~PSTATE_NEGOTIATE;
528 else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
529 param->state &= ~PSTATE_NEGOTIATE;
530 else if (!strcmp(param->name, IFMARKER))
531 param->state &= ~PSTATE_NEGOTIATE;
532 else if (!strcmp(param->name, OFMARKER))
533 param->state &= ~PSTATE_NEGOTIATE;
534 else if (!strcmp(param->name, IFMARKINT))
535 param->state &= ~PSTATE_NEGOTIATE;
536 else if (!strcmp(param->name, OFMARKINT))
537 param->state &= ~PSTATE_NEGOTIATE;
538 }
539
540 return 0;
541}
542
543int iscsi_copy_param_list(
544 struct iscsi_param_list **dst_param_list,
545 struct iscsi_param_list *src_param_list,
546 int leading)
547{
548 struct iscsi_param *new_param = NULL, *param = NULL;
549 struct iscsi_param_list *param_list = NULL;
550
551 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
552 if (!param_list) {
553 pr_err("Unable to allocate memory for"
554 " struct iscsi_param_list.\n");
555 goto err_out;
556 }
557 INIT_LIST_HEAD(&param_list->param_list);
558 INIT_LIST_HEAD(&param_list->extra_response_list);
559
560 list_for_each_entry(param, &src_param_list->param_list, p_list) {
561 if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
562 if ((strcmp(param->name, "TargetName") != 0) &&
563 (strcmp(param->name, "InitiatorName") != 0) &&
564 (strcmp(param->name, "TargetPortalGroupTag") != 0))
565 continue;
566 }
567
568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
569 if (!new_param) {
570 pr_err("Unable to allocate memory for"
571 " struct iscsi_param.\n");
572 goto err_out;
573 }
574
575 new_param->set_param = param->set_param;
576 new_param->phase = param->phase;
577 new_param->scope = param->scope;
578 new_param->sender = param->sender;
579 new_param->type = param->type;
580 new_param->use = param->use;
581 new_param->type_range = param->type_range;
582
583 new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
584 if (!new_param->name) {
585 pr_err("Unable to allocate memory for"
586 " parameter name.\n");
587 goto err_out;
588 }
589
590 new_param->value = kzalloc(strlen(param->value) + 1,
591 GFP_KERNEL);
592 if (!new_param->value) {
593 pr_err("Unable to allocate memory for"
594 " parameter value.\n");
595 goto err_out;
596 }
597
598 memcpy(new_param->name, param->name, strlen(param->name));
599 new_param->name[strlen(param->name)] = '\0';
600 memcpy(new_param->value, param->value, strlen(param->value));
601 new_param->value[strlen(param->value)] = '\0';
602
603 list_add_tail(&new_param->p_list, &param_list->param_list);
604 }
605
606 if (!list_empty(&param_list->param_list))
607 *dst_param_list = param_list;
608 else {
609 pr_err("No parameters allocated.\n");
610 goto err_out;
611 }
612
613 return 0;
614
615err_out:
616 iscsi_release_param_list(param_list);
617 return -1;
618}
619
620static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
621{
622 struct iscsi_extra_response *er, *er_tmp;
623
624 list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
625 er_list) {
626 list_del(&er->er_list);
627 kfree(er);
628 }
629}
630
631void iscsi_release_param_list(struct iscsi_param_list *param_list)
632{
633 struct iscsi_param *param, *param_tmp;
634
635 list_for_each_entry_safe(param, param_tmp, &param_list->param_list,
636 p_list) {
637 list_del(&param->p_list);
638
639 kfree(param->name);
640 param->name = NULL;
641 kfree(param->value);
642 param->value = NULL;
643 kfree(param);
644 param = NULL;
645 }
646
647 iscsi_release_extra_responses(param_list);
648
649 kfree(param_list);
650}
651
652struct iscsi_param *iscsi_find_param_from_key(
653 char *key,
654 struct iscsi_param_list *param_list)
655{
656 struct iscsi_param *param;
657
658 if (!key || !param_list) {
659 pr_err("Key or parameter list pointer is NULL.\n");
660 return NULL;
661 }
662
663 list_for_each_entry(param, &param_list->param_list, p_list) {
664 if (!strcmp(key, param->name))
665 return param;
666 }
667
668 pr_err("Unable to locate key \"%s\".\n", key);
669 return NULL;
670}
671
672int iscsi_extract_key_value(char *textbuf, char **key, char **value)
673{
674 *value = strchr(textbuf, '=');
675 if (!*value) {
 676		pr_err("Unable to locate \"=\" separator for key,"
677 " ignoring request.\n");
678 return -1;
679 }
680
681 *key = textbuf;
682 **value = '\0';
683 *value = *value + 1;
684
685 return 0;
686}
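iscsi_extract_key_value() splits a single NUL-terminated "key=value" pair in place by overwriting the '=' with a NUL. A minimal user-space sketch of the same in-place split, shown only to illustrate the technique (not part of the driver):

/* Illustrative sketch only -- mirrors the in-place split above. */
#include <stdio.h>
#include <string.h>

static int extract_key_value(char *textbuf, char **key, char **value)
{
	char *sep = strchr(textbuf, '=');

	if (!sep)
		return -1;		/* no "=" separator, reject the pair */
	*sep = '\0';			/* terminate the key in place */
	*key = textbuf;
	*value = sep + 1;		/* value starts right after the '=' */
	return 0;
}

int main(void)
{
	char pair[] = "HeaderDigest=CRC32C,None";
	char *key, *value;

	if (!extract_key_value(pair, &key, &value))
		printf("%s -> %s\n", key, value);	/* HeaderDigest -> CRC32C,None */
	return 0;
}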
687
688int iscsi_update_param_value(struct iscsi_param *param, char *value)
689{
690 kfree(param->value);
691
692 param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
693 if (!param->value) {
694 pr_err("Unable to allocate memory for value.\n");
695 return -1;
696 }
697
698 memcpy(param->value, value, strlen(value));
699 param->value[strlen(value)] = '\0';
700
701 pr_debug("iSCSI Parameter updated to %s=%s\n",
702 param->name, param->value);
703 return 0;
704}
705
706static int iscsi_add_notunderstood_response(
707 char *key,
708 char *value,
709 struct iscsi_param_list *param_list)
710{
711 struct iscsi_extra_response *extra_response;
712
713 if (strlen(value) > VALUE_MAXLEN) {
714 pr_err("Value for notunderstood key \"%s\" exceeds %d,"
715 " protocol error.\n", key, VALUE_MAXLEN);
716 return -1;
717 }
718
719 extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
720 if (!extra_response) {
721 pr_err("Unable to allocate memory for"
722 " struct iscsi_extra_response.\n");
723 return -1;
724 }
725 INIT_LIST_HEAD(&extra_response->er_list);
726
 727	strlcpy(extra_response->key, key, sizeof(extra_response->key));
 728	strlcpy(extra_response->value, NOTUNDERSTOOD,
 729			sizeof(extra_response->value));
730
731 list_add_tail(&extra_response->er_list,
732 &param_list->extra_response_list);
733 return 0;
734}
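Keys that fail iscsi_check_key() are queued here and answered with the reserved value "NotUnderstood" when the response is encoded (see iscsi_encode_text_output() below). A sketch of the resulting exchange, using a hypothetical vendor key:

	Initiator offer:    X-com.example.somekey=somevalue
	Target response:    X-com.example.somekey=NotUnderstood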
735
736static int iscsi_check_for_auth_key(char *key)
737{
738 /*
739 * RFC 1994
740 */
741 if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
742 !strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
743 !strcmp(key, "CHAP_R"))
744 return 1;
745
746 /*
747 * RFC 2945
748 */
749 if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
750 !strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
751 !strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
752 !strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
753 return 1;
754
755 return 0;
756}
757
758static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
759{
760 if (IS_TYPE_BOOL_AND(param)) {
761 if (!strcmp(param->value, NO))
762 SET_PSTATE_REPLY_OPTIONAL(param);
763 } else if (IS_TYPE_BOOL_OR(param)) {
764 if (!strcmp(param->value, YES))
765 SET_PSTATE_REPLY_OPTIONAL(param);
766 /*
767 * Required for gPXE iSCSI boot client
768 */
769 if (!strcmp(param->name, IMMEDIATEDATA))
770 SET_PSTATE_REPLY_OPTIONAL(param);
771 } else if (IS_TYPE_NUMBER(param)) {
772 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
773 SET_PSTATE_REPLY_OPTIONAL(param);
774 /*
775 * The GlobalSAN iSCSI Initiator for MacOSX does
776 * not respond to MaxBurstLength, FirstBurstLength,
777 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
778 * So, we set them to 'reply optional' here, and assume the
 779		 * defaults from iscsi_parameters.h if the initiator
780 * is not RFC compliant and the keys are not negotiated.
781 */
782 if (!strcmp(param->name, MAXBURSTLENGTH))
783 SET_PSTATE_REPLY_OPTIONAL(param);
784 if (!strcmp(param->name, FIRSTBURSTLENGTH))
785 SET_PSTATE_REPLY_OPTIONAL(param);
786 if (!strcmp(param->name, DEFAULTTIME2WAIT))
787 SET_PSTATE_REPLY_OPTIONAL(param);
788 if (!strcmp(param->name, DEFAULTTIME2RETAIN))
789 SET_PSTATE_REPLY_OPTIONAL(param);
790 /*
791 * Required for gPXE iSCSI boot client
792 */
793 if (!strcmp(param->name, MAXCONNECTIONS))
794 SET_PSTATE_REPLY_OPTIONAL(param);
795 } else if (IS_PHASE_DECLARATIVE(param))
796 SET_PSTATE_REPLY_OPTIONAL(param);
797}
798
799static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
800{
801 if (strcmp(value, YES) && strcmp(value, NO)) {
802 pr_err("Illegal value for \"%s\", must be either"
803 " \"%s\" or \"%s\".\n", param->name, YES, NO);
804 return -1;
805 }
806
807 return 0;
808}
809
810static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
811{
812 char *tmpptr;
813 int value = 0;
814
815 value = simple_strtoul(value_ptr, &tmpptr, 0);
816
817/* #warning FIXME: Fix this */
818#if 0
 819	if (strspn(tmpptr, WHITE_SPACE) != strlen(tmpptr)) {
820 pr_err("Illegal value \"%s\" for \"%s\".\n",
 821			value_ptr, param->name);
822 return -1;
823 }
824#endif
825 if (IS_TYPERANGE_0_TO_2(param)) {
826 if ((value < 0) || (value > 2)) {
827 pr_err("Illegal value for \"%s\", must be"
828 " between 0 and 2.\n", param->name);
829 return -1;
830 }
831 return 0;
832 }
833 if (IS_TYPERANGE_0_TO_3600(param)) {
834 if ((value < 0) || (value > 3600)) {
835 pr_err("Illegal value for \"%s\", must be"
836 " between 0 and 3600.\n", param->name);
837 return -1;
838 }
839 return 0;
840 }
841 if (IS_TYPERANGE_0_TO_32767(param)) {
842 if ((value < 0) || (value > 32767)) {
843 pr_err("Illegal value for \"%s\", must be"
844 " between 0 and 32767.\n", param->name);
845 return -1;
846 }
847 return 0;
848 }
849 if (IS_TYPERANGE_0_TO_65535(param)) {
850 if ((value < 0) || (value > 65535)) {
851 pr_err("Illegal value for \"%s\", must be"
852 " between 0 and 65535.\n", param->name);
853 return -1;
854 }
855 return 0;
856 }
857 if (IS_TYPERANGE_1_TO_65535(param)) {
858 if ((value < 1) || (value > 65535)) {
859 pr_err("Illegal value for \"%s\", must be"
860 " between 1 and 65535.\n", param->name);
861 return -1;
862 }
863 return 0;
864 }
865 if (IS_TYPERANGE_2_TO_3600(param)) {
866 if ((value < 2) || (value > 3600)) {
867 pr_err("Illegal value for \"%s\", must be"
868 " between 2 and 3600.\n", param->name);
869 return -1;
870 }
871 return 0;
872 }
873 if (IS_TYPERANGE_512_TO_16777215(param)) {
874 if ((value < 512) || (value > 16777215)) {
875 pr_err("Illegal value for \"%s\", must be"
876 " between 512 and 16777215.\n", param->name);
877 return -1;
878 }
879 return 0;
880 }
881
882 return 0;
883}
884
885static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
886{
887 char *left_val_ptr = NULL, *right_val_ptr = NULL;
888 char *tilde_ptr = NULL, *tmp_ptr = NULL;
889 u32 left_val, right_val, local_left_val, local_right_val;
890
891 if (strcmp(param->name, IFMARKINT) &&
892 strcmp(param->name, OFMARKINT)) {
893 pr_err("Only parameters \"%s\" or \"%s\" may contain a"
894 " numerical range value.\n", IFMARKINT, OFMARKINT);
895 return -1;
896 }
897
898 if (IS_PSTATE_PROPOSER(param))
899 return 0;
900
901 tilde_ptr = strchr(value, '~');
902 if (!tilde_ptr) {
903 pr_err("Unable to locate numerical range indicator"
904 " \"~\" for \"%s\".\n", param->name);
905 return -1;
906 }
907 *tilde_ptr = '\0';
908
909 left_val_ptr = value;
910 right_val_ptr = value + strlen(left_val_ptr) + 1;
911
912 if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
913 return -1;
914 if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
915 return -1;
916
917 left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
918 right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
919 *tilde_ptr = '~';
920
921 if (right_val < left_val) {
922 pr_err("Numerical range for parameter \"%s\" contains"
923 " a right value which is less than the left.\n",
924 param->name);
925 return -1;
926 }
927
928 /*
929 * For now, enforce reasonable defaults for [I,O]FMarkInt.
930 */
931 tilde_ptr = strchr(param->value, '~');
932 if (!tilde_ptr) {
933 pr_err("Unable to locate numerical range indicator"
934 " \"~\" for \"%s\".\n", param->name);
935 return -1;
936 }
937 *tilde_ptr = '\0';
938
939 left_val_ptr = param->value;
940 right_val_ptr = param->value + strlen(left_val_ptr) + 1;
941
942 local_left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
943 local_right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
944 *tilde_ptr = '~';
945
946 if (param->set_param) {
947 if ((left_val < local_left_val) ||
948 (right_val < local_left_val)) {
949 pr_err("Passed value range \"%u~%u\" is below"
950 " minimum left value \"%u\" for key \"%s\","
951 " rejecting.\n", left_val, right_val,
952 local_left_val, param->name);
953 return -1;
954 }
955 } else {
956 if ((left_val < local_left_val) &&
957 (right_val < local_left_val)) {
958 pr_err("Received value range \"%u~%u\" is"
959 " below minimum left value \"%u\" for key"
960 " \"%s\", rejecting.\n", left_val, right_val,
961 local_left_val, param->name);
962 SET_PSTATE_REJECT(param);
963 if (iscsi_update_param_value(param, REJECT) < 0)
964 return -1;
965 }
966 }
967
968 return 0;
969}
970
971static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
972{
973 if (IS_PSTATE_PROPOSER(param))
974 return 0;
975
976 if (IS_TYPERANGE_AUTH_PARAM(param)) {
977 if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
978 strcmp(value, SPKM2) && strcmp(value, SRP) &&
979 strcmp(value, CHAP) && strcmp(value, NONE)) {
980 pr_err("Illegal value for \"%s\", must be"
981 " \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
982 " or \"%s\".\n", param->name, KRB5,
983 SPKM1, SPKM2, SRP, CHAP, NONE);
984 return -1;
985 }
986 }
987 if (IS_TYPERANGE_DIGEST_PARAM(param)) {
988 if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
989 pr_err("Illegal value for \"%s\", must be"
990 " \"%s\" or \"%s\".\n", param->name,
991 CRC32C, NONE);
992 return -1;
993 }
994 }
995 if (IS_TYPERANGE_SESSIONTYPE(param)) {
996 if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
997 pr_err("Illegal value for \"%s\", must be"
998 " \"%s\" or \"%s\".\n", param->name,
999 DISCOVERY, NORMAL);
1000 return -1;
1001 }
1002 }
1003
1004 return 0;
1005}
1006
1007/*
1008 * This function is used to pick a value range number; currently it just
1009 * returns the lesser of the two right-hand values.
1010 */
1011static char *iscsi_get_value_from_number_range(
1012 struct iscsi_param *param,
1013 char *value)
1014{
1015 char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
1016 u32 acceptor_right_value, proposer_right_value;
1017
1018 tilde_ptr1 = strchr(value, '~');
1019 if (!tilde_ptr1)
1020 return NULL;
1021 *tilde_ptr1++ = '\0';
1022 proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
1023
1024 tilde_ptr2 = strchr(param->value, '~');
1025 if (!tilde_ptr2)
1026 return NULL;
1027 *tilde_ptr2++ = '\0';
1028 acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
1029
1030 return (acceptor_right_value >= proposer_right_value) ?
1031 tilde_ptr1 : tilde_ptr2;
1032}
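A worked example of that selection rule, assuming the target is still at the INITIAL_IFMARKINT default of "2048~65535" (the initiator's offer here is made up):

	Initiator offer:    IFMarkInt=2048~8192
	Target default:     IFMarkInt=2048~65535
	Negotiated reply:   8192    (the lesser of the two right-hand values)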
1033
1034static char *iscsi_check_valuelist_for_support(
1035 struct iscsi_param *param,
1036 char *value)
1037{
1038 char *tmp1 = NULL, *tmp2 = NULL;
1039 char *acceptor_values = NULL, *proposer_values = NULL;
1040
1041 acceptor_values = param->value;
1042 proposer_values = value;
1043
1044 do {
1045 if (!proposer_values)
1046 return NULL;
1047 tmp1 = strchr(proposer_values, ',');
1048 if (tmp1)
1049 *tmp1 = '\0';
1050 acceptor_values = param->value;
1051 do {
1052 if (!acceptor_values) {
1053 if (tmp1)
1054 *tmp1 = ',';
1055 return NULL;
1056 }
1057 tmp2 = strchr(acceptor_values, ',');
1058 if (tmp2)
1059 *tmp2 = '\0';
1060 if (!acceptor_values || !proposer_values) {
1061 if (tmp1)
1062 *tmp1 = ',';
1063 if (tmp2)
1064 *tmp2 = ',';
1065 return NULL;
1066 }
1067 if (!strcmp(acceptor_values, proposer_values)) {
1068 if (tmp2)
1069 *tmp2 = ',';
1070 goto out;
1071 }
1072 if (tmp2)
1073 *tmp2++ = ',';
1074
1075 acceptor_values = tmp2;
1076 if (!acceptor_values)
1077 break;
1078 } while (acceptor_values);
1079 if (tmp1)
1080 *tmp1++ = ',';
1081 proposer_values = tmp1;
1082 } while (proposer_values);
1083
1084out:
1085 return proposer_values;
1086}
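The nested loops above return the first entry in the proposer's comma-separated list that also appears in the acceptor's list, so the proposer's ordering expresses its preference. For example, with the HeaderDigest default of "CRC32C,None" on the target side:

	Target (acceptor) list:    CRC32C,None
	Initiator proposes:        None,CRC32C
	Selected value:            None    (first proposer entry found in the acceptor's list)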
1087
1088static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
1089{
1090 u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
1091	char *negotiated_value = NULL;
1092
1093 if (IS_PSTATE_ACCEPTOR(param)) {
1094 pr_err("Received key \"%s\" twice, protocol error.\n",
1095 param->name);
1096 return -1;
1097 }
1098
1099 if (IS_PSTATE_REJECT(param))
1100 return 0;
1101
1102 if (IS_TYPE_BOOL_AND(param)) {
1103 if (!strcmp(value, YES))
1104 proposer_boolean_value = 1;
1105 if (!strcmp(param->value, YES))
1106 acceptor_boolean_value = 1;
1107		if (acceptor_boolean_value && proposer_boolean_value) {
1108			/* Both sides agreed on Yes; keep the current value. */
1109		} else {
1110 if (iscsi_update_param_value(param, NO) < 0)
1111 return -1;
1112 if (!proposer_boolean_value)
1113 SET_PSTATE_REPLY_OPTIONAL(param);
1114 }
1115 } else if (IS_TYPE_BOOL_OR(param)) {
1116 if (!strcmp(value, YES))
1117 proposer_boolean_value = 1;
1118 if (!strcmp(param->value, YES))
1119 acceptor_boolean_value = 1;
1120 if (acceptor_boolean_value || proposer_boolean_value) {
1121 if (iscsi_update_param_value(param, YES) < 0)
1122 return -1;
1123 if (proposer_boolean_value)
1124 SET_PSTATE_REPLY_OPTIONAL(param);
1125 }
1126 } else if (IS_TYPE_NUMBER(param)) {
1127 char *tmpptr, buf[10];
1128 u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
1129 u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
1130
1131 memset(buf, 0, 10);
1132
1133 if (!strcmp(param->name, MAXCONNECTIONS) ||
1134 !strcmp(param->name, MAXBURSTLENGTH) ||
1135 !strcmp(param->name, FIRSTBURSTLENGTH) ||
1136 !strcmp(param->name, MAXOUTSTANDINGR2T) ||
1137 !strcmp(param->name, DEFAULTTIME2RETAIN) ||
1138 !strcmp(param->name, ERRORRECOVERYLEVEL)) {
1139 if (proposer_value > acceptor_value) {
1140 sprintf(buf, "%u", acceptor_value);
1141 if (iscsi_update_param_value(param,
1142 &buf[0]) < 0)
1143 return -1;
1144 } else {
1145 if (iscsi_update_param_value(param, value) < 0)
1146 return -1;
1147 }
1148 } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
1149 if (acceptor_value > proposer_value) {
1150 sprintf(buf, "%u", acceptor_value);
1151 if (iscsi_update_param_value(param,
1152 &buf[0]) < 0)
1153 return -1;
1154 } else {
1155 if (iscsi_update_param_value(param, value) < 0)
1156 return -1;
1157 }
1158 } else {
1159 if (iscsi_update_param_value(param, value) < 0)
1160 return -1;
1161 }
1162
1163 if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
1164 SET_PSTATE_REPLY_OPTIONAL(param);
1165	} else if (IS_TYPE_NUMBER_RANGE(param)) {
1166		negotiated_value = iscsi_get_value_from_number_range(
1167					param, value);
1168		if (!negotiated_value)
1169			return -1;
1170		if (iscsi_update_param_value(param, negotiated_value) < 0)
1171			return -1;
1172	} else if (IS_TYPE_VALUE_LIST(param)) {
1173		negotiated_value = iscsi_check_valuelist_for_support(
1174					param, value);
1175		if (!negotiated_value) {
1176			pr_err("Proposer's value list \"%s\" contains"
1177				" no valid values from Acceptor's value list"
1178				" \"%s\".\n", value, param->value);
1179			return -1;
1180		}
1181		if (iscsi_update_param_value(param, negotiated_value) < 0)
1182			return -1;
1183 } else if (IS_PHASE_DECLARATIVE(param)) {
1184 if (iscsi_update_param_value(param, value) < 0)
1185 return -1;
1186 SET_PSTATE_REPLY_OPTIONAL(param);
1187 }
1188
1189 return 0;
1190}
1191
1192static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
1193{
1194 if (IS_PSTATE_RESPONSE_GOT(param)) {
1195 pr_err("Received key \"%s\" twice, protocol error.\n",
1196 param->name);
1197 return -1;
1198 }
1199
1200 if (IS_TYPE_NUMBER_RANGE(param)) {
1201		u32 left_val = 0, right_val = 0, received_value = 0;
1202 char *left_val_ptr = NULL, *right_val_ptr = NULL;
1203 char *tilde_ptr = NULL, *tmp_ptr = NULL;
1204
1205 if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
1206 if (iscsi_update_param_value(param, value) < 0)
1207 return -1;
1208 return 0;
1209 }
1210
1211 tilde_ptr = strchr(value, '~');
1212 if (tilde_ptr) {
1213 pr_err("Illegal \"~\" in response for \"%s\".\n",
1214 param->name);
1215 return -1;
1216 }
1217 tilde_ptr = strchr(param->value, '~');
1218 if (!tilde_ptr) {
1219 pr_err("Unable to locate numerical range"
1220 " indicator \"~\" for \"%s\".\n", param->name);
1221 return -1;
1222 }
1223 *tilde_ptr = '\0';
1224
1225 left_val_ptr = param->value;
1226 right_val_ptr = param->value + strlen(left_val_ptr) + 1;
1227 left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
1228 right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
1229		received_value = simple_strtoul(value, &tmp_ptr, 0);
1230
1231 *tilde_ptr = '~';
1232
1233		if ((received_value < left_val) ||
1234		    (received_value > right_val)) {
1235			pr_err("Illegal response \"%s=%u\", value must"
1236				" be between %u and %u.\n", param->name,
1237				received_value, left_val, right_val);
1238 return -1;
1239 }
1240 } else if (IS_TYPE_VALUE_LIST(param)) {
1241 char *comma_ptr = NULL, *tmp_ptr = NULL;
1242
1243 comma_ptr = strchr(value, ',');
1244 if (comma_ptr) {
1245 pr_err("Illegal \",\" in response for \"%s\".\n",
1246 param->name);
1247 return -1;
1248 }
1249
1250 tmp_ptr = iscsi_check_valuelist_for_support(param, value);
1251 if (!tmp_ptr)
1252 return -1;
1253 }
1254
1255 if (iscsi_update_param_value(param, value) < 0)
1256 return -1;
1257
1258 return 0;
1259}
1260
1261static int iscsi_check_value(struct iscsi_param *param, char *value)
1262{
1263 char *comma_ptr = NULL;
1264
1265 if (!strcmp(value, REJECT)) {
1266 if (!strcmp(param->name, IFMARKINT) ||
1267 !strcmp(param->name, OFMARKINT)) {
1268 /*
1269 * Reject is not fatal for [I,O]FMarkInt, and causes
1270 * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
1271 */
1272 SET_PSTATE_REJECT(param);
1273 return 0;
1274 }
1275 pr_err("Received %s=%s\n", param->name, value);
1276 return -1;
1277 }
1278 if (!strcmp(value, IRRELEVANT)) {
1279 pr_debug("Received %s=%s\n", param->name, value);
1280 SET_PSTATE_IRRELEVANT(param);
1281 return 0;
1282 }
1283 if (!strcmp(value, NOTUNDERSTOOD)) {
1284 if (!IS_PSTATE_PROPOSER(param)) {
1285 pr_err("Received illegal offer %s=%s\n",
1286 param->name, value);
1287 return -1;
1288 }
1289
1290/* #warning FIXME: Add check for X-ExtensionKey here */
1291 pr_err("Standard iSCSI key \"%s\" cannot be answered"
1292 " with \"%s\", protocol error.\n", param->name, value);
1293 return -1;
1294 }
1295
1296 do {
1297 comma_ptr = NULL;
1298 comma_ptr = strchr(value, ',');
1299
1300 if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
1301			pr_err("Detected value separator \",\", but"
1302 " key \"%s\" does not allow a value list,"
1303 " protocol error.\n", param->name);
1304 return -1;
1305 }
1306 if (comma_ptr)
1307 *comma_ptr = '\0';
1308
1309 if (strlen(value) > VALUE_MAXLEN) {
1310 pr_err("Value for key \"%s\" exceeds %d,"
1311 " protocol error.\n", param->name,
1312 VALUE_MAXLEN);
1313 return -1;
1314 }
1315
1316 if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
1317 if (iscsi_check_boolean_value(param, value) < 0)
1318 return -1;
1319 } else if (IS_TYPE_NUMBER(param)) {
1320 if (iscsi_check_numerical_value(param, value) < 0)
1321 return -1;
1322 } else if (IS_TYPE_NUMBER_RANGE(param)) {
1323 if (iscsi_check_numerical_range_value(param, value) < 0)
1324 return -1;
1325 } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
1326 if (iscsi_check_string_or_list_value(param, value) < 0)
1327 return -1;
1328 } else {
1329 pr_err("Huh? 0x%02x\n", param->type);
1330 return -1;
1331 }
1332
1333 if (comma_ptr)
1334 *comma_ptr++ = ',';
1335
1336 value = comma_ptr;
1337 } while (value);
1338
1339 return 0;
1340}
1341
1342static struct iscsi_param *__iscsi_check_key(
1343 char *key,
1344 int sender,
1345 struct iscsi_param_list *param_list)
1346{
1347 struct iscsi_param *param;
1348
1349 if (strlen(key) > KEY_MAXLEN) {
1350 pr_err("Length of key name \"%s\" exceeds %d.\n",
1351 key, KEY_MAXLEN);
1352 return NULL;
1353 }
1354
1355 param = iscsi_find_param_from_key(key, param_list);
1356 if (!param)
1357 return NULL;
1358
1359 if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
1360 pr_err("Key \"%s\" may not be sent to %s,"
1361 " protocol error.\n", param->name,
1362 (sender & SENDER_RECEIVER) ? "target" : "initiator");
1363 return NULL;
1364 }
1365
1366 if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
1367 pr_err("Key \"%s\" may not be sent to %s,"
1368 " protocol error.\n", param->name,
1369 (sender & SENDER_RECEIVER) ? "initiator" : "target");
1370 return NULL;
1371 }
1372
1373 return param;
1374}
1375
1376static struct iscsi_param *iscsi_check_key(
1377 char *key,
1378 int phase,
1379 int sender,
1380 struct iscsi_param_list *param_list)
1381{
1382 struct iscsi_param *param;
1383 /*
1384 * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
1385 */
1386 if (strlen(key) > KEY_MAXLEN) {
1387 pr_err("Length of key name \"%s\" exceeds %d.\n",
1388 key, KEY_MAXLEN);
1389 return NULL;
1390 }
1391
1392 param = iscsi_find_param_from_key(key, param_list);
1393 if (!param)
1394 return NULL;
1395
1396 if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
1397 pr_err("Key \"%s\" may not be sent to %s,"
1398 " protocol error.\n", param->name,
1399 (sender & SENDER_RECEIVER) ? "target" : "initiator");
1400 return NULL;
1401 }
1402 if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
1403 pr_err("Key \"%s\" may not be sent to %s,"
1404 " protocol error.\n", param->name,
1405 (sender & SENDER_RECEIVER) ? "initiator" : "target");
1406 return NULL;
1407 }
1408
1409 if (IS_PSTATE_ACCEPTOR(param)) {
1410 pr_err("Key \"%s\" received twice, protocol error.\n",
1411 key);
1412 return NULL;
1413 }
1414
1415 if (!phase)
1416 return param;
1417
1418 if (!(param->phase & phase)) {
1419 pr_err("Key \"%s\" may not be negotiated during ",
1420 param->name);
1421 switch (phase) {
1422 case PHASE_SECURITY:
1423 pr_debug("Security phase.\n");
1424 break;
1425		case PHASE_OPERATIONAL:
1426			pr_debug("Operational phase.\n"); break;
1427		default:
1428 pr_debug("Unknown phase.\n");
1429 }
1430 return NULL;
1431 }
1432
1433 return param;
1434}
1435
1436static int iscsi_enforce_integrity_rules(
1437 u8 phase,
1438 struct iscsi_param_list *param_list)
1439{
1440 char *tmpptr;
1441 u8 DataSequenceInOrder = 0;
1442 u8 ErrorRecoveryLevel = 0, SessionType = 0;
1443 u8 IFMarker = 0, OFMarker = 0;
1444 u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
1445 u32 FirstBurstLength = 0, MaxBurstLength = 0;
1446 struct iscsi_param *param = NULL;
1447
1448 list_for_each_entry(param, &param_list->param_list, p_list) {
1449 if (!(param->phase & phase))
1450 continue;
1451 if (!strcmp(param->name, SESSIONTYPE))
1452 if (!strcmp(param->value, NORMAL))
1453 SessionType = 1;
1454 if (!strcmp(param->name, ERRORRECOVERYLEVEL))
1455 ErrorRecoveryLevel = simple_strtoul(param->value,
1456 &tmpptr, 0);
1457 if (!strcmp(param->name, DATASEQUENCEINORDER))
1458 if (!strcmp(param->value, YES))
1459 DataSequenceInOrder = 1;
1460 if (!strcmp(param->name, MAXBURSTLENGTH))
1461 MaxBurstLength = simple_strtoul(param->value,
1462 &tmpptr, 0);
1463 if (!strcmp(param->name, IFMARKER))
1464 if (!strcmp(param->value, YES))
1465 IFMarker = 1;
1466 if (!strcmp(param->name, OFMARKER))
1467 if (!strcmp(param->value, YES))
1468 OFMarker = 1;
1469 if (!strcmp(param->name, IFMARKINT))
1470 if (!strcmp(param->value, REJECT))
1471 IFMarkInt_Reject = 1;
1472 if (!strcmp(param->name, OFMARKINT))
1473 if (!strcmp(param->value, REJECT))
1474 OFMarkInt_Reject = 1;
1475 }
1476
1477 list_for_each_entry(param, &param_list->param_list, p_list) {
1478 if (!(param->phase & phase))
1479 continue;
1480 if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
1481 (strcmp(param->name, IFMARKER) &&
1482 strcmp(param->name, OFMARKER) &&
1483 strcmp(param->name, IFMARKINT) &&
1484 strcmp(param->name, OFMARKINT))))
1485 continue;
1486 if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
1487 DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
1488 if (strcmp(param->value, "1")) {
1489 if (iscsi_update_param_value(param, "1") < 0)
1490 return -1;
1491 pr_debug("Reset \"%s\" to \"%s\".\n",
1492 param->name, param->value);
1493 }
1494 }
1495 if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
1496 if (strcmp(param->value, "1")) {
1497 if (iscsi_update_param_value(param, "1") < 0)
1498 return -1;
1499 pr_debug("Reset \"%s\" to \"%s\".\n",
1500 param->name, param->value);
1501 }
1502 }
1503 if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
1504 FirstBurstLength = simple_strtoul(param->value,
1505 &tmpptr, 0);
1506 if (FirstBurstLength > MaxBurstLength) {
1507 char tmpbuf[10];
1508 memset(tmpbuf, 0, 10);
1509 sprintf(tmpbuf, "%u", MaxBurstLength);
1510 if (iscsi_update_param_value(param, tmpbuf))
1511 return -1;
1512 pr_debug("Reset \"%s\" to \"%s\".\n",
1513 param->name, param->value);
1514 }
1515 }
1516 if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
1517 if (iscsi_update_param_value(param, NO) < 0)
1518 return -1;
1519 IFMarker = 0;
1520 pr_debug("Reset \"%s\" to \"%s\".\n",
1521 param->name, param->value);
1522 }
1523 if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
1524 if (iscsi_update_param_value(param, NO) < 0)
1525 return -1;
1526 OFMarker = 0;
1527 pr_debug("Reset \"%s\" to \"%s\".\n",
1528 param->name, param->value);
1529 }
1530 if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
1531 if (!strcmp(param->value, REJECT))
1532 continue;
1533 param->state &= ~PSTATE_NEGOTIATE;
1534 if (iscsi_update_param_value(param, IRRELEVANT) < 0)
1535 return -1;
1536 pr_debug("Reset \"%s\" to \"%s\".\n",
1537 param->name, param->value);
1538 }
1539 if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
1540 if (!strcmp(param->value, REJECT))
1541 continue;
1542 param->state &= ~PSTATE_NEGOTIATE;
1543 if (iscsi_update_param_value(param, IRRELEVANT) < 0)
1544 return -1;
1545 pr_debug("Reset \"%s\" to \"%s\".\n",
1546 param->name, param->value);
1547 }
1548 }
1549
1550 return 0;
1551}
1552
1553int iscsi_decode_text_input(
1554 u8 phase,
1555 u8 sender,
1556 char *textbuf,
1557 u32 length,
1558 struct iscsi_param_list *param_list)
1559{
1560 char *tmpbuf, *start = NULL, *end = NULL;
1561
1562 tmpbuf = kzalloc(length + 1, GFP_KERNEL);
1563 if (!tmpbuf) {
1564 pr_err("Unable to allocate memory for tmpbuf.\n");
1565 return -1;
1566 }
1567
1568 memcpy(tmpbuf, textbuf, length);
1569 tmpbuf[length] = '\0';
1570 start = tmpbuf;
1571 end = (start + length);
1572
1573 while (start < end) {
1574 char *key, *value;
1575 struct iscsi_param *param;
1576
1577 if (iscsi_extract_key_value(start, &key, &value) < 0) {
1578 kfree(tmpbuf);
1579 return -1;
1580 }
1581
1582 pr_debug("Got key: %s=%s\n", key, value);
1583
1584 if (phase & PHASE_SECURITY) {
1585 if (iscsi_check_for_auth_key(key) > 0) {
1586 char *tmpptr = key + strlen(key);
1587 *tmpptr = '=';
1588 kfree(tmpbuf);
1589 return 1;
1590 }
1591 }
1592
1593 param = iscsi_check_key(key, phase, sender, param_list);
1594 if (!param) {
1595 if (iscsi_add_notunderstood_response(key,
1596 value, param_list) < 0) {
1597 kfree(tmpbuf);
1598 return -1;
1599 }
1600 start += strlen(key) + strlen(value) + 2;
1601 continue;
1602 }
1603 if (iscsi_check_value(param, value) < 0) {
1604 kfree(tmpbuf);
1605 return -1;
1606 }
1607
1608 start += strlen(key) + strlen(value) + 2;
1609
1610 if (IS_PSTATE_PROPOSER(param)) {
1611 if (iscsi_check_proposer_state(param, value) < 0) {
1612 kfree(tmpbuf);
1613 return -1;
1614 }
1615 SET_PSTATE_RESPONSE_GOT(param);
1616 } else {
1617 if (iscsi_check_acceptor_state(param, value) < 0) {
1618 kfree(tmpbuf);
1619 return -1;
1620 }
1621 SET_PSTATE_ACCEPTOR(param);
1622 }
1623 }
1624
1625 kfree(tmpbuf);
1626 return 0;
1627}
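The data segment being decoded here is a run of NUL-terminated key=value pairs laid end to end; the `start += strlen(key) + strlen(value) + 2` stride accounts for the '=' that was overwritten with a NUL plus the pair's own terminating NUL. A small user-space sketch of that walk (illustrative only; the sample keys are merely typical login keys):

/* Illustrative sketch only -- walks a buffer of NUL-terminated pairs. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "SessionType=Normal\0HeaderDigest=CRC32C,None\0";
	char *start = buf, *end = buf + sizeof(buf) - 1;

	while (start < end) {
		char *key = start;
		char *sep = strchr(start, '=');	/* assumed present here */
		char *value = sep + 1;

		*sep = '\0';			/* split in place, as above */
		printf("Got key: %s=%s\n", key, value);
		start += strlen(key) + strlen(value) + 2;
	}
	return 0;
}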
1628
1629int iscsi_encode_text_output(
1630 u8 phase,
1631 u8 sender,
1632 char *textbuf,
1633 u32 *length,
1634 struct iscsi_param_list *param_list)
1635{
1636 char *output_buf = NULL;
1637 struct iscsi_extra_response *er;
1638 struct iscsi_param *param;
1639
1640 output_buf = textbuf + *length;
1641
1642 if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
1643 return -1;
1644
1645 list_for_each_entry(param, &param_list->param_list, p_list) {
1646 if (!(param->sender & sender))
1647 continue;
1648 if (IS_PSTATE_ACCEPTOR(param) &&
1649 !IS_PSTATE_RESPONSE_SENT(param) &&
1650 !IS_PSTATE_REPLY_OPTIONAL(param) &&
1651 (param->phase & phase)) {
1652 *length += sprintf(output_buf, "%s=%s",
1653 param->name, param->value);
1654 *length += 1;
1655 output_buf = textbuf + *length;
1656 SET_PSTATE_RESPONSE_SENT(param);
1657 pr_debug("Sending key: %s=%s\n",
1658 param->name, param->value);
1659 continue;
1660 }
1661 if (IS_PSTATE_NEGOTIATE(param) &&
1662 !IS_PSTATE_ACCEPTOR(param) &&
1663 !IS_PSTATE_PROPOSER(param) &&
1664 (param->phase & phase)) {
1665 *length += sprintf(output_buf, "%s=%s",
1666 param->name, param->value);
1667 *length += 1;
1668 output_buf = textbuf + *length;
1669 SET_PSTATE_PROPOSER(param);
1670 iscsi_check_proposer_for_optional_reply(param);
1671 pr_debug("Sending key: %s=%s\n",
1672 param->name, param->value);
1673 }
1674 }
1675
1676 list_for_each_entry(er, &param_list->extra_response_list, er_list) {
1677 *length += sprintf(output_buf, "%s=%s", er->key, er->value);
1678 *length += 1;
1679 output_buf = textbuf + *length;
1680 pr_debug("Sending key: %s=%s\n", er->key, er->value);
1681 }
1682 iscsi_release_extra_responses(param_list);
1683
1684 return 0;
1685}
1686
1687int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
1688{
1689 int ret = 0;
1690 struct iscsi_param *param;
1691
1692 list_for_each_entry(param, &param_list->param_list, p_list) {
1693 if (IS_PSTATE_NEGOTIATE(param) &&
1694 IS_PSTATE_PROPOSER(param) &&
1695 !IS_PSTATE_RESPONSE_GOT(param) &&
1696 !IS_PSTATE_REPLY_OPTIONAL(param) &&
1697 !IS_PHASE_DECLARATIVE(param)) {
1698 pr_err("No response for proposed key \"%s\".\n",
1699 param->name);
1700 ret = -1;
1701 }
1702 }
1703
1704 return ret;
1705}
1706
1707int iscsi_change_param_value(
1708 char *keyvalue,
1709 struct iscsi_param_list *param_list,
1710 int check_key)
1711{
1712 char *key = NULL, *value = NULL;
1713 struct iscsi_param *param;
1714 int sender = 0;
1715
1716 if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
1717 return -1;
1718
1719 if (!check_key) {
1720 param = __iscsi_check_key(keyvalue, sender, param_list);
1721 if (!param)
1722 return -1;
1723 } else {
1724 param = iscsi_check_key(keyvalue, 0, sender, param_list);
1725 if (!param)
1726 return -1;
1727
1728 param->set_param = 1;
1729 if (iscsi_check_value(param, value) < 0) {
1730 param->set_param = 0;
1731 return -1;
1732 }
1733 param->set_param = 0;
1734 }
1735
1736 if (iscsi_update_param_value(param, value) < 0)
1737 return -1;
1738
1739 return 0;
1740}
1741
1742void iscsi_set_connection_parameters(
1743 struct iscsi_conn_ops *ops,
1744 struct iscsi_param_list *param_list)
1745{
1746 char *tmpptr;
1747 struct iscsi_param *param;
1748
1749 pr_debug("---------------------------------------------------"
1750 "---------------\n");
1751 list_for_each_entry(param, &param_list->param_list, p_list) {
1752 if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
1753 continue;
1754 if (!strcmp(param->name, AUTHMETHOD)) {
1755 pr_debug("AuthMethod: %s\n",
1756 param->value);
1757 } else if (!strcmp(param->name, HEADERDIGEST)) {
1758 ops->HeaderDigest = !strcmp(param->value, CRC32C);
1759 pr_debug("HeaderDigest: %s\n",
1760 param->value);
1761 } else if (!strcmp(param->name, DATADIGEST)) {
1762 ops->DataDigest = !strcmp(param->value, CRC32C);
1763 pr_debug("DataDigest: %s\n",
1764 param->value);
1765 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
1766 ops->MaxRecvDataSegmentLength =
1767 simple_strtoul(param->value, &tmpptr, 0);
1768 pr_debug("MaxRecvDataSegmentLength: %s\n",
1769 param->value);
1770 } else if (!strcmp(param->name, OFMARKER)) {
1771 ops->OFMarker = !strcmp(param->value, YES);
1772 pr_debug("OFMarker: %s\n",
1773 param->value);
1774 } else if (!strcmp(param->name, IFMARKER)) {
1775 ops->IFMarker = !strcmp(param->value, YES);
1776 pr_debug("IFMarker: %s\n",
1777 param->value);
1778 } else if (!strcmp(param->name, OFMARKINT)) {
1779 ops->OFMarkInt =
1780 simple_strtoul(param->value, &tmpptr, 0);
1781 pr_debug("OFMarkInt: %s\n",
1782 param->value);
1783 } else if (!strcmp(param->name, IFMARKINT)) {
1784 ops->IFMarkInt =
1785 simple_strtoul(param->value, &tmpptr, 0);
1786 pr_debug("IFMarkInt: %s\n",
1787 param->value);
1788 }
1789 }
1790 pr_debug("----------------------------------------------------"
1791 "--------------\n");
1792}
1793
1794void iscsi_set_session_parameters(
1795 struct iscsi_sess_ops *ops,
1796 struct iscsi_param_list *param_list,
1797 int leading)
1798{
1799 char *tmpptr;
1800 struct iscsi_param *param;
1801
1802 pr_debug("----------------------------------------------------"
1803 "--------------\n");
1804 list_for_each_entry(param, &param_list->param_list, p_list) {
1805 if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
1806 continue;
1807 if (!strcmp(param->name, INITIATORNAME)) {
1808 if (!param->value)
1809 continue;
1810 if (leading)
1811 snprintf(ops->InitiatorName,
1812 sizeof(ops->InitiatorName),
1813 "%s", param->value);
1814 pr_debug("InitiatorName: %s\n",
1815 param->value);
1816 } else if (!strcmp(param->name, INITIATORALIAS)) {
1817 if (!param->value)
1818 continue;
1819 snprintf(ops->InitiatorAlias,
1820 sizeof(ops->InitiatorAlias),
1821 "%s", param->value);
1822 pr_debug("InitiatorAlias: %s\n",
1823 param->value);
1824 } else if (!strcmp(param->name, TARGETNAME)) {
1825 if (!param->value)
1826 continue;
1827 if (leading)
1828 snprintf(ops->TargetName,
1829 sizeof(ops->TargetName),
1830 "%s", param->value);
1831 pr_debug("TargetName: %s\n",
1832 param->value);
1833 } else if (!strcmp(param->name, TARGETALIAS)) {
1834 if (!param->value)
1835 continue;
1836 snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
1837 "%s", param->value);
1838 pr_debug("TargetAlias: %s\n",
1839 param->value);
1840 } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
1841 ops->TargetPortalGroupTag =
1842 simple_strtoul(param->value, &tmpptr, 0);
1843 pr_debug("TargetPortalGroupTag: %s\n",
1844 param->value);
1845 } else if (!strcmp(param->name, MAXCONNECTIONS)) {
1846 ops->MaxConnections =
1847 simple_strtoul(param->value, &tmpptr, 0);
1848 pr_debug("MaxConnections: %s\n",
1849 param->value);
1850 } else if (!strcmp(param->name, INITIALR2T)) {
1851 ops->InitialR2T = !strcmp(param->value, YES);
1852 pr_debug("InitialR2T: %s\n",
1853 param->value);
1854 } else if (!strcmp(param->name, IMMEDIATEDATA)) {
1855 ops->ImmediateData = !strcmp(param->value, YES);
1856 pr_debug("ImmediateData: %s\n",
1857 param->value);
1858 } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
1859 ops->MaxBurstLength =
1860 simple_strtoul(param->value, &tmpptr, 0);
1861 pr_debug("MaxBurstLength: %s\n",
1862 param->value);
1863 } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
1864 ops->FirstBurstLength =
1865 simple_strtoul(param->value, &tmpptr, 0);
1866 pr_debug("FirstBurstLength: %s\n",
1867 param->value);
1868 } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
1869 ops->DefaultTime2Wait =
1870 simple_strtoul(param->value, &tmpptr, 0);
1871 pr_debug("DefaultTime2Wait: %s\n",
1872 param->value);
1873 } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
1874 ops->DefaultTime2Retain =
1875 simple_strtoul(param->value, &tmpptr, 0);
1876 pr_debug("DefaultTime2Retain: %s\n",
1877 param->value);
1878 } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
1879 ops->MaxOutstandingR2T =
1880 simple_strtoul(param->value, &tmpptr, 0);
1881 pr_debug("MaxOutstandingR2T: %s\n",
1882 param->value);
1883 } else if (!strcmp(param->name, DATAPDUINORDER)) {
1884 ops->DataPDUInOrder = !strcmp(param->value, YES);
1885 pr_debug("DataPDUInOrder: %s\n",
1886 param->value);
1887 } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
1888 ops->DataSequenceInOrder = !strcmp(param->value, YES);
1889 pr_debug("DataSequenceInOrder: %s\n",
1890 param->value);
1891 } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
1892 ops->ErrorRecoveryLevel =
1893 simple_strtoul(param->value, &tmpptr, 0);
1894 pr_debug("ErrorRecoveryLevel: %s\n",
1895 param->value);
1896 } else if (!strcmp(param->name, SESSIONTYPE)) {
1897 ops->SessionType = !strcmp(param->value, DISCOVERY);
1898 pr_debug("SessionType: %s\n",
1899 param->value);
1900 }
1901 }
1902 pr_debug("----------------------------------------------------"
1903 "--------------\n");
1904
1905}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
new file mode 100644
index 000000000000..6a37fd6f1285
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -0,0 +1,269 @@
1#ifndef ISCSI_PARAMETERS_H
2#define ISCSI_PARAMETERS_H
3
4struct iscsi_extra_response {
5 char key[64];
6 char value[32];
7 struct list_head er_list;
8} ____cacheline_aligned;
9
10struct iscsi_param {
11 char *name;
12 char *value;
13 u8 set_param;
14 u8 phase;
15 u8 scope;
16 u8 sender;
17 u8 type;
18 u8 use;
19 u16 type_range;
20 u32 state;
21 struct list_head p_list;
22} ____cacheline_aligned;
23
24extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
25extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
26extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
27extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
28extern void iscsi_print_params(struct iscsi_param_list *);
29extern int iscsi_create_default_params(struct iscsi_param_list **);
30extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
31extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
32extern int iscsi_copy_param_list(struct iscsi_param_list **,
33 struct iscsi_param_list *, int);
34extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
35extern void iscsi_release_param_list(struct iscsi_param_list *);
36extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
37extern int iscsi_extract_key_value(char *, char **, char **);
38extern int iscsi_update_param_value(struct iscsi_param *, char *);
39extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *);
40extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
41 struct iscsi_param_list *);
42extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
43extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
44 struct iscsi_param_list *);
45extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
46 struct iscsi_param_list *, int);
47
48#define YES "Yes"
49#define NO "No"
50#define ALL "All"
51#define IRRELEVANT "Irrelevant"
52#define NONE "None"
53#define NOTUNDERSTOOD "NotUnderstood"
54#define REJECT "Reject"
55
56/*
57 * The Parameter Names.
58 */
59#define AUTHMETHOD "AuthMethod"
60#define HEADERDIGEST "HeaderDigest"
61#define DATADIGEST "DataDigest"
62#define MAXCONNECTIONS "MaxConnections"
63#define SENDTARGETS "SendTargets"
64#define TARGETNAME "TargetName"
65#define INITIATORNAME "InitiatorName"
66#define TARGETALIAS "TargetAlias"
67#define INITIATORALIAS "InitiatorAlias"
68#define TARGETADDRESS "TargetAddress"
69#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
70#define INITIALR2T "InitialR2T"
71#define IMMEDIATEDATA "ImmediateData"
72#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
73#define MAXBURSTLENGTH "MaxBurstLength"
74#define FIRSTBURSTLENGTH "FirstBurstLength"
75#define DEFAULTTIME2WAIT "DefaultTime2Wait"
76#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
77#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
78#define DATAPDUINORDER "DataPDUInOrder"
79#define DATASEQUENCEINORDER "DataSequenceInOrder"
80#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
81#define SESSIONTYPE "SessionType"
82#define IFMARKER "IFMarker"
83#define OFMARKER "OFMarker"
84#define IFMARKINT "IFMarkInt"
85#define OFMARKINT "OFMarkInt"
86#define X_EXTENSIONKEY "X-com.sbei.version"
87#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
88#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
89
90/*
91 * For AuthMethod.
92 */
93#define KRB5 "KRB5"
94#define SPKM1 "SPKM1"
95#define SPKM2 "SPKM2"
96#define SRP "SRP"
97#define CHAP "CHAP"
98
99/*
100 * Initial values for Parameter Negotiation.
101 */
102#define INITIAL_AUTHMETHOD CHAP
103#define INITIAL_HEADERDIGEST "CRC32C,None"
104#define INITIAL_DATADIGEST "CRC32C,None"
105#define INITIAL_MAXCONNECTIONS "1"
106#define INITIAL_SENDTARGETS ALL
107#define INITIAL_TARGETNAME "LIO.Target"
108#define INITIAL_INITIATORNAME "LIO.Initiator"
109#define INITIAL_TARGETALIAS "LIO Target"
110#define INITIAL_INITIATORALIAS "LIO Initiator"
111#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
112#define INITIAL_TARGETPORTALGROUPTAG "1"
113#define INITIAL_INITIALR2T YES
114#define INITIAL_IMMEDIATEDATA YES
115#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
116#define INITIAL_MAXBURSTLENGTH "262144"
117#define INITIAL_FIRSTBURSTLENGTH "65536"
118#define INITIAL_DEFAULTTIME2WAIT "2"
119#define INITIAL_DEFAULTTIME2RETAIN "20"
120#define INITIAL_MAXOUTSTANDINGR2T "1"
121#define INITIAL_DATAPDUINORDER YES
122#define INITIAL_DATASEQUENCEINORDER YES
123#define INITIAL_ERRORRECOVERYLEVEL "0"
124#define INITIAL_SESSIONTYPE NORMAL
125#define INITIAL_IFMARKER NO
126#define INITIAL_OFMARKER NO
127#define INITIAL_IFMARKINT "2048~65535"
128#define INITIAL_OFMARKINT "2048~65535"
129
130/*
131 * For [Header,Data]Digests.
132 */
133#define CRC32C "CRC32C"
134
135/*
136 * For SessionType.
137 */
138#define DISCOVERY "Discovery"
139#define NORMAL "Normal"
140
141/*
142 * struct iscsi_param->use
143 */
144#define USE_LEADING_ONLY 0x01
145#define USE_INITIAL_ONLY 0x02
146#define USE_ALL 0x04
147
148#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
149#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
150#define IS_USE_ALL(p) ((p)->use & USE_ALL)
151
152#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
153
154/*
155 * struct iscsi_param->sender
156 */
157#define SENDER_INITIATOR 0x01
158#define SENDER_TARGET 0x02
159#define SENDER_BOTH 0x03
160/* Used in iscsi_check_key() */
161#define SENDER_RECEIVER 0x04
162
163#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
164#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
165#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
166
167/*
168 * struct iscsi_param->scope
169 */
170#define SCOPE_CONNECTION_ONLY 0x01
171#define SCOPE_SESSION_WIDE 0x02
172
173#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
174#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
175
176/*
177 * struct iscsi_param->phase
178 */
179#define PHASE_SECURITY 0x01
180#define PHASE_OPERATIONAL 0x02
181#define PHASE_DECLARATIVE 0x04
182#define PHASE_FFP0 0x08
183
184#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
185#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
186#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
187#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
188
189/*
190 * struct iscsi_param->type
191 */
192#define TYPE_BOOL_AND 0x01
193#define TYPE_BOOL_OR 0x02
194#define TYPE_NUMBER 0x04
195#define TYPE_NUMBER_RANGE 0x08
196#define TYPE_STRING 0x10
197#define TYPE_VALUE_LIST 0x20
198
199#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
200#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
201#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
202#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
203#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
204#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
205
206/*
207 * struct iscsi_param->type_range
208 */
209#define TYPERANGE_BOOL_AND 0x0001
210#define TYPERANGE_BOOL_OR 0x0002
211#define TYPERANGE_0_TO_2 0x0004
212#define TYPERANGE_0_TO_3600 0x0008
213#define TYPERANGE_0_TO_32767 0x0010
214#define TYPERANGE_0_TO_65535 0x0020
215#define TYPERANGE_1_TO_65535 0x0040
216#define TYPERANGE_2_TO_3600 0x0080
217#define TYPERANGE_512_TO_16777215 0x0100
218#define TYPERANGE_AUTH 0x0200
219#define TYPERANGE_DIGEST 0x0400
220#define TYPERANGE_ISCSINAME 0x0800
221#define TYPERANGE_MARKINT 0x1000
222#define TYPERANGE_SESSIONTYPE 0x2000
223#define TYPERANGE_TARGETADDRESS 0x4000
224#define TYPERANGE_UTF8 0x8000
225
226#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
227#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
228#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
229#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
230#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
231#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
232#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
233 TYPERANGE_512_TO_16777215)
234#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
235#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
236#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
237 TYPERANGE_SESSIONTYPE)
238
239/*
240 * struct iscsi_param->state
241 */
242#define PSTATE_ACCEPTOR 0x01
243#define PSTATE_NEGOTIATE 0x02
244#define PSTATE_PROPOSER 0x04
245#define PSTATE_IRRELEVANT 0x08
246#define PSTATE_REJECT 0x10
247#define PSTATE_REPLY_OPTIONAL 0x20
248#define PSTATE_RESPONSE_GOT 0x40
249#define PSTATE_RESPONSE_SENT 0x80
250
251#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
252#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
253#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
254#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
255#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
256#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
257#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
258#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
259
260#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
261#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
262#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
263#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
264#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
265#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
266#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
267#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
268
269#endif /* ISCSI_PARAMETERS_H */
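Taken together, the prototypes above describe the negotiation life cycle of a param_list on the target side. The outline below is a rough reading of that flow, not the driver's actual call sites (those live in the login code); names such as rx_buf, tx_buf, phase, sender and the ops pointers stand in for values supplied by the login state machine:

/*
 * Rough outline of how the exported API fits together for a leading
 * connection; buffers, flags and ops pointers are placeholders, and
 * all error handling is omitted:
 *
 *	iscsi_create_default_params(&param_list);
 *	iscsi_set_keys_to_negotiate(sessiontype, param_list);
 *
 *	for each Login/Text PDU exchanged:
 *		iscsi_decode_text_input(phase, sender, rx_buf, rx_len, param_list);
 *		iscsi_encode_text_output(phase, sender, tx_buf, &tx_len, param_list);
 *
 *	iscsi_check_negotiated_keys(param_list);
 *	iscsi_set_session_parameters(sess_ops, param_list, leading);
 *	iscsi_set_connection_parameters(conn_ops, param_list);
 *	iscsi_release_param_list(param_list);
 */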
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
new file mode 100644
index 000000000000..fc694082bfc0
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -0,0 +1,664 @@
1/*******************************************************************************
  2 * This file contains the main functions related to iSCSI DataSequenceInOrder=No
3 * and DataPDUInOrder=No.
4 *
  5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <linux/slab.h>
23#include <linux/random.h>
24
25#include "iscsi_target_core.h"
26#include "iscsi_target_util.h"
27#include "iscsi_target_seq_pdu_list.h"
28
29#define OFFLOAD_BUF_SIZE 32768
30
31void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
32{
33 int i;
34 struct iscsi_seq *seq;
35
36 pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
37 cmd->init_task_tag);
38
39 for (i = 0; i < cmd->seq_count; i++) {
40 seq = &cmd->seq_list[i];
41 pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
42 " offset: %d, xfer_len: %d, seq_send_order: %d,"
43 " seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
44 seq->offset, seq->xfer_len, seq->seq_send_order,
45 seq->seq_no);
46 }
47}
48
49void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
50{
51 int i;
52 struct iscsi_pdu *pdu;
53
54 pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
55 cmd->init_task_tag);
56
57 for (i = 0; i < cmd->pdu_count; i++) {
58 pdu = &cmd->pdu_list[i];
59 pr_debug("i: %d, offset: %d, length: %d,"
60 " pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
61 pdu->length, pdu->pdu_send_order, pdu->seq_no);
62 }
63}
64
65static void iscsit_ordered_seq_lists(
66 struct iscsi_cmd *cmd,
67 u8 type)
68{
69 u32 i, seq_count = 0;
70
71 for (i = 0; i < cmd->seq_count; i++) {
72 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
73 continue;
74 cmd->seq_list[i].seq_send_order = seq_count++;
75 }
76}
77
78static void iscsit_ordered_pdu_lists(
79 struct iscsi_cmd *cmd,
80 u8 type)
81{
82 u32 i, pdu_send_order = 0, seq_no = 0;
83
84 for (i = 0; i < cmd->pdu_count; i++) {
85redo:
86 if (cmd->pdu_list[i].seq_no == seq_no) {
87 cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
88 continue;
89 }
90 seq_no++;
91 pdu_send_order = 0;
92 goto redo;
93 }
94}
95
96/*
 97 * Generate count distinct random values into array[].
 98 * 0x80000000 temporarily marks already-generated entries so duplicates can be rejected.
99 */
100static void iscsit_create_random_array(u32 *array, u32 count)
101{
102 int i, j, k;
103
104 if (count == 1) {
105 array[0] = 0;
106 return;
107 }
108
109 for (i = 0; i < count; i++) {
110redo:
111 get_random_bytes(&j, sizeof(u32));
112 j = (1 + (int) (9999 + 1) - j) % count;
113 for (k = 0; k < i + 1; k++) {
114 j |= 0x80000000;
115 if ((array[k] & 0x80000000) && (array[k] == j))
116 goto redo;
117 }
118 array[i] = j;
119 }
120
121 for (i = 0; i < count; i++)
122 array[i] &= ~0x80000000;
123}
124
125static int iscsit_randomize_pdu_lists(
126 struct iscsi_cmd *cmd,
127 u8 type)
128{
129 int i = 0;
130 u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
131
132 for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
133redo:
134 if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
135 seq_count++;
136 continue;
137 }
138 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
139 if (!array) {
140 pr_err("Unable to allocate memory"
141 " for random array.\n");
142 return -1;
143 }
144 iscsit_create_random_array(array, seq_count);
145
146 for (i = 0; i < seq_count; i++)
147 cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
148
149 kfree(array);
150
151 seq_offset += seq_count;
152 seq_count = 0;
153 seq_no++;
154 goto redo;
155 }
156
157 if (seq_count) {
158 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
159 if (!array) {
160 pr_err("Unable to allocate memory for"
161 " random array.\n");
162 return -1;
163 }
164 iscsit_create_random_array(array, seq_count);
165
166 for (i = 0; i < seq_count; i++)
167 cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
168
169 kfree(array);
170 }
171
172 return 0;
173}
174
175static int iscsit_randomize_seq_lists(
176 struct iscsi_cmd *cmd,
177 u8 type)
178{
179 int i, j = 0;
180 u32 *array, seq_count = cmd->seq_count;
181
182 if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
183 seq_count--;
184 else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
185 seq_count -= 2;
186
187 if (!seq_count)
188 return 0;
189
190 array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
191 if (!array) {
192 pr_err("Unable to allocate memory for random array.\n");
193 return -1;
194 }
195 iscsit_create_random_array(array, seq_count);
196
197 for (i = 0; i < cmd->seq_count; i++) {
198 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
199 continue;
200 cmd->seq_list[i].seq_send_order = array[j++];
201 }
202
203 kfree(array);
204 return 0;
205}
206
207static void iscsit_determine_counts_for_list(
208 struct iscsi_cmd *cmd,
209 struct iscsi_build_list *bl,
210 u32 *seq_count,
211 u32 *pdu_count)
212{
213 int check_immediate = 0;
214 u32 burstlength = 0, offset = 0;
215 u32 unsolicited_data_length = 0;
216 struct iscsi_conn *conn = cmd->conn;
217
218 if ((bl->type == PDULIST_IMMEDIATE) ||
219 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
220 check_immediate = 1;
221
222 if ((bl->type == PDULIST_UNSOLICITED) ||
223 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
224 unsolicited_data_length = (cmd->data_length >
225 conn->sess->sess_ops->FirstBurstLength) ?
226 conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
227
228 while (offset < cmd->data_length) {
229 *pdu_count += 1;
230
231 if (check_immediate) {
232 check_immediate = 0;
233 offset += bl->immediate_data_length;
234 *seq_count += 1;
235 if (unsolicited_data_length)
236 unsolicited_data_length -=
237 bl->immediate_data_length;
238 continue;
239 }
240 if (unsolicited_data_length > 0) {
241 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
242 >= cmd->data_length) {
243 unsolicited_data_length -=
244 (cmd->data_length - offset);
245 offset += (cmd->data_length - offset);
246 continue;
247 }
248 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
249 >= conn->sess->sess_ops->FirstBurstLength) {
250 unsolicited_data_length -=
251 (conn->sess->sess_ops->FirstBurstLength -
252 offset);
253 offset += (conn->sess->sess_ops->FirstBurstLength -
254 offset);
255 burstlength = 0;
256 *seq_count += 1;
257 continue;
258 }
259
260 offset += conn->conn_ops->MaxRecvDataSegmentLength;
261 unsolicited_data_length -=
262 conn->conn_ops->MaxRecvDataSegmentLength;
263 continue;
264 }
265 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
266 cmd->data_length) {
267 offset += (cmd->data_length - offset);
268 continue;
269 }
270 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
271 conn->sess->sess_ops->MaxBurstLength) {
272 offset += (conn->sess->sess_ops->MaxBurstLength -
273 burstlength);
274 burstlength = 0;
275 *seq_count += 1;
276 continue;
277 }
278
279 burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
280 offset += conn->conn_ops->MaxRecvDataSegmentLength;
281 }
282}
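
As a worked example of the solicited, non-immediate path above (a standalone sketch with assumed values, not driver code): a 24 KB transfer with an 8 KB MaxRecvDataSegmentLength and a 16 KB MaxBurstLength walks out to 3 PDUs in 2 sequences, with seq_count starting at 1 exactly as iscsit_do_build_list() passes it in below.

#include <stdio.h>

/*
 * Standalone sketch of the solicited, non-immediate path above: walk the
 * transfer in MaxRecvDataSegmentLength-sized PDUs and close a sequence
 * whenever MaxBurstLength is reached.  All byte counts are example values.
 */
static void example_counts(unsigned int data_length, unsigned int mrdsl,
                           unsigned int max_burst,
                           unsigned int *seq_count, unsigned int *pdu_count)
{
        unsigned int offset = 0, burstlength = 0;

        *seq_count = 1;
        *pdu_count = 0;

        while (offset < data_length) {
                *pdu_count += 1;
                if (offset + mrdsl >= data_length) {
                        offset = data_length;
                        continue;
                }
                if (burstlength + mrdsl >= max_burst) {
                        offset += max_burst - burstlength;
                        burstlength = 0;
                        *seq_count += 1;
                        continue;
                }
                burstlength += mrdsl;
                offset += mrdsl;
        }
}

int main(void)
{
        unsigned int seqs, pdus;

        /* 24 KB transfer, 8 KB PDUs, 16 KB bursts -> 3 PDUs in 2 sequences */
        example_counts(24 * 1024, 8 * 1024, 16 * 1024, &seqs, &pdus);
        printf("seq_count=%u pdu_count=%u\n", seqs, pdus);
        return 0;
}
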
283
284
285/*
286 * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
287 * and/or DataPDUInOrder=No.
288 */
289static int iscsit_build_pdu_and_seq_list(
290 struct iscsi_cmd *cmd,
291 struct iscsi_build_list *bl)
292{
293 int check_immediate = 0, datapduinorder, datasequenceinorder;
294 u32 burstlength = 0, offset = 0, i = 0;
295 u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
296 struct iscsi_conn *conn = cmd->conn;
297 struct iscsi_pdu *pdu = cmd->pdu_list;
298 struct iscsi_seq *seq = cmd->seq_list;
299
300 datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
301 datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
302
303 if ((bl->type == PDULIST_IMMEDIATE) ||
304 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
305 check_immediate = 1;
306
307 if ((bl->type == PDULIST_UNSOLICITED) ||
308 (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
309 unsolicited_data_length = (cmd->data_length >
310 conn->sess->sess_ops->FirstBurstLength) ?
311 conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
312
313 while (offset < cmd->data_length) {
314 pdu_count++;
315 if (!datapduinorder) {
316 pdu[i].offset = offset;
317 pdu[i].seq_no = seq_no;
318 }
319 if (!datasequenceinorder && (pdu_count == 1)) {
320 seq[seq_no].pdu_start = i;
321 seq[seq_no].seq_no = seq_no;
322 seq[seq_no].offset = offset;
323 seq[seq_no].orig_offset = offset;
324 }
325
326 if (check_immediate) {
327 check_immediate = 0;
328 if (!datapduinorder) {
329 pdu[i].type = PDUTYPE_IMMEDIATE;
330 pdu[i++].length = bl->immediate_data_length;
331 }
332 if (!datasequenceinorder) {
333 seq[seq_no].type = SEQTYPE_IMMEDIATE;
334 seq[seq_no].pdu_count = 1;
335 seq[seq_no].xfer_len =
336 bl->immediate_data_length;
337 }
338 offset += bl->immediate_data_length;
339 pdu_count = 0;
340 seq_no++;
341 if (unsolicited_data_length)
342 unsolicited_data_length -=
343 bl->immediate_data_length;
344 continue;
345 }
346 if (unsolicited_data_length > 0) {
347 if ((offset +
348 conn->conn_ops->MaxRecvDataSegmentLength) >=
349 cmd->data_length) {
350 if (!datapduinorder) {
351 pdu[i].type = PDUTYPE_UNSOLICITED;
352 pdu[i].length =
353 (cmd->data_length - offset);
354 }
355 if (!datasequenceinorder) {
356 seq[seq_no].type = SEQTYPE_UNSOLICITED;
357 seq[seq_no].pdu_count = pdu_count;
358 seq[seq_no].xfer_len = (burstlength +
359 (cmd->data_length - offset));
360 }
361 unsolicited_data_length -=
362 (cmd->data_length - offset);
363 offset += (cmd->data_length - offset);
364 continue;
365 }
366 if ((offset +
367 conn->conn_ops->MaxRecvDataSegmentLength) >=
368 conn->sess->sess_ops->FirstBurstLength) {
369 if (!datapduinorder) {
370 pdu[i].type = PDUTYPE_UNSOLICITED;
371 pdu[i++].length =
372 (conn->sess->sess_ops->FirstBurstLength -
373 offset);
374 }
375 if (!datasequenceinorder) {
376 seq[seq_no].type = SEQTYPE_UNSOLICITED;
377 seq[seq_no].pdu_count = pdu_count;
378 seq[seq_no].xfer_len = (burstlength +
379 (conn->sess->sess_ops->FirstBurstLength -
380 offset));
381 }
382 unsolicited_data_length -=
383 (conn->sess->sess_ops->FirstBurstLength -
384 offset);
385 offset += (conn->sess->sess_ops->FirstBurstLength -
386 offset);
387 burstlength = 0;
388 pdu_count = 0;
389 seq_no++;
390 continue;
391 }
392
393 if (!datapduinorder) {
394 pdu[i].type = PDUTYPE_UNSOLICITED;
395 pdu[i++].length =
396 conn->conn_ops->MaxRecvDataSegmentLength;
397 }
398 burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
399 offset += conn->conn_ops->MaxRecvDataSegmentLength;
400 unsolicited_data_length -=
401 conn->conn_ops->MaxRecvDataSegmentLength;
402 continue;
403 }
404 if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
405 cmd->data_length) {
406 if (!datapduinorder) {
407 pdu[i].type = PDUTYPE_NORMAL;
408 pdu[i].length = (cmd->data_length - offset);
409 }
410 if (!datasequenceinorder) {
411 seq[seq_no].type = SEQTYPE_NORMAL;
412 seq[seq_no].pdu_count = pdu_count;
413 seq[seq_no].xfer_len = (burstlength +
414 (cmd->data_length - offset));
415 }
416 offset += (cmd->data_length - offset);
417 continue;
418 }
419 if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
420 conn->sess->sess_ops->MaxBurstLength) {
421 if (!datapduinorder) {
422 pdu[i].type = PDUTYPE_NORMAL;
423 pdu[i++].length =
424 (conn->sess->sess_ops->MaxBurstLength -
425 burstlength);
426 }
427 if (!datasequenceinorder) {
428 seq[seq_no].type = SEQTYPE_NORMAL;
429 seq[seq_no].pdu_count = pdu_count;
430 seq[seq_no].xfer_len = (burstlength +
431 (conn->sess->sess_ops->MaxBurstLength -
432 burstlength));
433 }
434 offset += (conn->sess->sess_ops->MaxBurstLength -
435 burstlength);
436 burstlength = 0;
437 pdu_count = 0;
438 seq_no++;
439 continue;
440 }
441
442 if (!datapduinorder) {
443 pdu[i].type = PDUTYPE_NORMAL;
444 pdu[i++].length =
445 conn->conn_ops->MaxRecvDataSegmentLength;
446 }
447 burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
448 offset += conn->conn_ops->MaxRecvDataSegmentLength;
449 }
450
451 if (!datasequenceinorder) {
452 if (bl->data_direction & ISCSI_PDU_WRITE) {
453 if (bl->randomize & RANDOM_R2T_OFFSETS) {
454 if (iscsit_randomize_seq_lists(cmd, bl->type)
455 < 0)
456 return -1;
457 } else
458 iscsit_ordered_seq_lists(cmd, bl->type);
459 } else if (bl->data_direction & ISCSI_PDU_READ) {
460 if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
461 if (iscsit_randomize_seq_lists(cmd, bl->type)
462 < 0)
463 return -1;
464 } else
465 iscsit_ordered_seq_lists(cmd, bl->type);
466 }
467#if 0
468 iscsit_dump_seq_list(cmd);
469#endif
470 }
471 if (!datapduinorder) {
472 if (bl->data_direction & ISCSI_PDU_WRITE) {
473 if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
474 if (iscsit_randomize_pdu_lists(cmd, bl->type)
475 < 0)
476 return -1;
477 } else
478 iscsit_ordered_pdu_lists(cmd, bl->type);
479 } else if (bl->data_direction & ISCSI_PDU_READ) {
480 if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
481 if (iscsit_randomize_pdu_lists(cmd, bl->type)
482 < 0)
483 return -1;
484 } else
485 iscsit_ordered_pdu_lists(cmd, bl->type);
486 }
487#if 0
488 iscsit_dump_pdu_list(cmd);
489#endif
490 }
491
492 return 0;
493}
494
495/*
496 * Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
497 */
498int iscsit_do_build_list(
499 struct iscsi_cmd *cmd,
500 struct iscsi_build_list *bl)
501{
502 u32 pdu_count = 0, seq_count = 1;
503 struct iscsi_conn *conn = cmd->conn;
504 struct iscsi_pdu *pdu = NULL;
505 struct iscsi_seq *seq = NULL;
506
507 iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
508
509 if (!conn->sess->sess_ops->DataSequenceInOrder) {
510 seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
511 if (!seq) {
512 pr_err("Unable to allocate struct iscsi_seq list\n");
513 return -1;
514 }
515 cmd->seq_list = seq;
516 cmd->seq_count = seq_count;
517 }
518
519 if (!conn->sess->sess_ops->DataPDUInOrder) {
520 pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
521 if (!pdu) {
522 pr_err("Unable to allocate struct iscsi_pdu list.\n");
523 kfree(seq);
524 return -1;
525 }
526 cmd->pdu_list = pdu;
527 cmd->pdu_count = pdu_count;
528 }
529
530 return iscsit_build_pdu_and_seq_list(cmd, bl);
531}
532
533struct iscsi_pdu *iscsit_get_pdu_holder(
534 struct iscsi_cmd *cmd,
535 u32 offset,
536 u32 length)
537{
538 u32 i;
539 struct iscsi_pdu *pdu = NULL;
540
541 if (!cmd->pdu_list) {
542 pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
543 return NULL;
544 }
545
546 pdu = &cmd->pdu_list[0];
547
548 for (i = 0; i < cmd->pdu_count; i++)
549 if ((pdu[i].offset == offset) && (pdu[i].length == length))
550 return &pdu[i];
551
552 pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
553 " %u, Length: %u\n", cmd->init_task_tag, offset, length);
554 return NULL;
555}
556
557struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
558 struct iscsi_cmd *cmd,
559 struct iscsi_seq *seq)
560{
561 u32 i;
562 struct iscsi_conn *conn = cmd->conn;
563 struct iscsi_pdu *pdu = NULL;
564
565 if (!cmd->pdu_list) {
566 pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
567 return NULL;
568 }
569
570 if (conn->sess->sess_ops->DataSequenceInOrder) {
571redo:
572 pdu = &cmd->pdu_list[cmd->pdu_start];
573
574 for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
575#if 0
576 pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
577 "_send_order: %d, pdu[i].offset: %d,"
578 " pdu[i].length: %d\n", pdu[i].seq_no,
579 pdu[i].pdu_send_order, pdu[i].offset,
580 pdu[i].length);
581#endif
582 if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
583 cmd->pdu_send_order++;
584 return &pdu[i];
585 }
586 }
587
588 cmd->pdu_start += cmd->pdu_send_order;
589 cmd->pdu_send_order = 0;
590 cmd->seq_no++;
591
592 if (cmd->pdu_start < cmd->pdu_count)
593 goto redo;
594
595 pr_err("Command ITT: 0x%08x unable to locate"
596 " struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
597 cmd->init_task_tag, cmd->pdu_send_order);
598 return NULL;
599 } else {
600 if (!seq) {
601 pr_err("struct iscsi_seq is NULL!\n");
602 return NULL;
603 }
604#if 0
605 pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
606 " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
607 seq->seq_no);
608#endif
609 pdu = &cmd->pdu_list[seq->pdu_start];
610
611 if (seq->pdu_send_order == seq->pdu_count) {
612 pr_err("Command ITT: 0x%08x seq->pdu_send"
613 "_order: %u equals seq->pdu_count: %u\n",
614 cmd->init_task_tag, seq->pdu_send_order,
615 seq->pdu_count);
616 return NULL;
617 }
618
619 for (i = 0; i < seq->pdu_count; i++) {
620 if (pdu[i].pdu_send_order == seq->pdu_send_order) {
621 seq->pdu_send_order++;
622 return &pdu[i];
623 }
624 }
625
626 pr_err("Command ITT: 0x%08x unable to locate struct"
627 " iscsi_pdu for seq->pdu_send_order: %u.\n",
628 cmd->init_task_tag, seq->pdu_send_order);
629 return NULL;
630 }
631
632 return NULL;
633}
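
A rough standalone illustration of the walk above: within one sequence the PDUs are emitted by ascending pdu_send_order rather than array order. The offsets and send orders are arbitrary example values.

#include <stdio.h>

/*
 * Standalone sketch: within one sequence, PDUs are transmitted by
 * ascending pdu_send_order, not by their position in the array.
 * Offsets and send orders below are arbitrary example values.
 */
struct example_pdu {
        unsigned int offset;
        unsigned int pdu_send_order;
};

int main(void)
{
        struct example_pdu pdu[4] = {
                { .offset = 0,     .pdu_send_order = 2 },
                { .offset = 4096,  .pdu_send_order = 0 },
                { .offset = 8192,  .pdu_send_order = 3 },
                { .offset = 12288, .pdu_send_order = 1 },
        };
        unsigned int next, i;

        for (next = 0; next < 4; next++)
                for (i = 0; i < 4; i++)
                        if (pdu[i].pdu_send_order == next)
                                printf("send PDU at offset %u\n",
                                       pdu[i].offset);
        return 0;
}
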
634
635struct iscsi_seq *iscsit_get_seq_holder(
636 struct iscsi_cmd *cmd,
637 u32 offset,
638 u32 length)
639{
640 u32 i;
641
642 if (!cmd->seq_list) {
643 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
644 return NULL;
645 }
646
647 for (i = 0; i < cmd->seq_count; i++) {
648#if 0
649 pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
650 "xfer_len: %d, seq_list[i].seq_no %u\n",
651 cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
652 cmd->seq_list[i].seq_no);
653#endif
654 if ((cmd->seq_list[i].orig_offset +
655 cmd->seq_list[i].xfer_len) >=
656 (offset + length))
657 return &cmd->seq_list[i];
658 }
659
660 pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
661 " Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
662 length);
663 return NULL;
664}
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
new file mode 100644
index 000000000000..0d52a10e3069
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -0,0 +1,86 @@
1#ifndef ISCSI_SEQ_AND_PDU_LIST_H
2#define ISCSI_SEQ_AND_PDU_LIST_H
3
4/* struct iscsi_pdu->status */
5#define DATAOUT_PDU_SENT 1
6
7/* struct iscsi_seq->type */
8#define SEQTYPE_IMMEDIATE 1
9#define SEQTYPE_UNSOLICITED 2
10#define SEQTYPE_NORMAL 3
11
12/* struct iscsi_seq->status */
13#define DATAOUT_SEQUENCE_GOT_R2T 1
14#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
15#define DATAOUT_SEQUENCE_COMPLETE 3
16
17/* iscsit_determine_counts_for_list() type */
18#define PDULIST_NORMAL 1
19#define PDULIST_IMMEDIATE 2
20#define PDULIST_UNSOLICITED 3
21#define PDULIST_IMMEDIATE_AND_UNSOLICITED 4
22
23/* struct iscsi_pdu->type */
24#define PDUTYPE_IMMEDIATE 1
25#define PDUTYPE_UNSOLICITED 2
26#define PDUTYPE_NORMAL 3
27
28/* struct iscsi_pdu->status */
29#define ISCSI_PDU_NOT_RECEIVED 0
30#define ISCSI_PDU_RECEIVED_OK 1
31#define ISCSI_PDU_CRC_FAILED 2
32#define ISCSI_PDU_TIMED_OUT 3
33
34/* struct iscsi_build_list->randomize */
35#define RANDOM_DATAIN_PDU_OFFSETS 0x01
36#define RANDOM_DATAIN_SEQ_OFFSETS 0x02
37#define RANDOM_DATAOUT_PDU_OFFSETS 0x04
38#define RANDOM_R2T_OFFSETS 0x08
39
40/* struct iscsi_build_list->data_direction */
41#define ISCSI_PDU_READ 0x01
42#define ISCSI_PDU_WRITE 0x02
43
44struct iscsi_build_list {
45 int data_direction;
46 int randomize;
47 int type;
48 int immediate_data_length;
49};
50
51struct iscsi_pdu {
52 int status;
53 int type;
54 u8 flags;
55 u32 data_sn;
56 u32 length;
57 u32 offset;
58 u32 pdu_send_order;
59 u32 seq_no;
60} ____cacheline_aligned;
61
62struct iscsi_seq {
63 int sent;
64 int status;
65 int type;
66 u32 data_sn;
67 u32 first_datasn;
68 u32 last_datasn;
69 u32 next_burst_len;
70 u32 pdu_start;
71 u32 pdu_count;
72 u32 offset;
73 u32 orig_offset;
74 u32 pdu_send_order;
75 u32 r2t_sn;
76 u32 seq_send_order;
77 u32 seq_no;
78 u32 xfer_len;
79} ____cacheline_aligned;
80
81extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
82extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
83extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
84extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
85
86#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
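
A hypothetical caller sketch of this interface (an assumption about usage, not part of the patch, and not buildable outside the surrounding kernel code since struct iscsi_cmd and memset() come from other headers): fill a struct iscsi_build_list for a READ command whose DataIN PDU and sequence offsets should be randomized, then hand it to iscsit_do_build_list().

/*
 * Hypothetical caller (assumption, not part of this patch): build
 * randomized DataIN PDU and sequence lists for a READ command when the
 * negotiated DataPDUInOrder/DataSequenceInOrder keys are "No".
 */
static int example_build_datain_lists(struct iscsi_cmd *cmd)
{
        struct iscsi_build_list bl;

        memset(&bl, 0, sizeof(bl));
        bl.data_direction        = ISCSI_PDU_READ;
        bl.randomize             = RANDOM_DATAIN_PDU_OFFSETS |
                                   RANDOM_DATAIN_SEQ_OFFSETS;
        bl.type                  = PDULIST_NORMAL;
        bl.immediate_data_length = 0;

        /* Allocates cmd->seq_list and/or cmd->pdu_list as needed. */
        return iscsit_do_build_list(cmd, &bl);
}
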
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
new file mode 100644
index 000000000000..bbdbe9301b27
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -0,0 +1,950 @@
1/*******************************************************************************
2 * Modern ConfigFS group context specific iSCSI statistics based on original
3 * iscsi_target_mib.c code
4 *
5 * Copyright (c) 2011 Rising Tide Systems
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 ******************************************************************************/
21
22#include <linux/configfs.h>
23#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h>
25#include <target/target_core_transport.h>
26#include <target/configfs_macros.h>
27
28#include "iscsi_target_core.h"
29#include "iscsi_target_parameters.h"
30#include "iscsi_target_device.h"
31#include "iscsi_target_tpg.h"
32#include "iscsi_target_util.h"
33#include "iscsi_target_stat.h"
34
35#ifndef INITIAL_JIFFIES
36#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
37#endif
38
39/* Instance Attributes Table */
40#define ISCSI_INST_NUM_NODES 1
41#define ISCSI_INST_DESCR "Storage Engine Target"
42#define ISCSI_INST_LAST_FAILURE_TYPE 0
43#define ISCSI_DISCONTINUITY_TIME 0
44
45#define ISCSI_NODE_INDEX 1
46
47#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
48
49/****************************************************************************
50 * iSCSI MIB Tables
51 ****************************************************************************/
52/*
53 * Instance Attributes Table
54 */
55CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
56#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \
57static struct iscsi_stat_instance_attribute \
58 iscsi_stat_instance_##_name = \
59 __CONFIGFS_EATTR(_name, _mode, \
60 iscsi_stat_instance_show_attr_##_name, \
61 iscsi_stat_instance_store_attr_##_name);
62
63#define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \
64static struct iscsi_stat_instance_attribute \
65 iscsi_stat_instance_##_name = \
66 __CONFIGFS_EATTR_RO(_name, \
67 iscsi_stat_instance_show_attr_##_name);
68
69static ssize_t iscsi_stat_instance_show_attr_inst(
70 struct iscsi_wwn_stat_grps *igrps, char *page)
71{
72 struct iscsi_tiqn *tiqn = container_of(igrps,
73 struct iscsi_tiqn, tiqn_stat_grps);
74
75 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
76}
77ISCSI_STAT_INSTANCE_ATTR_RO(inst);
78
79static ssize_t iscsi_stat_instance_show_attr_min_ver(
80 struct iscsi_wwn_stat_grps *igrps, char *page)
81{
82 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
83}
84ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
85
86static ssize_t iscsi_stat_instance_show_attr_max_ver(
87 struct iscsi_wwn_stat_grps *igrps, char *page)
88{
89 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
90}
91ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
92
93static ssize_t iscsi_stat_instance_show_attr_portals(
94 struct iscsi_wwn_stat_grps *igrps, char *page)
95{
96 struct iscsi_tiqn *tiqn = container_of(igrps,
97 struct iscsi_tiqn, tiqn_stat_grps);
98
99 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
100}
101ISCSI_STAT_INSTANCE_ATTR_RO(portals);
102
103static ssize_t iscsi_stat_instance_show_attr_nodes(
104 struct iscsi_wwn_stat_grps *igrps, char *page)
105{
106 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
107}
108ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
109
110static ssize_t iscsi_stat_instance_show_attr_sessions(
111 struct iscsi_wwn_stat_grps *igrps, char *page)
112{
113 struct iscsi_tiqn *tiqn = container_of(igrps,
114 struct iscsi_tiqn, tiqn_stat_grps);
115
116 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
117}
118ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
119
120static ssize_t iscsi_stat_instance_show_attr_fail_sess(
121 struct iscsi_wwn_stat_grps *igrps, char *page)
122{
123 struct iscsi_tiqn *tiqn = container_of(igrps,
124 struct iscsi_tiqn, tiqn_stat_grps);
125 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
126 u32 sess_err_count;
127
128 spin_lock_bh(&sess_err->lock);
129 sess_err_count = (sess_err->digest_errors +
130 sess_err->cxn_timeout_errors +
131 sess_err->pdu_format_errors);
132 spin_unlock_bh(&sess_err->lock);
133
134 return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
135}
136ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
137
138static ssize_t iscsi_stat_instance_show_attr_fail_type(
139 struct iscsi_wwn_stat_grps *igrps, char *page)
140{
141 struct iscsi_tiqn *tiqn = container_of(igrps,
142 struct iscsi_tiqn, tiqn_stat_grps);
143 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
144
145 return snprintf(page, PAGE_SIZE, "%u\n",
146 sess_err->last_sess_failure_type);
147}
148ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
149
150static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
151 struct iscsi_wwn_stat_grps *igrps, char *page)
152{
153 struct iscsi_tiqn *tiqn = container_of(igrps,
154 struct iscsi_tiqn, tiqn_stat_grps);
155 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
156
157 return snprintf(page, PAGE_SIZE, "%s\n",
158 sess_err->last_sess_fail_rem_name[0] ?
159 sess_err->last_sess_fail_rem_name : NONE);
160}
161ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
162
163static ssize_t iscsi_stat_instance_show_attr_disc_time(
164 struct iscsi_wwn_stat_grps *igrps, char *page)
165{
166 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
167}
168ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
169
170static ssize_t iscsi_stat_instance_show_attr_description(
171 struct iscsi_wwn_stat_grps *igrps, char *page)
172{
173 return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
174}
175ISCSI_STAT_INSTANCE_ATTR_RO(description);
176
177static ssize_t iscsi_stat_instance_show_attr_vendor(
178 struct iscsi_wwn_stat_grps *igrps, char *page)
179{
180 return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
181}
182ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
183
184static ssize_t iscsi_stat_instance_show_attr_version(
185 struct iscsi_wwn_stat_grps *igrps, char *page)
186{
187 return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
188}
189ISCSI_STAT_INSTANCE_ATTR_RO(version);
190
191CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
192 iscsi_instance_group);
193
194static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
195 &iscsi_stat_instance_inst.attr,
196 &iscsi_stat_instance_min_ver.attr,
197 &iscsi_stat_instance_max_ver.attr,
198 &iscsi_stat_instance_portals.attr,
199 &iscsi_stat_instance_nodes.attr,
200 &iscsi_stat_instance_sessions.attr,
201 &iscsi_stat_instance_fail_sess.attr,
202 &iscsi_stat_instance_fail_type.attr,
203 &iscsi_stat_instance_fail_rem_name.attr,
204 &iscsi_stat_instance_disc_time.attr,
205 &iscsi_stat_instance_description.attr,
206 &iscsi_stat_instance_vendor.attr,
207 &iscsi_stat_instance_version.attr,
208 NULL,
209};
210
211static struct configfs_item_operations iscsi_stat_instance_item_ops = {
212 .show_attribute = iscsi_stat_instance_attr_show,
213 .store_attribute = iscsi_stat_instance_attr_store,
214};
215
216struct config_item_type iscsi_stat_instance_cit = {
217 .ct_item_ops = &iscsi_stat_instance_item_ops,
218 .ct_attrs = iscsi_stat_instance_attrs,
219 .ct_owner = THIS_MODULE,
220};
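
For reference, the read-only helper above expands roughly as follows for the inst attribute; this shows only the first-level expansion, and the __CONFIGFS_EATTR_RO() definition itself lives in target/configfs_macros.h.

/* Approximate expansion of ISCSI_STAT_INSTANCE_ATTR_RO(inst) */
static struct iscsi_stat_instance_attribute iscsi_stat_instance_inst =
        __CONFIGFS_EATTR_RO(inst, iscsi_stat_instance_show_attr_inst);
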
221
222/*
223 * Instance Session Failure Stats Table
224 */
225CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
226#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \
227static struct iscsi_stat_sess_err_attribute \
228 iscsi_stat_sess_err_##_name = \
229 __CONFIGFS_EATTR(_name, _mode, \
230 iscsi_stat_sess_err_show_attr_##_name, \
231 iscsi_stat_sess_err_store_attr_##_name);
232
233#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \
234static struct iscsi_stat_sess_err_attribute \
235 iscsi_stat_sess_err_##_name = \
236 __CONFIGFS_EATTR_RO(_name, \
237 iscsi_stat_sess_err_show_attr_##_name);
238
239static ssize_t iscsi_stat_sess_err_show_attr_inst(
240 struct iscsi_wwn_stat_grps *igrps, char *page)
241{
242 struct iscsi_tiqn *tiqn = container_of(igrps,
243 struct iscsi_tiqn, tiqn_stat_grps);
244
245 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
246}
247ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
248
249static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
250 struct iscsi_wwn_stat_grps *igrps, char *page)
251{
252 struct iscsi_tiqn *tiqn = container_of(igrps,
253 struct iscsi_tiqn, tiqn_stat_grps);
254 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
255
256 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
257}
258ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
259
260static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
261 struct iscsi_wwn_stat_grps *igrps, char *page)
262{
263 struct iscsi_tiqn *tiqn = container_of(igrps,
264 struct iscsi_tiqn, tiqn_stat_grps);
265 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
266
267 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
268}
269ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
270
271static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
272 struct iscsi_wwn_stat_grps *igrps, char *page)
273{
274 struct iscsi_tiqn *tiqn = container_of(igrps,
275 struct iscsi_tiqn, tiqn_stat_grps);
276 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
277
278 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
279}
280ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
281
282CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
283 iscsi_sess_err_group);
284
285static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
286 &iscsi_stat_sess_err_inst.attr,
287 &iscsi_stat_sess_err_digest_errors.attr,
288 &iscsi_stat_sess_err_cxn_errors.attr,
289 &iscsi_stat_sess_err_format_errors.attr,
290 NULL,
291};
292
293static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
294 .show_attribute = iscsi_stat_sess_err_attr_show,
295 .store_attribute = iscsi_stat_sess_err_attr_store,
296};
297
298struct config_item_type iscsi_stat_sess_err_cit = {
299 .ct_item_ops = &iscsi_stat_sess_err_item_ops,
300 .ct_attrs = iscsi_stat_sess_err_attrs,
301 .ct_owner = THIS_MODULE,
302};
303
304/*
305 * Target Attributes Table
306 */
307CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
308#define ISCSI_STAT_TGT_ATTR(_name, _mode) \
309static struct iscsi_stat_tgt_attr_attribute \
310 iscsi_stat_tgt_attr_##_name = \
311 __CONFIGFS_EATTR(_name, _mode, \
312 iscsi_stat_tgt_attr_show_attr_##_name, \
313 iscsi_stat_tgt_attr_store_attr_##_name);
314
315#define ISCSI_STAT_TGT_ATTR_RO(_name) \
316static struct iscsi_stat_tgt_attr_attribute \
317 iscsi_stat_tgt_attr_##_name = \
318 __CONFIGFS_EATTR_RO(_name, \
319 iscsi_stat_tgt_attr_show_attr_##_name);
320
321static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
322 struct iscsi_wwn_stat_grps *igrps, char *page)
323{
324 struct iscsi_tiqn *tiqn = container_of(igrps,
325 struct iscsi_tiqn, tiqn_stat_grps);
326
327 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
328}
329ISCSI_STAT_TGT_ATTR_RO(inst);
330
331static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
332 struct iscsi_wwn_stat_grps *igrps, char *page)
333{
334 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
335}
336ISCSI_STAT_TGT_ATTR_RO(indx);
337
338static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
339 struct iscsi_wwn_stat_grps *igrps, char *page)
340{
341 struct iscsi_tiqn *tiqn = container_of(igrps,
342 struct iscsi_tiqn, tiqn_stat_grps);
343 struct iscsi_login_stats *lstat = &tiqn->login_stats;
344 u32 fail_count;
345
346 spin_lock(&lstat->lock);
347 fail_count = (lstat->redirects + lstat->authorize_fails +
348 lstat->authenticate_fails + lstat->negotiate_fails +
349 lstat->other_fails);
350 spin_unlock(&lstat->lock);
351
352 return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
353}
354ISCSI_STAT_TGT_ATTR_RO(login_fails);
355
356static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
357 struct iscsi_wwn_stat_grps *igrps, char *page)
358{
359 struct iscsi_tiqn *tiqn = container_of(igrps,
360 struct iscsi_tiqn, tiqn_stat_grps);
361 struct iscsi_login_stats *lstat = &tiqn->login_stats;
362 u32 last_fail_time;
363
364 spin_lock(&lstat->lock);
365 last_fail_time = lstat->last_fail_time ?
366 (u32)(((u32)lstat->last_fail_time -
367 INITIAL_JIFFIES) * 100 / HZ) : 0;
368 spin_unlock(&lstat->lock);
369
370 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
371}
372ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
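
The conversion above reports the failure time as hundredths of a second since boot (SNMP TimeTicks style), offset by INITIAL_JIFFIES. A standalone sketch with an assumed HZ of 250 and an arbitrary five-second sample:

#include <stdio.h>

/*
 * Standalone sketch of the conversion above: a jiffies timestamp becomes
 * hundredths of a second since boot.  EXAMPLE_HZ and the five-second
 * sample are arbitrary illustrative values, not kernel constants.
 */
#define EXAMPLE_HZ              250
#define EXAMPLE_INITIAL_JIFFIES ((unsigned int)(-300 * EXAMPLE_HZ))

int main(void)
{
        unsigned int last_fail_jiffies = EXAMPLE_INITIAL_JIFFIES +
                                         5 * EXAMPLE_HZ;
        unsigned int ticks;

        ticks = (last_fail_jiffies - EXAMPLE_INITIAL_JIFFIES) * 100 /
                EXAMPLE_HZ;
        printf("%u\n", ticks);  /* five seconds -> prints 500 */
        return 0;
}
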
373
374static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
375 struct iscsi_wwn_stat_grps *igrps, char *page)
376{
377 struct iscsi_tiqn *tiqn = container_of(igrps,
378 struct iscsi_tiqn, tiqn_stat_grps);
379 struct iscsi_login_stats *lstat = &tiqn->login_stats;
380 u32 last_fail_type;
381
382 spin_lock(&lstat->lock);
383 last_fail_type = lstat->last_fail_type;
384 spin_unlock(&lstat->lock);
385
386 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
387}
388ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
389
390static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
391 struct iscsi_wwn_stat_grps *igrps, char *page)
392{
393 struct iscsi_tiqn *tiqn = container_of(igrps,
394 struct iscsi_tiqn, tiqn_stat_grps);
395 struct iscsi_login_stats *lstat = &tiqn->login_stats;
396 unsigned char buf[224];
397
398 spin_lock(&lstat->lock);
399 snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
400 lstat->last_intr_fail_name : NONE);
401 spin_unlock(&lstat->lock);
402
403 return snprintf(page, PAGE_SIZE, "%s\n", buf);
404}
405ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
406
407static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
408 struct iscsi_wwn_stat_grps *igrps, char *page)
409{
410 struct iscsi_tiqn *tiqn = container_of(igrps,
411 struct iscsi_tiqn, tiqn_stat_grps);
412 struct iscsi_login_stats *lstat = &tiqn->login_stats;
413 unsigned char buf[8];
414
415 spin_lock(&lstat->lock);
416 snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_family == AF_INET6) ?
417 "ipv6" : "ipv4");
418 spin_unlock(&lstat->lock);
419
420 return snprintf(page, PAGE_SIZE, "%s\n", buf);
421}
422ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
423
424static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
425 struct iscsi_wwn_stat_grps *igrps, char *page)
426{
427 struct iscsi_tiqn *tiqn = container_of(igrps,
428 struct iscsi_tiqn, tiqn_stat_grps);
429 struct iscsi_login_stats *lstat = &tiqn->login_stats;
430 unsigned char buf[32];
431
432 spin_lock(&lstat->lock);
433 if (lstat->last_intr_fail_ip_family == AF_INET6)
434 snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
435 else
436 snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
437 spin_unlock(&lstat->lock);
438
439 return snprintf(page, PAGE_SIZE, "%s\n", buf);
440}
441ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
442
443CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
444 iscsi_tgt_attr_group);
445
446static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
447 &iscsi_stat_tgt_attr_inst.attr,
448 &iscsi_stat_tgt_attr_indx.attr,
449 &iscsi_stat_tgt_attr_login_fails.attr,
450 &iscsi_stat_tgt_attr_last_fail_time.attr,
451 &iscsi_stat_tgt_attr_last_fail_type.attr,
452 &iscsi_stat_tgt_attr_fail_intr_name.attr,
453 &iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
454 &iscsi_stat_tgt_attr_fail_intr_addr.attr,
455 NULL,
456};
457
458static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
459 .show_attribute = iscsi_stat_tgt_attr_attr_show,
460 .store_attribute = iscsi_stat_tgt_attr_attr_store,
461};
462
463struct config_item_type iscsi_stat_tgt_attr_cit = {
464 .ct_item_ops = &iscsi_stat_tgt_attr_item_ops,
465 .ct_attrs = iscsi_stat_tgt_attr_attrs,
466 .ct_owner = THIS_MODULE,
467};
468
469/*
470 * Target Login Stats Table
471 */
472CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
473#define ISCSI_STAT_LOGIN(_name, _mode) \
474static struct iscsi_stat_login_attribute \
475 iscsi_stat_login_##_name = \
476 __CONFIGFS_EATTR(_name, _mode, \
477 iscsi_stat_login_show_attr_##_name, \
478 iscsi_stat_login_store_attr_##_name);
479
480#define ISCSI_STAT_LOGIN_RO(_name) \
481static struct iscsi_stat_login_attribute \
482 iscsi_stat_login_##_name = \
483 __CONFIGFS_EATTR_RO(_name, \
484 iscsi_stat_login_show_attr_##_name);
485
486static ssize_t iscsi_stat_login_show_attr_inst(
487 struct iscsi_wwn_stat_grps *igrps, char *page)
488{
489 struct iscsi_tiqn *tiqn = container_of(igrps,
490 struct iscsi_tiqn, tiqn_stat_grps);
491
492 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
493}
494ISCSI_STAT_LOGIN_RO(inst);
495
496static ssize_t iscsi_stat_login_show_attr_indx(
497 struct iscsi_wwn_stat_grps *igrps, char *page)
498{
499 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
500}
501ISCSI_STAT_LOGIN_RO(indx);
502
503static ssize_t iscsi_stat_login_show_attr_accepts(
504 struct iscsi_wwn_stat_grps *igrps, char *page)
505{
506 struct iscsi_tiqn *tiqn = container_of(igrps,
507 struct iscsi_tiqn, tiqn_stat_grps);
508 struct iscsi_login_stats *lstat = &tiqn->login_stats;
509 ssize_t ret;
510
511 spin_lock(&lstat->lock);
512 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
513 spin_unlock(&lstat->lock);
514
515 return ret;
516}
517ISCSI_STAT_LOGIN_RO(accepts);
518
519static ssize_t iscsi_stat_login_show_attr_other_fails(
520 struct iscsi_wwn_stat_grps *igrps, char *page)
521{
522 struct iscsi_tiqn *tiqn = container_of(igrps,
523 struct iscsi_tiqn, tiqn_stat_grps);
524 struct iscsi_login_stats *lstat = &tiqn->login_stats;
525 ssize_t ret;
526
527 spin_lock(&lstat->lock);
528 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
529 spin_unlock(&lstat->lock);
530
531 return ret;
532}
533ISCSI_STAT_LOGIN_RO(other_fails);
534
535static ssize_t iscsi_stat_login_show_attr_redirects(
536 struct iscsi_wwn_stat_grps *igrps, char *page)
537{
538 struct iscsi_tiqn *tiqn = container_of(igrps,
539 struct iscsi_tiqn, tiqn_stat_grps);
540 struct iscsi_login_stats *lstat = &tiqn->login_stats;
541 ssize_t ret;
542
543 spin_lock(&lstat->lock);
544 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
545 spin_unlock(&lstat->lock);
546
547 return ret;
548}
549ISCSI_STAT_LOGIN_RO(redirects);
550
551static ssize_t iscsi_stat_login_show_attr_authorize_fails(
552 struct iscsi_wwn_stat_grps *igrps, char *page)
553{
554 struct iscsi_tiqn *tiqn = container_of(igrps,
555 struct iscsi_tiqn, tiqn_stat_grps);
556 struct iscsi_login_stats *lstat = &tiqn->login_stats;
557 ssize_t ret;
558
559 spin_lock(&lstat->lock);
560 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
561 spin_unlock(&lstat->lock);
562
563 return ret;
564}
565ISCSI_STAT_LOGIN_RO(authorize_fails);
566
567static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
568 struct iscsi_wwn_stat_grps *igrps, char *page)
569{
570 struct iscsi_tiqn *tiqn = container_of(igrps,
571 struct iscsi_tiqn, tiqn_stat_grps);
572 struct iscsi_login_stats *lstat = &tiqn->login_stats;
573 ssize_t ret;
574
575 spin_lock(&lstat->lock);
576 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
577 spin_unlock(&lstat->lock);
578
579 return ret;
580}
581ISCSI_STAT_LOGIN_RO(authenticate_fails);
582
583static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
584 struct iscsi_wwn_stat_grps *igrps, char *page)
585{
586 struct iscsi_tiqn *tiqn = container_of(igrps,
587 struct iscsi_tiqn, tiqn_stat_grps);
588 struct iscsi_login_stats *lstat = &tiqn->login_stats;
589 ssize_t ret;
590
591 spin_lock(&lstat->lock);
592 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
593 spin_unlock(&lstat->lock);
594
595 return ret;
596}
597ISCSI_STAT_LOGIN_RO(negotiate_fails);
598
599CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
600 iscsi_login_stats_group);
601
602static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
603 &iscsi_stat_login_inst.attr,
604 &iscsi_stat_login_indx.attr,
605 &iscsi_stat_login_accepts.attr,
606 &iscsi_stat_login_other_fails.attr,
607 &iscsi_stat_login_redirects.attr,
608 &iscsi_stat_login_authorize_fails.attr,
609 &iscsi_stat_login_authenticate_fails.attr,
610 &iscsi_stat_login_negotiate_fails.attr,
611 NULL,
612};
613
614static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
615 .show_attribute = iscsi_stat_login_attr_show,
616 .store_attribute = iscsi_stat_login_attr_store,
617};
618
619struct config_item_type iscsi_stat_login_cit = {
620 .ct_item_ops = &iscsi_stat_login_stats_item_ops,
621 .ct_attrs = iscsi_stat_login_stats_attrs,
622 .ct_owner = THIS_MODULE,
623};
624
625/*
626 * Target Logout Stats Table
627 */
628
629CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
630#define ISCSI_STAT_LOGOUT(_name, _mode) \
631static struct iscsi_stat_logout_attribute \
632 iscsi_stat_logout_##_name = \
633 __CONFIGFS_EATTR(_name, _mode, \
634 iscsi_stat_logout_show_attr_##_name, \
635 iscsi_stat_logout_store_attr_##_name);
636
637#define ISCSI_STAT_LOGOUT_RO(_name) \
638static struct iscsi_stat_logout_attribute \
639 iscsi_stat_logout_##_name = \
640 __CONFIGFS_EATTR_RO(_name, \
641 iscsi_stat_logout_show_attr_##_name);
642
643static ssize_t iscsi_stat_logout_show_attr_inst(
644 struct iscsi_wwn_stat_grps *igrps, char *page)
645{
646 struct iscsi_tiqn *tiqn = container_of(igrps,
647 struct iscsi_tiqn, tiqn_stat_grps);
648
649 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
650}
651ISCSI_STAT_LOGOUT_RO(inst);
652
653static ssize_t iscsi_stat_logout_show_attr_indx(
654 struct iscsi_wwn_stat_grps *igrps, char *page)
655{
656 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
657}
658ISCSI_STAT_LOGOUT_RO(indx);
659
660static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
661 struct iscsi_wwn_stat_grps *igrps, char *page)
662{
663 struct iscsi_tiqn *tiqn = container_of(igrps,
664 struct iscsi_tiqn, tiqn_stat_grps);
665 struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
666
667 return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
668}
669ISCSI_STAT_LOGOUT_RO(normal_logouts);
670
671static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
672 struct iscsi_wwn_stat_grps *igrps, char *page)
673{
674 struct iscsi_tiqn *tiqn = container_of(igrps,
675 struct iscsi_tiqn, tiqn_stat_grps);
676 struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
677
678 return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
679}
680ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
681
682CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
683 iscsi_logout_stats_group);
684
685static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
686 &iscsi_stat_logout_inst.attr,
687 &iscsi_stat_logout_indx.attr,
688 &iscsi_stat_logout_normal_logouts.attr,
689 &iscsi_stat_logout_abnormal_logouts.attr,
690 NULL,
691};
692
693static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
694 .show_attribute = iscsi_stat_logout_attr_show,
695 .store_attribute = iscsi_stat_logout_attr_store,
696};
697
698struct config_item_type iscsi_stat_logout_cit = {
699 .ct_item_ops = &iscsi_stat_logout_stats_item_ops,
700 .ct_attrs = iscsi_stat_logout_stats_attrs,
701 .ct_owner = THIS_MODULE,
702};
703
704/*
705 * Session Stats Table
706 */
707
708CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
709#define ISCSI_STAT_SESS(_name, _mode) \
710static struct iscsi_stat_sess_attribute \
711 iscsi_stat_sess_##_name = \
712 __CONFIGFS_EATTR(_name, _mode, \
713 iscsi_stat_sess_show_attr_##_name, \
714 iscsi_stat_sess_store_attr_##_name);
715
716#define ISCSI_STAT_SESS_RO(_name) \
717static struct iscsi_stat_sess_attribute \
718 iscsi_stat_sess_##_name = \
719 __CONFIGFS_EATTR_RO(_name, \
720 iscsi_stat_sess_show_attr_##_name);
721
722static ssize_t iscsi_stat_sess_show_attr_inst(
723 struct iscsi_node_stat_grps *igrps, char *page)
724{
725 struct iscsi_node_acl *acl = container_of(igrps,
726 struct iscsi_node_acl, node_stat_grps);
727 struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
728 struct iscsi_tiqn *tiqn = container_of(wwn,
729 struct iscsi_tiqn, tiqn_wwn);
730
731 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
732}
733ISCSI_STAT_SESS_RO(inst);
734
735static ssize_t iscsi_stat_sess_show_attr_node(
736 struct iscsi_node_stat_grps *igrps, char *page)
737{
738 struct iscsi_node_acl *acl = container_of(igrps,
739 struct iscsi_node_acl, node_stat_grps);
740 struct se_node_acl *se_nacl = &acl->se_node_acl;
741 struct iscsi_session *sess;
742 struct se_session *se_sess;
743 ssize_t ret = 0;
744
745 spin_lock_bh(&se_nacl->nacl_sess_lock);
746 se_sess = se_nacl->nacl_sess;
747 if (se_sess) {
748 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
749 if (sess)
750 ret = snprintf(page, PAGE_SIZE, "%u\n",
751 sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
752 }
753 spin_unlock_bh(&se_nacl->nacl_sess_lock);
754
755 return ret;
756}
757ISCSI_STAT_SESS_RO(node);
758
759static ssize_t iscsi_stat_sess_show_attr_indx(
760 struct iscsi_node_stat_grps *igrps, char *page)
761{
762 struct iscsi_node_acl *acl = container_of(igrps,
763 struct iscsi_node_acl, node_stat_grps);
764 struct se_node_acl *se_nacl = &acl->se_node_acl;
765 struct iscsi_session *sess;
766 struct se_session *se_sess;
767 ssize_t ret = 0;
768
769 spin_lock_bh(&se_nacl->nacl_sess_lock);
770 se_sess = se_nacl->nacl_sess;
771 if (se_sess) {
772 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
773 if (sess)
774 ret = snprintf(page, PAGE_SIZE, "%u\n",
775 sess->session_index);
776 }
777 spin_unlock_bh(&se_nacl->nacl_sess_lock);
778
779 return ret;
780}
781ISCSI_STAT_SESS_RO(indx);
782
783static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
784 struct iscsi_node_stat_grps *igrps, char *page)
785{
786 struct iscsi_node_acl *acl = container_of(igrps,
787 struct iscsi_node_acl, node_stat_grps);
788 struct se_node_acl *se_nacl = &acl->se_node_acl;
789 struct iscsi_session *sess;
790 struct se_session *se_sess;
791 ssize_t ret = 0;
792
793 spin_lock_bh(&se_nacl->nacl_sess_lock);
794 se_sess = se_nacl->nacl_sess;
795 if (se_sess) {
796 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
797 if (sess)
798 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
799 }
800 spin_unlock_bh(&se_nacl->nacl_sess_lock);
801
802 return ret;
803}
804ISCSI_STAT_SESS_RO(cmd_pdus);
805
806static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
807 struct iscsi_node_stat_grps *igrps, char *page)
808{
809 struct iscsi_node_acl *acl = container_of(igrps,
810 struct iscsi_node_acl, node_stat_grps);
811 struct se_node_acl *se_nacl = &acl->se_node_acl;
812 struct iscsi_session *sess;
813 struct se_session *se_sess;
814 ssize_t ret = 0;
815
816 spin_lock_bh(&se_nacl->nacl_sess_lock);
817 se_sess = se_nacl->nacl_sess;
818 if (se_sess) {
819 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
820 if (sess)
821 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
822 }
823 spin_unlock_bh(&se_nacl->nacl_sess_lock);
824
825 return ret;
826}
827ISCSI_STAT_SESS_RO(rsp_pdus);
828
829static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
830 struct iscsi_node_stat_grps *igrps, char *page)
831{
832 struct iscsi_node_acl *acl = container_of(igrps,
833 struct iscsi_node_acl, node_stat_grps);
834 struct se_node_acl *se_nacl = &acl->se_node_acl;
835 struct iscsi_session *sess;
836 struct se_session *se_sess;
837 ssize_t ret = 0;
838
839 spin_lock_bh(&se_nacl->nacl_sess_lock);
840 se_sess = se_nacl->nacl_sess;
841 if (se_sess) {
842 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
843 if (sess)
844 ret = snprintf(page, PAGE_SIZE, "%llu\n",
845 (unsigned long long)sess->tx_data_octets);
846 }
847 spin_unlock_bh(&se_nacl->nacl_sess_lock);
848
849 return ret;
850}
851ISCSI_STAT_SESS_RO(txdata_octs);
852
853static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
854 struct iscsi_node_stat_grps *igrps, char *page)
855{
856 struct iscsi_node_acl *acl = container_of(igrps,
857 struct iscsi_node_acl, node_stat_grps);
858 struct se_node_acl *se_nacl = &acl->se_node_acl;
859 struct iscsi_session *sess;
860 struct se_session *se_sess;
861 ssize_t ret = 0;
862
863 spin_lock_bh(&se_nacl->nacl_sess_lock);
864 se_sess = se_nacl->nacl_sess;
865 if (se_sess) {
866 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
867 if (sess)
868 ret = snprintf(page, PAGE_SIZE, "%llu\n",
869 (unsigned long long)sess->rx_data_octets);
870 }
871 spin_unlock_bh(&se_nacl->nacl_sess_lock);
872
873 return ret;
874}
875ISCSI_STAT_SESS_RO(rxdata_octs);
876
877static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
878 struct iscsi_node_stat_grps *igrps, char *page)
879{
880 struct iscsi_node_acl *acl = container_of(igrps,
881 struct iscsi_node_acl, node_stat_grps);
882 struct se_node_acl *se_nacl = &acl->se_node_acl;
883 struct iscsi_session *sess;
884 struct se_session *se_sess;
885 ssize_t ret = 0;
886
887 spin_lock_bh(&se_nacl->nacl_sess_lock);
888 se_sess = se_nacl->nacl_sess;
889 if (se_sess) {
890 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
891 if (sess)
892 ret = snprintf(page, PAGE_SIZE, "%u\n",
893 sess->conn_digest_errors);
894 }
895 spin_unlock_bh(&se_nacl->nacl_sess_lock);
896
897 return ret;
898}
899ISCSI_STAT_SESS_RO(conn_digest_errors);
900
901static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
902 struct iscsi_node_stat_grps *igrps, char *page)
903{
904 struct iscsi_node_acl *acl = container_of(igrps,
905 struct iscsi_node_acl, node_stat_grps);
906 struct se_node_acl *se_nacl = &acl->se_node_acl;
907 struct iscsi_session *sess;
908 struct se_session *se_sess;
909 ssize_t ret = 0;
910
911 spin_lock_bh(&se_nacl->nacl_sess_lock);
912 se_sess = se_nacl->nacl_sess;
913 if (se_sess) {
914 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
915 if (sess)
916 ret = snprintf(page, PAGE_SIZE, "%u\n",
917 sess->conn_timeout_errors);
918 }
919 spin_unlock_bh(&se_nacl->nacl_sess_lock);
920
921 return ret;
922}
923ISCSI_STAT_SESS_RO(conn_timeout_errors);
924
925CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
926 iscsi_sess_stats_group);
927
928static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
929 &iscsi_stat_sess_inst.attr,
930 &iscsi_stat_sess_node.attr,
931 &iscsi_stat_sess_indx.attr,
932 &iscsi_stat_sess_cmd_pdus.attr,
933 &iscsi_stat_sess_rsp_pdus.attr,
934 &iscsi_stat_sess_txdata_octs.attr,
935 &iscsi_stat_sess_rxdata_octs.attr,
936 &iscsi_stat_sess_conn_digest_errors.attr,
937 &iscsi_stat_sess_conn_timeout_errors.attr,
938 NULL,
939};
940
941static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
942 .show_attribute = iscsi_stat_sess_attr_show,
943 .store_attribute = iscsi_stat_sess_attr_store,
944};
945
946struct config_item_type iscsi_stat_sess_cit = {
947 .ct_item_ops = &iscsi_stat_sess_stats_item_ops,
948 .ct_attrs = iscsi_stat_sess_stats_attrs,
949 .ct_owner = THIS_MODULE,
950};
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
new file mode 100644
index 000000000000..3ff76b4faad3
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.h
@@ -0,0 +1,64 @@
1#ifndef ISCSI_TARGET_STAT_H
2#define ISCSI_TARGET_STAT_H
3
4/*
5 * For struct iscsi_tiqn->tiqn_wwn default groups
6 */
7extern struct config_item_type iscsi_stat_instance_cit;
8extern struct config_item_type iscsi_stat_sess_err_cit;
9extern struct config_item_type iscsi_stat_tgt_attr_cit;
10extern struct config_item_type iscsi_stat_login_cit;
11extern struct config_item_type iscsi_stat_logout_cit;
12
13/*
14 * For struct iscsi_session->se_sess default groups
15 */
16extern struct config_item_type iscsi_stat_sess_cit;
17
18/* iSCSI session error types */
19#define ISCSI_SESS_ERR_UNKNOWN 0
20#define ISCSI_SESS_ERR_DIGEST 1
21#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
22#define ISCSI_SESS_ERR_PDU_FORMAT 3
23
24/* iSCSI session error stats */
25struct iscsi_sess_err_stats {
26 spinlock_t lock;
27 u32 digest_errors;
28 u32 cxn_timeout_errors;
29 u32 pdu_format_errors;
30 u32 last_sess_failure_type;
31 char last_sess_fail_rem_name[224];
32} ____cacheline_aligned;
33
34/* iSCSI login failure types (sub oids) */
35#define ISCSI_LOGIN_FAIL_OTHER 2
36#define ISCSI_LOGIN_FAIL_REDIRECT 3
37#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
38#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
39#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
40
41/* iSCSI login stats */
42struct iscsi_login_stats {
43 spinlock_t lock;
44 u32 accepts;
45 u32 other_fails;
46 u32 redirects;
47 u32 authorize_fails;
48 u32 authenticate_fails;
49 u32 negotiate_fails; /* used for notifications */
50 u64 last_fail_time; /* time stamp (jiffies) */
51 u32 last_fail_type;
52 int last_intr_fail_ip_family;
53 unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
54 char last_intr_fail_name[224];
55} ____cacheline_aligned;
56
57/* iSCSI logout stats */
58struct iscsi_logout_stats {
59 spinlock_t lock;
60 u32 normal_logouts;
61 u32 abnormal_logouts;
62} ____cacheline_aligned;
63
64#endif /*** ISCSI_TARGET_STAT_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
new file mode 100644
index 000000000000..db1fe1ec84df
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -0,0 +1,849 @@
1/*******************************************************************************
2 * This file contains the iSCSI Target specific Task Management functions.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <asm/unaligned.h>
22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h>
24#include <target/target_core_transport.h>
25
26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h"
28#include "iscsi_target_datain_values.h"
29#include "iscsi_target_device.h"
30#include "iscsi_target_erl0.h"
31#include "iscsi_target_erl1.h"
32#include "iscsi_target_erl2.h"
33#include "iscsi_target_tmr.h"
34#include "iscsi_target_tpg.h"
35#include "iscsi_target_util.h"
36#include "iscsi_target.h"
37
38u8 iscsit_tmr_abort_task(
39 struct iscsi_cmd *cmd,
40 unsigned char *buf)
41{
42 struct iscsi_cmd *ref_cmd;
43 struct iscsi_conn *conn = cmd->conn;
44 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
45 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
46 struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
47
48 ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
49 if (!ref_cmd) {
50 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
51 " %hu.\n", hdr->rtt, conn->cid);
52 return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
53 (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
54 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
55 }
56 if (ref_cmd->cmd_sn != hdr->refcmdsn) {
57 pr_err("RefCmdSN 0x%08x does not equal"
58 " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
59 hdr->refcmdsn, ref_cmd->cmd_sn);
60 return ISCSI_TMF_RSP_REJECTED;
61 }
62
63 se_tmr->ref_task_tag = hdr->rtt;
64 se_tmr->ref_cmd = &ref_cmd->se_cmd;
65 tmr_req->ref_cmd_sn = hdr->refcmdsn;
66 tmr_req->exp_data_sn = hdr->exp_datasn;
67
68 return ISCSI_TMF_RSP_COMPLETE;
69}
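
A minimal standalone sketch of the decision taken above when the referenced task cannot be found: a RefCmdSN still inside the [ExpCmdSN, MaxCmdSN] window is answered with FUNCTION COMPLETE, anything else with TASK DOES NOT EXIST. The enum names and sample serial numbers are illustrative assumptions.

#include <stdio.h>

/* Standalone sketch of the missing-task response choice above. */
enum example_tmf_rsp { EX_TMF_COMPLETE, EX_TMF_NO_TASK };

static enum example_tmf_rsp example_abort_missing_task(unsigned int refcmdsn,
                unsigned int exp_cmd_sn, unsigned int max_cmd_sn)
{
        return (refcmdsn >= exp_cmd_sn && refcmdsn <= max_cmd_sn) ?
                EX_TMF_COMPLETE : EX_TMF_NO_TASK;
}

int main(void)
{
        printf("%d\n", example_abort_missing_task(10, 8, 12)); /* 0: complete */
        printf("%d\n", example_abort_missing_task(5, 8, 12));  /* 1: no task */
        return 0;
}
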
70
71/*
72 * Called from iscsit_handle_task_mgt_cmd().
73 */
74int iscsit_tmr_task_warm_reset(
75 struct iscsi_conn *conn,
76 struct iscsi_tmr_req *tmr_req,
77 unsigned char *buf)
78{
79 struct iscsi_session *sess = conn->sess;
80 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
81#if 0
82 struct iscsi_init_task_mgt_cmnd *hdr =
83 (struct iscsi_init_task_mgt_cmnd *) buf;
84#endif
85 if (!na->tmr_warm_reset) {
86 pr_err("TMR Opcode TARGET_WARM_RESET authorization"
87 " failed for Initiator Node: %s\n",
88 sess->se_sess->se_node_acl->initiatorname);
89 return -1;
90 }
91 /*
92 * Do the real work in transport_generic_do_tmr().
93 */
94 return 0;
95}
96
97int iscsit_tmr_task_cold_reset(
98 struct iscsi_conn *conn,
99 struct iscsi_tmr_req *tmr_req,
100 unsigned char *buf)
101{
102 struct iscsi_session *sess = conn->sess;
103 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
104
105 if (!na->tmr_cold_reset) {
106 pr_err("TMR Opcode TARGET_COLD_RESET authorization"
107 " failed for Initiator Node: %s\n",
108 sess->se_sess->se_node_acl->initiatorname);
109 return -1;
110 }
111 /*
112 * Do the real work in transport_generic_do_tmr().
113 */
114 return 0;
115}
116
117u8 iscsit_tmr_task_reassign(
118 struct iscsi_cmd *cmd,
119 unsigned char *buf)
120{
121 struct iscsi_cmd *ref_cmd = NULL;
122 struct iscsi_conn *conn = cmd->conn;
123 struct iscsi_conn_recovery *cr = NULL;
124 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
125 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
126 struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
127 int ret;
128
129 pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
130 " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
131 hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
132
133 if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
134 pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
135 " ignoring request.\n");
136 return ISCSI_TMF_RSP_NOT_SUPPORTED;
137 }
138
139 ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
140 if (ret == -2) {
141 pr_err("Command ITT: 0x%08x is still allegiant to CID:"
142 " %hu\n", ref_cmd->init_task_tag, cr->cid);
143 return ISCSI_TMF_RSP_TASK_ALLEGIANT;
144 } else if (ret == -1) {
145 pr_err("Unable to locate RefTaskTag: 0x%08x in"
146 " connection recovery command list.\n", hdr->rtt);
147 return ISCSI_TMF_RSP_NO_TASK;
148 }
149 /*
150 * Temporary check to prevent connection recovery for
151 * connections with a differing MaxRecvDataSegmentLength.
152 */
153 if (cr->maxrecvdatasegmentlength !=
154 conn->conn_ops->MaxRecvDataSegmentLength) {
155 pr_err("Unable to perform connection recovery for"
156 " differing MaxRecvDataSegmentLength, rejecting"
157 " TMR TASK_REASSIGN.\n");
158 return ISCSI_TMF_RSP_REJECTED;
159 }
160
161 se_tmr->ref_task_tag = hdr->rtt;
162 se_tmr->ref_cmd = &ref_cmd->se_cmd;
163 se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
164 tmr_req->ref_cmd_sn = hdr->refcmdsn;
165 tmr_req->exp_data_sn = hdr->exp_datasn;
166 tmr_req->conn_recovery = cr;
167 tmr_req->task_reassign = 1;
168 /*
169 * Command can now be reassigned to a new connection.
170 * The task management response must be sent before the
171 * reassignment actually happens. See iscsit_tmr_post_handler().
172 */
173 return ISCSI_TMF_RSP_COMPLETE;
174}
175
176static void iscsit_task_reassign_remove_cmd(
177 struct iscsi_cmd *cmd,
178 struct iscsi_conn_recovery *cr,
179 struct iscsi_session *sess)
180{
181 int ret;
182
183 spin_lock(&cr->conn_recovery_cmd_lock);
184 ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
185 spin_unlock(&cr->conn_recovery_cmd_lock);
186 if (!ret) {
187 pr_debug("iSCSI connection recovery successful for CID:"
188 " %hu on SID: %u\n", cr->cid, sess->sid);
189 iscsit_remove_active_connection_recovery_entry(cr, sess);
190 }
191}
192
193static int iscsit_task_reassign_complete_nop_out(
194 struct iscsi_tmr_req *tmr_req,
195 struct iscsi_conn *conn)
196{
197 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
198 struct se_cmd *se_cmd = se_tmr->ref_cmd;
199 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
200 struct iscsi_conn_recovery *cr;
201
202 if (!cmd->cr) {
203 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
204 " is NULL!\n", cmd->init_task_tag);
205 return -1;
206 }
207 cr = cmd->cr;
208
209 /*
210 * Reset the StatSN so a new one for this command's new connection
211 * will be assigned.
212 * Reset the ExpStatSN as well so we may receive Status SNACKs.
213 */
214 cmd->stat_sn = cmd->exp_stat_sn = 0;
215
216 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
217
218 spin_lock_bh(&conn->cmd_lock);
219 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
220 spin_unlock_bh(&conn->cmd_lock);
221
222 cmd->i_state = ISTATE_SEND_NOPIN;
223 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
224 return 0;
225}
226
227static int iscsit_task_reassign_complete_write(
228 struct iscsi_cmd *cmd,
229 struct iscsi_tmr_req *tmr_req)
230{
231 int no_build_r2ts = 0;
232 u32 length = 0, offset = 0;
233 struct iscsi_conn *conn = cmd->conn;
234 struct se_cmd *se_cmd = &cmd->se_cmd;
235 /*
236 * The Initiator must not send a R2T SNACK with a Begrun less than
237 * the TMR TASK_REASSIGN's ExpDataSN.
238 */
239 if (!tmr_req->exp_data_sn) {
240 cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
241 cmd->acked_data_sn = 0;
242 } else {
243 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
244 cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
245 }
246
247 /*
248 * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
249 * Initiator is expecting. The Target controls all WRITE operations,
250 * so if we have received all DataOUT we can safely ignore the Initiator.
251 */
252 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
253 if (!atomic_read(&cmd->transport_sent)) {
254 pr_debug("WRITE ITT: 0x%08x: t_state: %d"
255 " never sent to transport\n",
256 cmd->init_task_tag, cmd->se_cmd.t_state);
257 return transport_generic_handle_data(se_cmd);
258 }
259
260 cmd->i_state = ISTATE_SEND_STATUS;
261 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
262 return 0;
263 }
264
265 /*
266	 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
267 * Unsolicited DataOut.
268 */
269 if (cmd->unsolicited_data) {
270 cmd->unsolicited_data = 0;
271
272 offset = cmd->next_burst_len = cmd->write_data_done;
273
274 if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
275 cmd->data_length) {
276 no_build_r2ts = 1;
277 length = (cmd->data_length - offset);
278 } else
279 length = (conn->sess->sess_ops->FirstBurstLength - offset);
280
281 spin_lock_bh(&cmd->r2t_lock);
282 if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
283 spin_unlock_bh(&cmd->r2t_lock);
284 return -1;
285 }
286 cmd->outstanding_r2ts++;
287 spin_unlock_bh(&cmd->r2t_lock);
288
289 if (no_build_r2ts)
290 return 0;
291 }
292 /*
293 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
294 */
295 return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
296}
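A rough numeric walk-through of the unsolicited-data branch above (illustrative values only, not taken from this patch):

	/*
	 * Assume FirstBurstLength = 65536, data_length = 32768 and
	 * write_data_done = 8192 at reassignment time:
	 *
	 *	offset = next_burst_len = 8192
	 *	(65536 - 8192) = 57344 >= 32768  ->  no_build_r2ts = 1
	 *	length = 32768 - 8192 = 24576
	 *
	 * A single recovery R2T at offset 8192 for 24576 bytes is queued and
	 * the function returns without calling iscsit_build_r2ts_for_cmd().
	 */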
297
298static int iscsit_task_reassign_complete_read(
299 struct iscsi_cmd *cmd,
300 struct iscsi_tmr_req *tmr_req)
301{
302 struct iscsi_conn *conn = cmd->conn;
303 struct iscsi_datain_req *dr;
304 struct se_cmd *se_cmd = &cmd->se_cmd;
305 /*
306 * The Initiator must not send a Data SNACK with a BegRun less than
307 * the TMR TASK_REASSIGN's ExpDataSN.
308 */
309 if (!tmr_req->exp_data_sn) {
310 cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
311 cmd->acked_data_sn = 0;
312 } else {
313 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
314 cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
315 }
316
317 if (!atomic_read(&cmd->transport_sent)) {
318 pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
319 " transport\n", cmd->init_task_tag,
320 cmd->se_cmd.t_state);
321 transport_generic_handle_cdb(se_cmd);
322 return 0;
323 }
324
325 if (!atomic_read(&se_cmd->t_transport_complete)) {
326 pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
327 " from transport\n", cmd->init_task_tag,
328 cmd->se_cmd.t_state);
329 return -1;
330 }
331
332 dr = iscsit_allocate_datain_req();
333 if (!dr)
334 return -1;
335 /*
336 * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
337 * Initiator is expecting.
338 */
339 dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
340 dr->runlength = 0;
341 dr->generate_recovery_values = 1;
342 dr->recovery = DATAIN_CONNECTION_RECOVERY;
343
344 iscsit_attach_datain_req(cmd, dr);
345
346 cmd->i_state = ISTATE_SEND_DATAIN;
347 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
348 return 0;
349}
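A hedged illustration of the recovery values set above (example numbers, not from the patch):

	/*
	 * Assume the TMR TASK_REASSIGN carries ExpDataSN = 3, i.e. the
	 * initiator saw DataIN PDUs with DataSN 0-2 before the old
	 * connection dropped:
	 *
	 *	dr->begrun = dr->data_sn = 3	(resume DataIN at DataSN 3)
	 *	dr->runlength = 0		(send through the end of the data)
	 *	dr->recovery = DATAIN_CONNECTION_RECOVERY
	 *
	 * The command is then queued as ISTATE_SEND_DATAIN on the new
	 * connection.
	 */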
350
351static int iscsit_task_reassign_complete_none(
352 struct iscsi_cmd *cmd,
353 struct iscsi_tmr_req *tmr_req)
354{
355 struct iscsi_conn *conn = cmd->conn;
356
357 cmd->i_state = ISTATE_SEND_STATUS;
358 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
359 return 0;
360}
361
362static int iscsit_task_reassign_complete_scsi_cmnd(
363 struct iscsi_tmr_req *tmr_req,
364 struct iscsi_conn *conn)
365{
366 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
367 struct se_cmd *se_cmd = se_tmr->ref_cmd;
368 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
369 struct iscsi_conn_recovery *cr;
370
371 if (!cmd->cr) {
372 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
373 " is NULL!\n", cmd->init_task_tag);
374 return -1;
375 }
376 cr = cmd->cr;
377
378 /*
379	 * Reset the StatSN so a new one for this command's new connection
380 * will be assigned.
381 * Reset the ExpStatSN as well so we may receive Status SNACKs.
382 */
383 cmd->stat_sn = cmd->exp_stat_sn = 0;
384
385 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
386
387 spin_lock_bh(&conn->cmd_lock);
388 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
389 spin_unlock_bh(&conn->cmd_lock);
390
391 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
392 cmd->i_state = ISTATE_SEND_STATUS;
393 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
394 return 0;
395 }
396
397 switch (cmd->data_direction) {
398 case DMA_TO_DEVICE:
399 return iscsit_task_reassign_complete_write(cmd, tmr_req);
400 case DMA_FROM_DEVICE:
401 return iscsit_task_reassign_complete_read(cmd, tmr_req);
402 case DMA_NONE:
403 return iscsit_task_reassign_complete_none(cmd, tmr_req);
404 default:
405 pr_err("Unknown cmd->data_direction: 0x%02x\n",
406 cmd->data_direction);
407 return -1;
408 }
409
410 return 0;
411}
412
413static int iscsit_task_reassign_complete(
414 struct iscsi_tmr_req *tmr_req,
415 struct iscsi_conn *conn)
416{
417 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
418 struct se_cmd *se_cmd;
419 struct iscsi_cmd *cmd;
420 int ret = 0;
421
422 if (!se_tmr->ref_cmd) {
423 pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
424 return -1;
425 }
426 se_cmd = se_tmr->ref_cmd;
427 cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
428
429 cmd->conn = conn;
430
431 switch (cmd->iscsi_opcode) {
432 case ISCSI_OP_NOOP_OUT:
433 ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
434 break;
435 case ISCSI_OP_SCSI_CMD:
436 ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
437 break;
438 default:
439 pr_err("Illegal iSCSI Opcode 0x%02x during"
440	 " command reallegiance\n", cmd->iscsi_opcode);
441 return -1;
442 }
443
444 if (ret != 0)
445 return ret;
446
447	 pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
448 " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
449 cmd->init_task_tag, conn->cid);
450
451 return 0;
452}
453
454/*
455 * Handles special after-the-fact actions related to TMRs.
456	 * Right now the only one it is really needed for is
457	 * connection recovery related TASK_REASSIGN.
458 */
459extern int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
460{
461 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
462 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
463
464 if (tmr_req->task_reassign &&
465 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
466 return iscsit_task_reassign_complete(tmr_req, conn);
467
468 return 0;
469}
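A hypothetical caller-side sketch of the ordering rule noted above: the TMR response must go out before the reassignment work runs. send_tmr_response() is an assumed placeholder; the real call site lives in the response TX path outside this hunk.

	/* Hypothetical sketch, not part of this patch. */
	static int example_tx_tmr_response(struct iscsi_cmd *cmd,
					   struct iscsi_conn *conn)
	{
		int ret;

		ret = send_tmr_response(cmd, conn);	/* assumed helper */
		if (ret < 0)
			return ret;
		/*
		 * Only after the response has been sent does the post
		 * handler complete the TASK_REASSIGN.
		 */
		return iscsit_tmr_post_handler(cmd, conn);
	}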
470
471/*
472 * Nothing to do here, but leave it for good measure. :-)
473 */
474int iscsit_task_reassign_prepare_read(
475 struct iscsi_tmr_req *tmr_req,
476 struct iscsi_conn *conn)
477{
478 return 0;
479}
480
481static void iscsit_task_reassign_prepare_unsolicited_dataout(
482 struct iscsi_cmd *cmd,
483 struct iscsi_conn *conn)
484{
485 int i, j;
486 struct iscsi_pdu *pdu = NULL;
487 struct iscsi_seq *seq = NULL;
488
489 if (conn->sess->sess_ops->DataSequenceInOrder) {
490 cmd->data_sn = 0;
491
492 if (cmd->immediate_data)
493 cmd->r2t_offset += (cmd->first_burst_len -
494 cmd->seq_start_offset);
495
496 if (conn->sess->sess_ops->DataPDUInOrder) {
497 cmd->write_data_done -= (cmd->immediate_data) ?
498 (cmd->first_burst_len -
499 cmd->seq_start_offset) :
500 cmd->first_burst_len;
501 cmd->first_burst_len = 0;
502 return;
503 }
504
505 for (i = 0; i < cmd->pdu_count; i++) {
506 pdu = &cmd->pdu_list[i];
507
508 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
509 continue;
510
511 if ((pdu->offset >= cmd->seq_start_offset) &&
512 ((pdu->offset + pdu->length) <=
513 cmd->seq_end_offset)) {
514 cmd->first_burst_len -= pdu->length;
515 cmd->write_data_done -= pdu->length;
516 pdu->status = ISCSI_PDU_NOT_RECEIVED;
517 }
518 }
519 } else {
520 for (i = 0; i < cmd->seq_count; i++) {
521 seq = &cmd->seq_list[i];
522
523 if (seq->type != SEQTYPE_UNSOLICITED)
524 continue;
525
526 cmd->write_data_done -=
527 (seq->offset - seq->orig_offset);
528 cmd->first_burst_len = 0;
529 seq->data_sn = 0;
530 seq->offset = seq->orig_offset;
531 seq->next_burst_len = 0;
532 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
533
534 if (conn->sess->sess_ops->DataPDUInOrder)
535 continue;
536
537 for (j = 0; j < seq->pdu_count; j++) {
538 pdu = &cmd->pdu_list[j+seq->pdu_start];
539
540 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
541 continue;
542
543 pdu->status = ISCSI_PDU_NOT_RECEIVED;
544 }
545 }
546 }
547}
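A small worked example for the DataSequenceInOrder=No branch above (numbers are assumptions for illustration):

	/*
	 * Assume an unsolicited sequence with orig_offset = 4096 that had
	 * advanced to offset = 16384 when the connection failed:
	 *
	 *	write_data_done -= (16384 - 4096) = 12288
	 *	seq->offset = 4096 (rewound to orig_offset)
	 *	seq->data_sn = 0, seq->next_burst_len = 0
	 *
	 * so the whole unsolicited burst is transferred again once the
	 * command has been reassigned to the new connection.
	 */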
548
549int iscsit_task_reassign_prepare_write(
550 struct iscsi_tmr_req *tmr_req,
551 struct iscsi_conn *conn)
552{
553 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
554 struct se_cmd *se_cmd = se_tmr->ref_cmd;
555 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
556 struct iscsi_pdu *pdu = NULL;
557 struct iscsi_r2t *r2t = NULL, *r2t_tmp;
558 int first_incomplete_r2t = 1, i = 0;
559
560 /*
561 * The command was in the process of receiving Unsolicited DataOUT when
562 * the connection failed.
563 */
564 if (cmd->unsolicited_data)
565 iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
566
567 /*
568	 * The Initiator is requesting R2Ts starting from zero, so skip
569	 * checking acknowledged R2Ts and start checking struct iscsi_r2t
570	 * entries with an R2TSN greater than zero.
571 */
572 if (!tmr_req->exp_data_sn)
573 goto drop_unacknowledged_r2ts;
574
575 /*
576 * We now check that the PDUs in DataOUT sequences below
577 * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
578 * expecting next) have all the DataOUT they require to complete
579 * the DataOUT sequence. First scan from R2TSN 0 to TMR
580 * TASK_REASSIGN ExpDataSN-1.
581 *
582 * If we have not received all DataOUT in question, we must
583 * make sure to make the appropriate changes to values in
584 * struct iscsi_cmd (and elsewhere depending on session parameters)
585 * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
586 * will resend a new R2T for the DataOUT sequences in question.
587 */
588 spin_lock_bh(&cmd->r2t_lock);
589 if (list_empty(&cmd->cmd_r2t_list)) {
590 spin_unlock_bh(&cmd->r2t_lock);
591 return -1;
592 }
593
594 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
595
596 if (r2t->r2t_sn >= tmr_req->exp_data_sn)
597 continue;
598 /*
599 * Safely ignore Recovery R2Ts and R2Ts that have completed
600 * DataOUT sequences.
601 */
602 if (r2t->seq_complete)
603 continue;
604
605 if (r2t->recovery_r2t)
606 continue;
607
608 /*
609 * DataSequenceInOrder=Yes:
610 *
611 * Taking into account the iSCSI implementation requirement of
612 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
613 * DataSequenceInOrder=Yes, we must take into consideration
614 * the following:
615 *
616 * DataSequenceInOrder=No:
617 *
618 * Taking into account that the Initiator controls the (possibly
619 * random) PDU Order in (possibly random) Sequence Order of
620 * DataOUT the target requests with R2Ts, we must take into
621 * consideration the following:
622 *
623 * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
624 *
625 * While processing non-complete R2T DataOUT sequence requests
626 * the Target will re-request only the total sequence length
627 * minus current received offset. This is because we must
628 * assume the initiator will continue sending DataOUT from the
629 * last PDU before the connection failed.
630 *
631 * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
632 *
633 * While processing non-complete R2T DataOUT sequence requests
634 * the Target will re-request the entire DataOUT sequence if
635 * any single PDU is missing from the sequence. This is because
636 * we have no logical method to determine the next PDU offset,
637 * and we must assume the Initiator will be sending any random
638 * PDU offset in the current sequence after TASK_REASSIGN
639 * has completed.
640 */
641 if (conn->sess->sess_ops->DataSequenceInOrder) {
642 if (!first_incomplete_r2t) {
643 cmd->r2t_offset -= r2t->xfer_len;
644 goto next;
645 }
646
647 if (conn->sess->sess_ops->DataPDUInOrder) {
648 cmd->data_sn = 0;
649 cmd->r2t_offset -= (r2t->xfer_len -
650 cmd->next_burst_len);
651 first_incomplete_r2t = 0;
652 goto next;
653 }
654
655 cmd->data_sn = 0;
656 cmd->r2t_offset -= r2t->xfer_len;
657
658 for (i = 0; i < cmd->pdu_count; i++) {
659 pdu = &cmd->pdu_list[i];
660
661 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
662 continue;
663
664 if ((pdu->offset >= r2t->offset) &&
665 (pdu->offset < (r2t->offset +
666 r2t->xfer_len))) {
667 cmd->next_burst_len -= pdu->length;
668 cmd->write_data_done -= pdu->length;
669 pdu->status = ISCSI_PDU_NOT_RECEIVED;
670 }
671 }
672
673 first_incomplete_r2t = 0;
674 } else {
675 struct iscsi_seq *seq;
676
677 seq = iscsit_get_seq_holder(cmd, r2t->offset,
678 r2t->xfer_len);
679 if (!seq) {
680 spin_unlock_bh(&cmd->r2t_lock);
681 return -1;
682 }
683
684 cmd->write_data_done -=
685 (seq->offset - seq->orig_offset);
686 seq->data_sn = 0;
687 seq->offset = seq->orig_offset;
688 seq->next_burst_len = 0;
689 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
690
691 cmd->seq_send_order--;
692
693 if (conn->sess->sess_ops->DataPDUInOrder)
694 goto next;
695
696 for (i = 0; i < seq->pdu_count; i++) {
697 pdu = &cmd->pdu_list[i+seq->pdu_start];
698
699 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
700 continue;
701
702 pdu->status = ISCSI_PDU_NOT_RECEIVED;
703 }
704 }
705
706next:
707 cmd->outstanding_r2ts--;
708 }
709 spin_unlock_bh(&cmd->r2t_lock);
710
711 /*
712	 * We now drop all unacknowledged R2Ts, i.e. from the TMR
713	 * TASK_REASSIGN ExpDataSN to the last R2T in the list. We are also careful
714 * to check that the Initiator is not requesting R2Ts for DataOUT
715 * sequences it has already completed.
716 *
717 * Free each R2T in question and adjust values in struct iscsi_cmd
718	 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
719 * the work after the TMR TASK_REASSIGN Response is sent.
720 */
721drop_unacknowledged_r2ts:
722
723 cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
724 cmd->r2t_sn = tmr_req->exp_data_sn;
725
726 spin_lock_bh(&cmd->r2t_lock);
727 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
728 /*
729 * Skip up to the R2T Sequence number provided by the
730 * iSCSI TASK_REASSIGN TMR
731 */
732 if (r2t->r2t_sn < tmr_req->exp_data_sn)
733 continue;
734
735 if (r2t->seq_complete) {
736 pr_err("Initiator is requesting R2Ts from"
737 " R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
738 " Length: %u is already complete."
739 " BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
740 tmr_req->exp_data_sn, r2t->r2t_sn,
741 r2t->offset, r2t->xfer_len);
742 spin_unlock_bh(&cmd->r2t_lock);
743 return -1;
744 }
745
746 if (r2t->recovery_r2t) {
747 iscsit_free_r2t(r2t, cmd);
748 continue;
749 }
750
751 /* DataSequenceInOrder=Yes:
752 *
753 * Taking into account the iSCSI implementation requirement of
754 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
755	 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
756	 * entire transfer length from the command's R2T offset marker.
757 *
758 * DataSequenceInOrder=No:
759 *
760	 * We subtract the struct iscsi_seq difference between the
761	 * current offset and the original offset from cmd->write_data_done
762	 * to account for DataOUT PDUs already received. Then reset
763 * the current offset to the original and zero out the current
764 * burst length, to make sure we re-request the entire DataOUT
765 * sequence.
766 */
767 if (conn->sess->sess_ops->DataSequenceInOrder)
768 cmd->r2t_offset -= r2t->xfer_len;
769 else
770 cmd->seq_send_order--;
771
772 cmd->outstanding_r2ts--;
773 iscsit_free_r2t(r2t, cmd);
774 }
775 spin_unlock_bh(&cmd->r2t_lock);
776
777 return 0;
778}
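A worked example of the drop_unacknowledged_r2ts phase above for DataSequenceInOrder=Yes (all numbers assumed):

	/*
	 * Assume five R2Ts of 65536 bytes each were issued (R2TSN 0-4,
	 * r2t_offset = 327680), R2TSN 0-2 completed their DataOUT
	 * sequences, and the TMR TASK_REASSIGN carries ExpDataSN = 3:
	 *
	 *	cmd->r2t_sn = 3			(restart R2T numbering here)
	 *	R2TSN 3 and 4 are freed:
	 *		r2t_offset = 327680 - 65536 - 65536 = 196608
	 *		outstanding_r2ts -= 2
	 *
	 * iscsit_build_r2ts_for_cmd(), called from
	 * iscsit_task_reassign_complete_write() after the TMR response has
	 * been sent, then reissues R2TSN 3 and 4 for the same byte ranges.
	 */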
779
780/*
781	 * Performs sanity checks on the TMR TASK_REASSIGN's ExpDataSN for
782 * a given struct iscsi_cmd.
783 */
784int iscsit_check_task_reassign_expdatasn(
785 struct iscsi_tmr_req *tmr_req,
786 struct iscsi_conn *conn)
787{
788 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
789 struct se_cmd *se_cmd = se_tmr->ref_cmd;
790 struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
791
792 if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
793 return 0;
794
795 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
796 return 0;
797
798 if (ref_cmd->data_direction == DMA_NONE)
799 return 0;
800
801 /*
802	 * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN
803 * of DataIN the Initiator is expecting.
804 *
805 * Also check that the Initiator is not re-requesting DataIN that has
806 * already been acknowledged with a DataAck SNACK.
807 */
808 if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
809 if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
810 pr_err("Received ExpDataSN: 0x%08x for READ"
811 " in TMR TASK_REASSIGN greater than command's"
812 " DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
813 ref_cmd->data_sn);
814 return -1;
815 }
816 if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
817 (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
818 pr_err("Received ExpDataSN: 0x%08x for READ"
819 " in TMR TASK_REASSIGN for previously"
820 " acknowledged DataIN: 0x%08x,"
821 " protocol error\n", tmr_req->exp_data_sn,
822 ref_cmd->acked_data_sn);
823 return -1;
824 }
825 return iscsit_task_reassign_prepare_read(tmr_req, conn);
826 }
827
828 /*
829	 * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN
830 * for R2Ts the Initiator is expecting.
831 *
832 * Do the magic in iscsit_task_reassign_prepare_write().
833 */
834 if (ref_cmd->data_direction == DMA_TO_DEVICE) {
835 if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
836 pr_err("Received ExpDataSN: 0x%08x for WRITE"
837 " in TMR TASK_REASSIGN greater than command's"
838 " R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
839 ref_cmd->r2t_sn);
840 return -1;
841 }
842 return iscsit_task_reassign_prepare_write(tmr_req, conn);
843 }
844
845 pr_err("Unknown iSCSI data_direction: 0x%02x\n",
846 ref_cmd->data_direction);
847
848 return -1;
849}
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
new file mode 100644
index 000000000000..142e992cb097
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -0,0 +1,14 @@
1#ifndef ISCSI_TARGET_TMR_H
2#define ISCSI_TARGET_TMR_H
3
4extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
5extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
6 unsigned char *);
7extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
8 unsigned char *);
9extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
10extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
11extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
12 struct iscsi_conn *);
13
14#endif /* ISCSI_TARGET_TMR_H */
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
new file mode 100644
index 000000000000..d4cf2cd25c44
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -0,0 +1,759 @@
1/*******************************************************************************
2 * This file contains iSCSI Target Portal Group related functions.
3 *
4	 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <target/target_core_base.h>
22#include <target/target_core_transport.h>
23#include <target/target_core_fabric_ops.h>
24#include <target/target_core_configfs.h>
25#include <target/target_core_tpg.h>
26
27#include "iscsi_target_core.h"
28#include "iscsi_target_erl0.h"
29#include "iscsi_target_login.h"
30#include "iscsi_target_nodeattrib.h"
31#include "iscsi_target_tpg.h"
32#include "iscsi_target_util.h"
33#include "iscsi_target.h"
34#include "iscsi_target_parameters.h"
35
36struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
37{
38 struct iscsi_portal_group *tpg;
39
40 tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
41 if (!tpg) {
42 pr_err("Unable to allocate struct iscsi_portal_group\n");
43 return NULL;
44 }
45
46 tpg->tpgt = tpgt;
47 tpg->tpg_state = TPG_STATE_FREE;
48 tpg->tpg_tiqn = tiqn;
49 INIT_LIST_HEAD(&tpg->tpg_gnp_list);
50 INIT_LIST_HEAD(&tpg->tpg_list);
51 mutex_init(&tpg->tpg_access_lock);
52 mutex_init(&tpg->np_login_lock);
53 spin_lock_init(&tpg->tpg_state_lock);
54 spin_lock_init(&tpg->tpg_np_lock);
55
56 return tpg;
57}
58
59static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
60
61int iscsit_load_discovery_tpg(void)
62{
63 struct iscsi_param *param;
64 struct iscsi_portal_group *tpg;
65 int ret;
66
67 tpg = iscsit_alloc_portal_group(NULL, 1);
68 if (!tpg) {
69 pr_err("Unable to allocate struct iscsi_portal_group\n");
70 return -1;
71 }
72
73 ret = core_tpg_register(
74 &lio_target_fabric_configfs->tf_ops,
75 NULL, &tpg->tpg_se_tpg, (void *)tpg,
76 TRANSPORT_TPG_TYPE_DISCOVERY);
77 if (ret < 0) {
78 kfree(tpg);
79 return -1;
80 }
81
82 tpg->sid = 1; /* First Assigned LIO Session ID */
83 iscsit_set_default_tpg_attribs(tpg);
84
85 if (iscsi_create_default_params(&tpg->param_list) < 0)
86 goto out;
87 /*
88 * By default we disable authentication for discovery sessions,
89 * this can be changed with:
90 *
91 * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
92 */
93 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
94 if (!param)
95 goto out;
96
97 if (iscsi_update_param_value(param, "CHAP,None") < 0)
98 goto out;
99
100 tpg->tpg_attrib.authentication = 0;
101
102 spin_lock(&tpg->tpg_state_lock);
103 tpg->tpg_state = TPG_STATE_ACTIVE;
104 spin_unlock(&tpg->tpg_state_lock);
105
106 iscsit_global->discovery_tpg = tpg;
107 pr_debug("CORE[0] - Allocated Discovery TPG\n");
108
109 return 0;
110out:
111 if (tpg->sid == 1)
112 core_tpg_deregister(&tpg->tpg_se_tpg);
113 kfree(tpg);
114 return -1;
115}
116
117void iscsit_release_discovery_tpg(void)
118{
119 struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
120
121 if (!tpg)
122 return;
123
124 core_tpg_deregister(&tpg->tpg_se_tpg);
125
126 kfree(tpg);
127 iscsit_global->discovery_tpg = NULL;
128}
129
130struct iscsi_portal_group *iscsit_get_tpg_from_np(
131 struct iscsi_tiqn *tiqn,
132 struct iscsi_np *np)
133{
134 struct iscsi_portal_group *tpg = NULL;
135 struct iscsi_tpg_np *tpg_np;
136
137 spin_lock(&tiqn->tiqn_tpg_lock);
138 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
139
140 spin_lock(&tpg->tpg_state_lock);
141 if (tpg->tpg_state == TPG_STATE_FREE) {
142 spin_unlock(&tpg->tpg_state_lock);
143 continue;
144 }
145 spin_unlock(&tpg->tpg_state_lock);
146
147 spin_lock(&tpg->tpg_np_lock);
148 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
149 if (tpg_np->tpg_np == np) {
150 spin_unlock(&tpg->tpg_np_lock);
151 spin_unlock(&tiqn->tiqn_tpg_lock);
152 return tpg;
153 }
154 }
155 spin_unlock(&tpg->tpg_np_lock);
156 }
157 spin_unlock(&tiqn->tiqn_tpg_lock);
158
159 return NULL;
160}
161
162int iscsit_get_tpg(
163 struct iscsi_portal_group *tpg)
164{
165 int ret;
166
167 ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
168 return ((ret != 0) || signal_pending(current)) ? -1 : 0;
169}
170
171void iscsit_put_tpg(struct iscsi_portal_group *tpg)
172{
173 mutex_unlock(&tpg->tpg_access_lock);
174}
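A minimal usage sketch for the pair above, assuming callers bracket TPG access with get/put (the actual call sites are outside this hunk):

	/* Hypothetical caller sketch. */
	if (iscsit_get_tpg(tpg) < 0)
		return -EINVAL;
	/* ... safe to touch tpg->param_list, tpg->tpg_attrib, ... */
	iscsit_put_tpg(tpg);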
175
176static void iscsit_clear_tpg_np_login_thread(
177 struct iscsi_tpg_np *tpg_np,
178 struct iscsi_portal_group *tpg)
179{
180 if (!tpg_np->tpg_np) {
181 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
182 return;
183 }
184
185 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
186}
187
188void iscsit_clear_tpg_np_login_threads(
189 struct iscsi_portal_group *tpg)
190{
191 struct iscsi_tpg_np *tpg_np;
192
193 spin_lock(&tpg->tpg_np_lock);
194 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
195 if (!tpg_np->tpg_np) {
196 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
197 continue;
198 }
199 spin_unlock(&tpg->tpg_np_lock);
200 iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
201 spin_lock(&tpg->tpg_np_lock);
202 }
203 spin_unlock(&tpg->tpg_np_lock);
204}
205
206void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
207{
208 iscsi_print_params(tpg->param_list);
209}
210
211static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
212{
213 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
214
215 a->authentication = TA_AUTHENTICATION;
216 a->login_timeout = TA_LOGIN_TIMEOUT;
217 a->netif_timeout = TA_NETIF_TIMEOUT;
218 a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
219 a->generate_node_acls = TA_GENERATE_NODE_ACLS;
220 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
221 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
222 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
223}
224
225int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
226{
227 if (tpg->tpg_state != TPG_STATE_FREE) {
228 pr_err("Unable to add iSCSI Target Portal Group: %d"
229 " while not in TPG_STATE_FREE state.\n", tpg->tpgt);
230 return -EEXIST;
231 }
232 iscsit_set_default_tpg_attribs(tpg);
233
234 if (iscsi_create_default_params(&tpg->param_list) < 0)
235 goto err_out;
236
237 ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
238
239 spin_lock(&tpg->tpg_state_lock);
240 tpg->tpg_state = TPG_STATE_INACTIVE;
241 spin_unlock(&tpg->tpg_state_lock);
242
243 spin_lock(&tiqn->tiqn_tpg_lock);
244 list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
245 tiqn->tiqn_ntpgs++;
246 pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
247 tiqn->tiqn, tpg->tpgt);
248 spin_unlock(&tiqn->tiqn_tpg_lock);
249
250 return 0;
251err_out:
252 if (tpg->param_list) {
253 iscsi_release_param_list(tpg->param_list);
254 tpg->param_list = NULL;
255 }
256 kfree(tpg);
257 return -ENOMEM;
258}
259
260int iscsit_tpg_del_portal_group(
261 struct iscsi_tiqn *tiqn,
262 struct iscsi_portal_group *tpg,
263 int force)
264{
265 u8 old_state = tpg->tpg_state;
266
267 spin_lock(&tpg->tpg_state_lock);
268 tpg->tpg_state = TPG_STATE_INACTIVE;
269 spin_unlock(&tpg->tpg_state_lock);
270
271 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
272 pr_err("Unable to delete iSCSI Target Portal Group:"
273 " %hu while active sessions exist, and force=0\n",
274 tpg->tpgt);
275 tpg->tpg_state = old_state;
276 return -EPERM;
277 }
278
279 core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
280
281 if (tpg->param_list) {
282 iscsi_release_param_list(tpg->param_list);
283 tpg->param_list = NULL;
284 }
285
286 core_tpg_deregister(&tpg->tpg_se_tpg);
287
288 spin_lock(&tpg->tpg_state_lock);
289 tpg->tpg_state = TPG_STATE_FREE;
290 spin_unlock(&tpg->tpg_state_lock);
291
292 spin_lock(&tiqn->tiqn_tpg_lock);
293 tiqn->tiqn_ntpgs--;
294 list_del(&tpg->tpg_list);
295 spin_unlock(&tiqn->tiqn_tpg_lock);
296
297 pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
298 tiqn->tiqn, tpg->tpgt);
299
300 kfree(tpg);
301 return 0;
302}
303
304int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
305{
306 struct iscsi_param *param;
307 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
308
309 spin_lock(&tpg->tpg_state_lock);
310 if (tpg->tpg_state == TPG_STATE_ACTIVE) {
311 pr_err("iSCSI target portal group: %hu is already"
312 " active, ignoring request.\n", tpg->tpgt);
313 spin_unlock(&tpg->tpg_state_lock);
314 return -EINVAL;
315 }
316 /*
317 * Make sure that AuthMethod does not contain None as an option
318	 * unless explicitly disabled. Set the default to CHAP if authentication
319 * is enforced (as per default), and remove the NONE option.
320 */
321 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
322 if (!param) {
323 spin_unlock(&tpg->tpg_state_lock);
324 return -ENOMEM;
325 }
326
327 if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
328 if (!strcmp(param->value, NONE))
329 if (iscsi_update_param_value(param, CHAP) < 0) {
330 spin_unlock(&tpg->tpg_state_lock);
331 return -ENOMEM;
332 }
333 if (iscsit_ta_authentication(tpg, 1) < 0) {
334 spin_unlock(&tpg->tpg_state_lock);
335 return -ENOMEM;
336 }
337 }
338
339 tpg->tpg_state = TPG_STATE_ACTIVE;
340 spin_unlock(&tpg->tpg_state_lock);
341
342 spin_lock(&tiqn->tiqn_tpg_lock);
343 tiqn->tiqn_active_tpgs++;
344 pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
345 tpg->tpgt);
346 spin_unlock(&tiqn->tiqn_tpg_lock);
347
348 return 0;
349}
350
351int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
352{
353 struct iscsi_tiqn *tiqn;
354 u8 old_state = tpg->tpg_state;
355
356 spin_lock(&tpg->tpg_state_lock);
357 if (tpg->tpg_state == TPG_STATE_INACTIVE) {
358 pr_err("iSCSI Target Portal Group: %hu is already"
359 " inactive, ignoring request.\n", tpg->tpgt);
360 spin_unlock(&tpg->tpg_state_lock);
361 return -EINVAL;
362 }
363 tpg->tpg_state = TPG_STATE_INACTIVE;
364 spin_unlock(&tpg->tpg_state_lock);
365
366 iscsit_clear_tpg_np_login_threads(tpg);
367
368 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
369 spin_lock(&tpg->tpg_state_lock);
370 tpg->tpg_state = old_state;
371 spin_unlock(&tpg->tpg_state_lock);
372 pr_err("Unable to disable iSCSI Target Portal Group:"
373 " %hu while active sessions exist, and force=0\n",
374 tpg->tpgt);
375 return -EPERM;
376 }
377
378 tiqn = tpg->tpg_tiqn;
379 if (!tiqn || (tpg == iscsit_global->discovery_tpg))
380 return 0;
381
382 spin_lock(&tiqn->tiqn_tpg_lock);
383 tiqn->tiqn_active_tpgs--;
384 pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
385 tpg->tpgt);
386 spin_unlock(&tiqn->tiqn_tpg_lock);
387
388 return 0;
389}
390
391struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
392 struct iscsi_session *sess)
393{
394 struct se_session *se_sess = sess->se_sess;
395 struct se_node_acl *se_nacl = se_sess->se_node_acl;
396 struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
397 se_node_acl);
398
399 return &acl->node_attrib;
400}
401
402struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
403 struct iscsi_tpg_np *tpg_np,
404 int network_transport)
405{
406 struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
407
408 spin_lock(&tpg_np->tpg_np_parent_lock);
409 list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
410 &tpg_np->tpg_np_parent_list, tpg_np_child_list) {
411 if (tpg_np_child->tpg_np->np_network_transport ==
412 network_transport) {
413 spin_unlock(&tpg_np->tpg_np_parent_lock);
414 return tpg_np_child;
415 }
416 }
417 spin_unlock(&tpg_np->tpg_np_parent_lock);
418
419 return NULL;
420}
421
422struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
423 struct iscsi_portal_group *tpg,
424 struct __kernel_sockaddr_storage *sockaddr,
425 char *ip_str,
426 struct iscsi_tpg_np *tpg_np_parent,
427 int network_transport)
428{
429 struct iscsi_np *np;
430 struct iscsi_tpg_np *tpg_np;
431
432 tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
433 if (!tpg_np) {
434 pr_err("Unable to allocate memory for"
435 " struct iscsi_tpg_np.\n");
436 return ERR_PTR(-ENOMEM);
437 }
438
439 np = iscsit_add_np(sockaddr, ip_str, network_transport);
440 if (IS_ERR(np)) {
441 kfree(tpg_np);
442 return ERR_CAST(np);
443 }
444
445 INIT_LIST_HEAD(&tpg_np->tpg_np_list);
446 INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
447 INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
448 spin_lock_init(&tpg_np->tpg_np_parent_lock);
449 tpg_np->tpg_np = np;
450 tpg_np->tpg = tpg;
451
452 spin_lock(&tpg->tpg_np_lock);
453 list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
454 tpg->num_tpg_nps++;
455 if (tpg->tpg_tiqn)
456 tpg->tpg_tiqn->tiqn_num_tpg_nps++;
457 spin_unlock(&tpg->tpg_np_lock);
458
459 if (tpg_np_parent) {
460 tpg_np->tpg_np_parent = tpg_np_parent;
461 spin_lock(&tpg_np_parent->tpg_np_parent_lock);
462 list_add_tail(&tpg_np->tpg_np_child_list,
463 &tpg_np_parent->tpg_np_parent_list);
464 spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
465 }
466
467 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
468 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
469 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
470
471 return tpg_np;
472}
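A hedged sketch of the parent/child relationship set up above; the address and the ISCSI_SCTP_TCP constant are assumptions for illustration. The first portal created for an address acts as the parent, and a portal for another transport on the same address is attached to it as a child:

	/* Hypothetical sketch, values assumed. */
	struct iscsi_tpg_np *parent, *child;

	parent = iscsit_tpg_add_network_portal(tpg, &sockaddr, "10.0.0.1",
					       NULL, ISCSI_TCP);
	child = iscsit_tpg_add_network_portal(tpg, &sockaddr, "10.0.0.1",
					      parent, ISCSI_SCTP_TCP);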
473
474static int iscsit_tpg_release_np(
475 struct iscsi_tpg_np *tpg_np,
476 struct iscsi_portal_group *tpg,
477 struct iscsi_np *np)
478{
479 iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
480
481 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
482 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
483 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
484
485 tpg_np->tpg_np = NULL;
486 tpg_np->tpg = NULL;
487 kfree(tpg_np);
488 /*
489 * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released.
490 */
491 return iscsit_del_np(np);
492}
493
494int iscsit_tpg_del_network_portal(
495 struct iscsi_portal_group *tpg,
496 struct iscsi_tpg_np *tpg_np)
497{
498 struct iscsi_np *np;
499 struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
500 int ret = 0;
501
502 np = tpg_np->tpg_np;
503 if (!np) {
504 pr_err("Unable to locate struct iscsi_np from"
505 " struct iscsi_tpg_np\n");
506 return -EINVAL;
507 }
508
509 if (!tpg_np->tpg_np_parent) {
510 /*
511 * We are the parent tpg network portal. Release all of the
512	 * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
513 * list first.
514 */
515 list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
516 &tpg_np->tpg_np_parent_list,
517 tpg_np_child_list) {
518 ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
519 if (ret < 0)
520 pr_err("iscsit_tpg_del_network_portal()"
521 " failed: %d\n", ret);
522 }
523 } else {
524 /*
525	 * We are not the parent ISCSI_TCP tpg network portal, so
526	 * remove ourselves from the parent's child list.
527 */
528 spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
529 list_del(&tpg_np->tpg_np_child_list);
530 spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
531 }
532
533 spin_lock(&tpg->tpg_np_lock);
534 list_del(&tpg_np->tpg_np_list);
535 tpg->num_tpg_nps--;
536 if (tpg->tpg_tiqn)
537 tpg->tpg_tiqn->tiqn_num_tpg_nps--;
538 spin_unlock(&tpg->tpg_np_lock);
539
540 return iscsit_tpg_release_np(tpg_np, tpg, np);
541}
542
543int iscsit_tpg_set_initiator_node_queue_depth(
544 struct iscsi_portal_group *tpg,
545 unsigned char *initiatorname,
546 u32 queue_depth,
547 int force)
548{
549 return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
550 initiatorname, queue_depth, force);
551}
552
553int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
554{
555 unsigned char buf1[256], buf2[256], *none = NULL;
556 int len;
557 struct iscsi_param *param;
558 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
559
560 if ((authentication != 1) && (authentication != 0)) {
561 pr_err("Illegal value for authentication parameter:"
562 " %u, ignoring request.\n", authentication);
563 return -1;
564 }
565
566 memset(buf1, 0, sizeof(buf1));
567 memset(buf2, 0, sizeof(buf2));
568
569 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
570 if (!param)
571 return -EINVAL;
572
573 if (authentication) {
574 snprintf(buf1, sizeof(buf1), "%s", param->value);
575 none = strstr(buf1, NONE);
576 if (!none)
577 goto out;
578 if (!strncmp(none + 4, ",", 1)) {
579 if (!strcmp(buf1, none))
580 sprintf(buf2, "%s", none+5);
581 else {
582 none--;
583 *none = '\0';
584 len = sprintf(buf2, "%s", buf1);
585 none += 5;
586 sprintf(buf2 + len, "%s", none);
587 }
588 } else {
589 none--;
590 *none = '\0';
591 sprintf(buf2, "%s", buf1);
592 }
593 if (iscsi_update_param_value(param, buf2) < 0)
594 return -EINVAL;
595 } else {
596 snprintf(buf1, sizeof(buf1), "%s", param->value);
597 none = strstr(buf1, NONE);
598 if ((none))
599 goto out;
600 strncat(buf1, ",", strlen(","));
601 strncat(buf1, NONE, strlen(NONE));
602 if (iscsi_update_param_value(param, buf1) < 0)
603 return -EINVAL;
604 }
605
606out:
607 a->authentication = authentication;
608 pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
609 a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
610
611 return 0;
612}
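The string surgery above is easier to follow with a few concrete AuthMethod values (examples traced from the code above):

	/*
	 * Enforcing (authentication = 1), "None" is stripped:
	 *	"CHAP,None"      -> "CHAP"
	 *	"None,CHAP"      -> "CHAP"
	 *	"CHAP,None,SRP"  -> "CHAP,SRP"
	 *
	 * Disabling (authentication = 0), ",None" is appended when missing:
	 *	"CHAP"           -> "CHAP,None"
	 */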
613
614int iscsit_ta_login_timeout(
615 struct iscsi_portal_group *tpg,
616 u32 login_timeout)
617{
618 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
619
620 if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
621 pr_err("Requested Login Timeout %u larger than maximum"
622 " %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
623 return -EINVAL;
624 } else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
625	 pr_err("Requested Login Timeout %u smaller than"
626 " minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
627 return -EINVAL;
628 }
629
630 a->login_timeout = login_timeout;
631	 pr_debug("Set Login Timeout to %u for Target Portal Group"
632 " %hu\n", a->login_timeout, tpg->tpgt);
633
634 return 0;
635}
636
637int iscsit_ta_netif_timeout(
638 struct iscsi_portal_group *tpg,
639 u32 netif_timeout)
640{
641 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
642
643 if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
644 pr_err("Requested Network Interface Timeout %u larger"
645 " than maximum %u\n", netif_timeout,
646 TA_NETIF_TIMEOUT_MAX);
647 return -EINVAL;
648 } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
649 pr_err("Requested Network Interface Timeout %u smaller"
650 " than minimum %u\n", netif_timeout,
651 TA_NETIF_TIMEOUT_MIN);
652 return -EINVAL;
653 }
654
655 a->netif_timeout = netif_timeout;
656 pr_debug("Set Network Interface Timeout to %u for"
657 " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
658
659 return 0;
660}
661
662int iscsit_ta_generate_node_acls(
663 struct iscsi_portal_group *tpg,
664 u32 flag)
665{
666 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
667
668 if ((flag != 0) && (flag != 1)) {
669 pr_err("Illegal value %d\n", flag);
670 return -EINVAL;
671 }
672
673 a->generate_node_acls = flag;
674 pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
675 tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
676
677 return 0;
678}
679
680int iscsit_ta_default_cmdsn_depth(
681 struct iscsi_portal_group *tpg,
682 u32 tcq_depth)
683{
684 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
685
686 if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
687 pr_err("Requested Default Queue Depth: %u larger"
688 " than maximum %u\n", tcq_depth,
689 TA_DEFAULT_CMDSN_DEPTH_MAX);
690 return -EINVAL;
691 } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
692 pr_err("Requested Default Queue Depth: %u smaller"
693 " than minimum %u\n", tcq_depth,
694 TA_DEFAULT_CMDSN_DEPTH_MIN);
695 return -EINVAL;
696 }
697
698 a->default_cmdsn_depth = tcq_depth;
699 pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
700 tpg->tpgt, a->default_cmdsn_depth);
701
702 return 0;
703}
704
705int iscsit_ta_cache_dynamic_acls(
706 struct iscsi_portal_group *tpg,
707 u32 flag)
708{
709 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
710
711 if ((flag != 0) && (flag != 1)) {
712 pr_err("Illegal value %d\n", flag);
713 return -EINVAL;
714 }
715
716 a->cache_dynamic_acls = flag;
717 pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
718 " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
719 "Enabled" : "Disabled");
720
721 return 0;
722}
723
724int iscsit_ta_demo_mode_write_protect(
725 struct iscsi_portal_group *tpg,
726 u32 flag)
727{
728 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
729
730 if ((flag != 0) && (flag != 1)) {
731 pr_err("Illegal value %d\n", flag);
732 return -EINVAL;
733 }
734
735 a->demo_mode_write_protect = flag;
736 pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
737 tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
738
739 return 0;
740}
741
742int iscsit_ta_prod_mode_write_protect(
743 struct iscsi_portal_group *tpg,
744 u32 flag)
745{
746 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
747
748 if ((flag != 0) && (flag != 1)) {
749 pr_err("Illegal value %d\n", flag);
750 return -EINVAL;
751 }
752
753 a->prod_mode_write_protect = flag;
754 pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
755 " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
756 "ON" : "OFF");
757
758 return 0;
759}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
new file mode 100644
index 000000000000..dda48c141a8c
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -0,0 +1,41 @@
1#ifndef ISCSI_TARGET_TPG_H
2#define ISCSI_TARGET_TPG_H
3
4extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
5extern int iscsit_load_discovery_tpg(void);
6extern void iscsit_release_discovery_tpg(void);
7extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
8 struct iscsi_np *);
9extern int iscsit_get_tpg(struct iscsi_portal_group *);
10extern void iscsit_put_tpg(struct iscsi_portal_group *);
11extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
12extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
13extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
14extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
15 int);
16extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
17extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
18extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
19 struct iscsi_portal_group *, const char *, u32);
20extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
21 struct se_node_acl *);
22extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
23extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
24extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
25extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
26 struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
27 int);
28extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
29 struct iscsi_tpg_np *);
30extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
31 unsigned char *, u32, int);
32extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
33extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
34extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
35extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
36extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
37extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
38extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
39extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
40
41#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
new file mode 100644
index 000000000000..0baac5bcebd4
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -0,0 +1,551 @@
1/*******************************************************************************
2 * This file contains the iSCSI Login Thread and Thread Queue functions.
3 *
4	 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/kthread.h>
22#include <linux/list.h>
23#include <linux/bitmap.h>
24
25#include "iscsi_target_core.h"
26#include "iscsi_target_tq.h"
27#include "iscsi_target.h"
28
29static LIST_HEAD(active_ts_list);
30static LIST_HEAD(inactive_ts_list);
31static DEFINE_SPINLOCK(active_ts_lock);
32static DEFINE_SPINLOCK(inactive_ts_lock);
33static DEFINE_SPINLOCK(ts_bitmap_lock);
34
35static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
36{
37 spin_lock(&active_ts_lock);
38 list_add_tail(&ts->ts_list, &active_ts_list);
39 iscsit_global->active_ts++;
40 spin_unlock(&active_ts_lock);
41}
42
43extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
44{
45 spin_lock(&inactive_ts_lock);
46 list_add_tail(&ts->ts_list, &inactive_ts_list);
47 iscsit_global->inactive_ts++;
48 spin_unlock(&inactive_ts_lock);
49}
50
51static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
52{
53 spin_lock(&active_ts_lock);
54 list_del(&ts->ts_list);
55 iscsit_global->active_ts--;
56 spin_unlock(&active_ts_lock);
57}
58
59static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
60{
61 struct iscsi_thread_set *ts;
62
63 spin_lock(&inactive_ts_lock);
64 if (list_empty(&inactive_ts_list)) {
65 spin_unlock(&inactive_ts_lock);
66 return NULL;
67 }
68
69 list_for_each_entry(ts, &inactive_ts_list, ts_list)
70 break;
71
72 list_del(&ts->ts_list);
73 iscsit_global->inactive_ts--;
74 spin_unlock(&inactive_ts_lock);
75
76 return ts;
77}
78
79extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
80{
81 int allocated_thread_pair_count = 0, i, thread_id;
82 struct iscsi_thread_set *ts = NULL;
83
84 for (i = 0; i < thread_pair_count; i++) {
85 ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
86 if (!ts) {
87 pr_err("Unable to allocate memory for"
88 " thread set.\n");
89 return allocated_thread_pair_count;
90 }
91 /*
92	 * Locate the next available region in the thread_set_bitmap
93 */
94 spin_lock(&ts_bitmap_lock);
95 thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
96 iscsit_global->ts_bitmap_count, get_order(1));
97 spin_unlock(&ts_bitmap_lock);
98 if (thread_id < 0) {
99 pr_err("bitmap_find_free_region() failed for"
100 " thread_set_bitmap\n");
101 kfree(ts);
102 return allocated_thread_pair_count;
103 }
104
105 ts->thread_id = thread_id;
106 ts->status = ISCSI_THREAD_SET_FREE;
107 INIT_LIST_HEAD(&ts->ts_list);
108 spin_lock_init(&ts->ts_state_lock);
109 init_completion(&ts->rx_post_start_comp);
110 init_completion(&ts->tx_post_start_comp);
111 init_completion(&ts->rx_restart_comp);
112 init_completion(&ts->tx_restart_comp);
113 init_completion(&ts->rx_start_comp);
114 init_completion(&ts->tx_start_comp);
115
116 ts->create_threads = 1;
117 ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
118 ISCSI_TX_THREAD_NAME);
119 if (IS_ERR(ts->tx_thread)) {
120 dump_stack();
121 pr_err("Unable to start iscsi_target_tx_thread\n");
122 break;
123 }
124
125 ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
126 ISCSI_RX_THREAD_NAME);
127 if (IS_ERR(ts->rx_thread)) {
128 kthread_stop(ts->tx_thread);
129 pr_err("Unable to start iscsi_target_rx_thread\n");
130 break;
131 }
132 ts->create_threads = 0;
133
134 iscsi_add_ts_to_inactive_list(ts);
135 allocated_thread_pair_count++;
136 }
137
138 pr_debug("Spawned %d thread set(s) (%d total threads).\n",
139 allocated_thread_pair_count, allocated_thread_pair_count * 2);
140 return allocated_thread_pair_count;
141}
142
143extern void iscsi_deallocate_thread_sets(void)
144{
145 u32 released_count = 0;
146 struct iscsi_thread_set *ts = NULL;
147
148 while ((ts = iscsi_get_ts_from_inactive_list())) {
149
150 spin_lock_bh(&ts->ts_state_lock);
151 ts->status = ISCSI_THREAD_SET_DIE;
152 spin_unlock_bh(&ts->ts_state_lock);
153
154 if (ts->rx_thread) {
155 send_sig(SIGINT, ts->rx_thread, 1);
156 kthread_stop(ts->rx_thread);
157 }
158 if (ts->tx_thread) {
159 send_sig(SIGINT, ts->tx_thread, 1);
160 kthread_stop(ts->tx_thread);
161 }
162 /*
163 * Release this thread_id in the thread_set_bitmap
164 */
165 spin_lock(&ts_bitmap_lock);
166 bitmap_release_region(iscsit_global->ts_bitmap,
167 ts->thread_id, get_order(1));
168 spin_unlock(&ts_bitmap_lock);
169
170 released_count++;
171 kfree(ts);
172 }
173
174 if (released_count)
175 pr_debug("Stopped %d thread set(s) (%d total threads)."
176 "\n", released_count, released_count * 2);
177}
178
179static void iscsi_deallocate_extra_thread_sets(void)
180{
181 u32 orig_count, released_count = 0;
182 struct iscsi_thread_set *ts = NULL;
183
184 orig_count = TARGET_THREAD_SET_COUNT;
185
186 while ((iscsit_global->inactive_ts + 1) > orig_count) {
187 ts = iscsi_get_ts_from_inactive_list();
188 if (!ts)
189 break;
190
191 spin_lock_bh(&ts->ts_state_lock);
192 ts->status = ISCSI_THREAD_SET_DIE;
193 spin_unlock_bh(&ts->ts_state_lock);
194
195 if (ts->rx_thread) {
196 send_sig(SIGINT, ts->rx_thread, 1);
197 kthread_stop(ts->rx_thread);
198 }
199 if (ts->tx_thread) {
200 send_sig(SIGINT, ts->tx_thread, 1);
201 kthread_stop(ts->tx_thread);
202 }
203 /*
204 * Release this thread_id in the thread_set_bitmap
205 */
206 spin_lock(&ts_bitmap_lock);
207 bitmap_release_region(iscsit_global->ts_bitmap,
208 ts->thread_id, get_order(1));
209 spin_unlock(&ts_bitmap_lock);
210
211 released_count++;
212 kfree(ts);
213 }
214
215 if (released_count) {
216 pr_debug("Stopped %d thread set(s) (%d total threads)."
217 "\n", released_count, released_count * 2);
218 }
219}
220
221void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
222{
223 iscsi_add_ts_to_active_list(ts);
224
225 spin_lock_bh(&ts->ts_state_lock);
226 conn->thread_set = ts;
227 ts->conn = conn;
228 spin_unlock_bh(&ts->ts_state_lock);
229 /*
230 * Start up the RX thread and wait on rx_post_start_comp. The RX
231 * Thread will then do the same for the TX Thread in
232 * iscsi_rx_thread_pre_handler().
233 */
234 complete(&ts->rx_start_comp);
235 wait_for_completion(&ts->rx_post_start_comp);
236}
237
238struct iscsi_thread_set *iscsi_get_thread_set(void)
239{
240 int allocate_ts = 0;
241 struct completion comp;
242 struct iscsi_thread_set *ts = NULL;
243 /*
244 * If no inactive thread set is available on the first call to
245 * iscsi_get_ts_from_inactive_list(), sleep for a second and
246 * try again. If still none are available after two attempts,
247 * allocate a set ourselves.
248 */
249get_set:
250 ts = iscsi_get_ts_from_inactive_list();
251 if (!ts) {
252 if (allocate_ts == 2)
253 iscsi_allocate_thread_sets(1);
254
255 init_completion(&comp);
256 wait_for_completion_timeout(&comp, 1 * HZ);
257
258 allocate_ts++;
259 goto get_set;
260 }
261
262 ts->delay_inactive = 1;
263 ts->signal_sent = 0;
264 ts->thread_count = 2;
265 init_completion(&ts->rx_restart_comp);
266 init_completion(&ts->tx_restart_comp);
267
268 return ts;
269}
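A plausible login-path pairing of the two helpers above (a sketch under the assumption that connection setup fetches a set and then activates it; the real call site is outside this hunk):

	/* Hypothetical sketch. */
	struct iscsi_thread_set *ts;

	ts = iscsi_get_thread_set();
	iscsi_activate_thread_set(conn, ts);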
270
271void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
272{
273 struct iscsi_thread_set *ts = NULL;
274
275 if (!conn->thread_set) {
276 pr_err("struct iscsi_conn->thread_set is NULL\n");
277 return;
278 }
279 ts = conn->thread_set;
280
281 spin_lock_bh(&ts->ts_state_lock);
282 ts->thread_clear &= ~thread_clear;
283
284 if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
285 (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
286 complete(&ts->rx_restart_comp);
287 else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
288 (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
289 complete(&ts->tx_restart_comp);
290 spin_unlock_bh(&ts->ts_state_lock);
291}
292
293void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
294{
295 struct iscsi_thread_set *ts = NULL;
296
297 if (!conn->thread_set) {
298 pr_err("struct iscsi_conn->thread_set is NULL\n");
299 return;
300 }
301 ts = conn->thread_set;
302
303 spin_lock_bh(&ts->ts_state_lock);
304 ts->signal_sent |= signal_sent;
305 spin_unlock_bh(&ts->ts_state_lock);
306}
307
308int iscsi_release_thread_set(struct iscsi_conn *conn)
309{
310 int thread_called = 0;
311 struct iscsi_thread_set *ts = NULL;
312
313 if (!conn || !conn->thread_set) {
314 pr_err("connection or thread set pointer is NULL\n");
315 BUG();
316 }
317 ts = conn->thread_set;
318
319 spin_lock_bh(&ts->ts_state_lock);
320 ts->status = ISCSI_THREAD_SET_RESET;
321
322 if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
323 strlen(ISCSI_RX_THREAD_NAME)))
324 thread_called = ISCSI_RX_THREAD;
325 else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
326 strlen(ISCSI_TX_THREAD_NAME)))
327 thread_called = ISCSI_TX_THREAD;
328
329 if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
330 (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
331
332 if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
333 send_sig(SIGINT, ts->rx_thread, 1);
334 ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
335 }
336 ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
337 spin_unlock_bh(&ts->ts_state_lock);
338 wait_for_completion(&ts->rx_restart_comp);
339 spin_lock_bh(&ts->ts_state_lock);
340 ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
341 }
342 if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
343 (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
344
345 if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
346 send_sig(SIGINT, ts->tx_thread, 1);
347 ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
348 }
349 ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
350 spin_unlock_bh(&ts->ts_state_lock);
351 wait_for_completion(&ts->tx_restart_comp);
352 spin_lock_bh(&ts->ts_state_lock);
353 ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
354 }
355
356 ts->conn = NULL;
357 ts->status = ISCSI_THREAD_SET_FREE;
358 spin_unlock_bh(&ts->ts_state_lock);
359
360 return 0;
361}
362
363int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
364{
365 struct iscsi_thread_set *ts;
366
367 if (!conn->thread_set)
368 return -1;
369 ts = conn->thread_set;
370
371 spin_lock_bh(&ts->ts_state_lock);
372 if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
373 spin_unlock_bh(&ts->ts_state_lock);
374 return -1;
375 }
376
377 if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
378 send_sig(SIGINT, ts->tx_thread, 1);
379 ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
380 }
381 if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
382 send_sig(SIGINT, ts->rx_thread, 1);
383 ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
384 }
385 spin_unlock_bh(&ts->ts_state_lock);
386
387 return 0;
388}
389
390static void iscsi_check_to_add_additional_sets(void)
391{
392 int thread_sets_add;
393
394 spin_lock(&inactive_ts_lock);
395 thread_sets_add = iscsit_global->inactive_ts;
396 spin_unlock(&inactive_ts_lock);
397 if (thread_sets_add == 1)
398 iscsi_allocate_thread_sets(1);
399}
400
401static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
402{
403 spin_lock_bh(&ts->ts_state_lock);
404 if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
405 spin_unlock_bh(&ts->ts_state_lock);
406 return -1;
407 }
408 spin_unlock_bh(&ts->ts_state_lock);
409
410 return 0;
411}
412
413struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
414{
415 int ret;
416
417 spin_lock_bh(&ts->ts_state_lock);
418 if (ts->create_threads) {
419 spin_unlock_bh(&ts->ts_state_lock);
420 goto sleep;
421 }
422
423 flush_signals(current);
424
425 if (ts->delay_inactive && (--ts->thread_count == 0)) {
426 spin_unlock_bh(&ts->ts_state_lock);
427 iscsi_del_ts_from_active_list(ts);
428
429 if (!iscsit_global->in_shutdown)
430 iscsi_deallocate_extra_thread_sets();
431
432 iscsi_add_ts_to_inactive_list(ts);
433 spin_lock_bh(&ts->ts_state_lock);
434 }
435
436 if ((ts->status == ISCSI_THREAD_SET_RESET) &&
437 (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
438 complete(&ts->rx_restart_comp);
439
440 ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
441 spin_unlock_bh(&ts->ts_state_lock);
442sleep:
443 ret = wait_for_completion_interruptible(&ts->rx_start_comp);
444 if (ret != 0)
445 return NULL;
446
447 if (iscsi_signal_thread_pre_handler(ts) < 0)
448 return NULL;
449
450 if (!ts->conn) {
451 pr_err("struct iscsi_thread_set->conn is NULL for"
452 " thread_id: %d, going back to sleep\n", ts->thread_id);
453 goto sleep;
454 }
455 iscsi_check_to_add_additional_sets();
456 /*
457 * The RX Thread starts up the TX Thread and sleeps.
458 */
459 ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
460 complete(&ts->tx_start_comp);
461 wait_for_completion(&ts->tx_post_start_comp);
462
463 return ts->conn;
464}
465
466struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
467{
468 int ret;
469
470 spin_lock_bh(&ts->ts_state_lock);
471 if (ts->create_threads) {
472 spin_unlock_bh(&ts->ts_state_lock);
473 goto sleep;
474 }
475
476 flush_signals(current);
477
478 if (ts->delay_inactive && (--ts->thread_count == 0)) {
479 spin_unlock_bh(&ts->ts_state_lock);
480 iscsi_del_ts_from_active_list(ts);
481
482 if (!iscsit_global->in_shutdown)
483 iscsi_deallocate_extra_thread_sets();
484
485 iscsi_add_ts_to_inactive_list(ts);
486 spin_lock_bh(&ts->ts_state_lock);
487 }
488 if ((ts->status == ISCSI_THREAD_SET_RESET) &&
489 (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
490 complete(&ts->tx_restart_comp);
491
492 ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
493 spin_unlock_bh(&ts->ts_state_lock);
494sleep:
495 ret = wait_for_completion_interruptible(&ts->tx_start_comp);
496 if (ret != 0)
497 return NULL;
498
499 if (iscsi_signal_thread_pre_handler(ts) < 0)
500 return NULL;
501
502 if (!ts->conn) {
503 pr_err("struct iscsi_thread_set->conn is NULL for"
504 " thread_id: %d, going back to sleep\n",
505 ts->thread_id);
506 goto sleep;
507 }
508
509 iscsi_check_to_add_additional_sets();
510 /*
511 * From the TX thread, up the tx_post_start_comp that the RX Thread is
512 * sleeping on in iscsi_rx_thread_pre_handler(), then up the
513 * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
514 */
515 ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
516 complete(&ts->tx_post_start_comp);
517 complete(&ts->rx_post_start_comp);
518
519 spin_lock_bh(&ts->ts_state_lock);
520 ts->status = ISCSI_THREAD_SET_ACTIVE;
521 spin_unlock_bh(&ts->ts_state_lock);
522
523 return ts->conn;
524}
525
526int iscsi_thread_set_init(void)
527{
528 int size;
529
530 iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
531
532 size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
533 iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
534 if (!iscsit_global->ts_bitmap) {
535 pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
536 return -ENOMEM;
537 }
538
539 spin_lock_init(&active_ts_lock);
540 spin_lock_init(&inactive_ts_lock);
541 spin_lock_init(&ts_bitmap_lock);
542 INIT_LIST_HEAD(&active_ts_list);
543 INIT_LIST_HEAD(&inactive_ts_list);
544
545 return 0;
546}
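
For reference, the allocation above reserves one bit per possible thread-set ID (ISCSI_TS_BITMAP_BITS = 32768, per the header below). A minimal userspace sketch of the same sizing arithmetic, with BITS_TO_LONGS redefined locally since the kernel macro is not available here:

#include <stdio.h>

/* Local stand-in for the kernel's BITS_TO_LONGS() macro. */
#define BITS_PER_LONG		(8 * sizeof(long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int bitmap_bits = 32768;	/* ISCSI_TS_BITMAP_BITS */
	size_t size = BITS_TO_LONGS(bitmap_bits) * sizeof(long);

	/* On a 64-bit build: 32768 bits / 64 = 512 longs = 4096 bytes. */
	printf("%zu bytes cover %u thread-set IDs\n", size, bitmap_bits);
	return 0;
}

So the default 32K-connection limit costs a single 4 KiB kzalloc on 64-bit systems.
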
547
548void iscsi_thread_set_free(void)
549{
550 kfree(iscsit_global->ts_bitmap);
551}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
new file mode 100644
index 000000000000..26e6a95ec203
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -0,0 +1,88 @@
1#ifndef ISCSI_THREAD_QUEUE_H
2#define ISCSI_THREAD_QUEUE_H
3
4/*
5 * Defines for thread sets.
6 */
7extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
8extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
9extern int iscsi_allocate_thread_sets(u32);
10extern void iscsi_deallocate_thread_sets(void);
11extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
12extern struct iscsi_thread_set *iscsi_get_thread_set(void);
13extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
14extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
15extern int iscsi_release_thread_set(struct iscsi_conn *);
16extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
17extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
18extern int iscsi_thread_set_init(void);
19extern void iscsi_thread_set_free(void);
20
21extern int iscsi_target_tx_thread(void *);
22extern int iscsi_target_rx_thread(void *);
23
24#define TARGET_THREAD_SET_COUNT 4
25
26#define ISCSI_RX_THREAD 1
27#define ISCSI_TX_THREAD 2
28#define ISCSI_RX_THREAD_NAME "iscsi_trx"
29#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
30#define ISCSI_BLOCK_RX_THREAD 0x1
31#define ISCSI_BLOCK_TX_THREAD 0x2
32#define ISCSI_CLEAR_RX_THREAD 0x1
33#define ISCSI_CLEAR_TX_THREAD 0x2
34#define ISCSI_SIGNAL_RX_THREAD 0x1
35#define ISCSI_SIGNAL_TX_THREAD 0x2
36
37/* struct iscsi_thread_set->status */
38#define ISCSI_THREAD_SET_FREE 1
39#define ISCSI_THREAD_SET_ACTIVE 2
40#define ISCSI_THREAD_SET_DIE 3
41#define ISCSI_THREAD_SET_RESET 4
42#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
43
44/* By default allow a maximum of 32K iSCSI connections */
45#define ISCSI_TS_BITMAP_BITS 32768
46
47struct iscsi_thread_set {
48 /* flags used for blocking and restarting sets */
49 int blocked_threads;
50 /* flag for creating threads */
51 int create_threads;
52 /* flag for delaying re-adding to inactive list */
53 int delay_inactive;
54 /* status for thread set */
55 int status;
56 /* which threads have had signals sent */
57 int signal_sent;
58 /* flag for which threads exited first */
59 int thread_clear;
60 /* Active threads in the thread set */
61 int thread_count;
62 /* Unique thread ID */
63 u32 thread_id;
64 /* pointer to connection if set is active */
65 struct iscsi_conn *conn;
66 /* used for controlling ts state accesses */
67 spinlock_t ts_state_lock;
68 /* Used for rx side post startup */
69 struct completion rx_post_start_comp;
70 /* Used for tx side post startup */
71 struct completion tx_post_start_comp;
72 /* used for restarting thread queue */
73 struct completion rx_restart_comp;
74 /* used for restarting thread queue */
75 struct completion tx_restart_comp;
76 /* used for normal unused blocking */
77 struct completion rx_start_comp;
78 /* used for normal unused blocking */
79 struct completion tx_start_comp;
80 /* OS descriptor for rx thread */
81 struct task_struct *rx_thread;
82 /* OS descriptor for tx thread */
83 struct task_struct *tx_thread;
84 /* list head for linking this thread set into the active/inactive lists */
85 struct list_head ts_list;
86};
87
88#endif /*** ISCSI_THREAD_QUEUE_H ***/
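
The six completions above implement the startup handshake spelled out in the pre-handler comments: the activation path completes rx_start_comp and then sleeps on rx_post_start_comp; the RX thread wakes, completes tx_start_comp and sleeps on tx_post_start_comp; the TX thread then completes tx_post_start_comp and rx_post_start_comp, and both sides enter their main loops. A minimal userspace analogue of that ordering, using POSIX semaphores instead of kernel completions (an illustration only, not the driver code):

/* Build with: cc -pthread handshake.c -o handshake */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t rx_start, tx_start, tx_post_start, rx_post_start;

static void *rx_thread(void *arg)
{
	(void)arg;
	sem_wait(&rx_start);		/* rx_start_comp */
	puts("rx: woken by activation, starting tx");
	sem_post(&tx_start);		/* complete(&ts->tx_start_comp) */
	sem_wait(&tx_post_start);	/* wait_for_completion(&ts->tx_post_start_comp) */
	puts("rx: tx is up, entering main loop");
	return NULL;
}

static void *tx_thread(void *arg)
{
	(void)arg;
	sem_wait(&tx_start);		/* tx_start_comp */
	sem_post(&tx_post_start);	/* complete(&ts->tx_post_start_comp) */
	sem_post(&rx_post_start);	/* complete(&ts->rx_post_start_comp) */
	puts("tx: entering main loop");
	return NULL;
}

int main(void)
{
	pthread_t rx, tx;

	sem_init(&rx_start, 0, 0);
	sem_init(&tx_start, 0, 0);
	sem_init(&tx_post_start, 0, 0);
	sem_init(&rx_post_start, 0, 0);

	pthread_create(&rx, NULL, rx_thread, NULL);
	pthread_create(&tx, NULL, tx_thread, NULL);

	/* The activation side kicks rx and waits for tx to confirm. */
	sem_post(&rx_start);
	sem_wait(&rx_post_start);
	puts("activation: thread set is active");

	pthread_join(rx, NULL);
	pthread_join(tx, NULL);
	return 0;
}
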
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
new file mode 100644
index 000000000000..a1acb0167902
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -0,0 +1,1819 @@
1/*******************************************************************************
2 * This file contains the iSCSI Target specific utility functions.
3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 ******************************************************************************/
20
21#include <linux/list.h>
22#include <scsi/scsi_tcq.h>
23#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h>
25#include <target/target_core_transport.h>
26#include <target/target_core_tmr.h>
27#include <target/target_core_fabric_ops.h>
28#include <target/target_core_configfs.h>
29
30#include "iscsi_target_core.h"
31#include "iscsi_target_parameters.h"
32#include "iscsi_target_seq_pdu_list.h"
33#include "iscsi_target_datain_values.h"
34#include "iscsi_target_erl0.h"
35#include "iscsi_target_erl1.h"
36#include "iscsi_target_erl2.h"
37#include "iscsi_target_tpg.h"
38#include "iscsi_target_tq.h"
39#include "iscsi_target_util.h"
40#include "iscsi_target.h"
41
42#define PRINT_BUFF(buff, len) \
43{ \
44 int zzz; \
45 \
46 pr_debug("%d:\n", __LINE__); \
47 for (zzz = 0; zzz < len; zzz++) { \
48 if (zzz % 16 == 0) { \
49 if (zzz) \
50 pr_debug("\n"); \
51 pr_debug("%4i: ", zzz); \
52 } \
53 pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
54 } \
55 if ((len + 1) % 16) \
56 pr_debug("\n"); \
57}
58
59extern struct list_head g_tiqn_list;
60extern spinlock_t tiqn_lock;
61
62/*
63 * Called with cmd->r2t_lock held.
64 */
65int iscsit_add_r2t_to_list(
66 struct iscsi_cmd *cmd,
67 u32 offset,
68 u32 xfer_len,
69 int recovery,
70 u32 r2t_sn)
71{
72 struct iscsi_r2t *r2t;
73
74 r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
75 if (!r2t) {
76 pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
77 return -1;
78 }
79 INIT_LIST_HEAD(&r2t->r2t_list);
80
81 r2t->recovery_r2t = recovery;
82 r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
83 r2t->offset = offset;
84 r2t->xfer_len = xfer_len;
85 list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
86 spin_unlock_bh(&cmd->r2t_lock);
87
88 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
89
90 spin_lock_bh(&cmd->r2t_lock);
91 return 0;
92}
93
94struct iscsi_r2t *iscsit_get_r2t_for_eos(
95 struct iscsi_cmd *cmd,
96 u32 offset,
97 u32 length)
98{
99 struct iscsi_r2t *r2t;
100
101 spin_lock_bh(&cmd->r2t_lock);
102 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
103 if ((r2t->offset <= offset) &&
104 (r2t->offset + r2t->xfer_len) >= (offset + length)) {
105 spin_unlock_bh(&cmd->r2t_lock);
106 return r2t;
107 }
108 }
109 spin_unlock_bh(&cmd->r2t_lock);
110
111 pr_err("Unable to locate R2T for Offset: %u, Length:"
112 " %u\n", offset, length);
113 return NULL;
114}
115
116struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
117{
118 struct iscsi_r2t *r2t;
119
120 spin_lock_bh(&cmd->r2t_lock);
121 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
122 if (!r2t->sent_r2t) {
123 spin_unlock_bh(&cmd->r2t_lock);
124 return r2t;
125 }
126 }
127 spin_unlock_bh(&cmd->r2t_lock);
128
129 pr_err("Unable to locate next R2T to send for ITT:"
130 " 0x%08x.\n", cmd->init_task_tag);
131 return NULL;
132}
133
134/*
135 * Called with cmd->r2t_lock held.
136 */
137void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
138{
139 list_del(&r2t->r2t_list);
140 kmem_cache_free(lio_r2t_cache, r2t);
141}
142
143void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
144{
145 struct iscsi_r2t *r2t, *r2t_tmp;
146
147 spin_lock_bh(&cmd->r2t_lock);
148 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
149 iscsit_free_r2t(r2t, cmd);
150 spin_unlock_bh(&cmd->r2t_lock);
151}
152
153/*
154 * May be called from software interrupt (timer) context for allocating
155 * iSCSI NopINs.
156 */
157struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
158{
159 struct iscsi_cmd *cmd;
160
161 cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
162 if (!cmd) {
163 pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
164 return NULL;
165 }
166
167 cmd->conn = conn;
168 INIT_LIST_HEAD(&cmd->i_list);
169 INIT_LIST_HEAD(&cmd->datain_list);
170 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
171 init_completion(&cmd->reject_comp);
172 spin_lock_init(&cmd->datain_lock);
173 spin_lock_init(&cmd->dataout_timeout_lock);
174 spin_lock_init(&cmd->istate_lock);
175 spin_lock_init(&cmd->error_lock);
176 spin_lock_init(&cmd->r2t_lock);
177
178 return cmd;
179}
180
181/*
182 * Called from iscsi_handle_scsi_cmd()
183 */
184struct iscsi_cmd *iscsit_allocate_se_cmd(
185 struct iscsi_conn *conn,
186 u32 data_length,
187 int data_direction,
188 int iscsi_task_attr)
189{
190 struct iscsi_cmd *cmd;
191 struct se_cmd *se_cmd;
192 int sam_task_attr;
193
194 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
195 if (!cmd)
196 return NULL;
197
198 cmd->data_direction = data_direction;
199 cmd->data_length = data_length;
200 /*
201 * Figure out the SAM Task Attribute for the incoming SCSI CDB
202 */
203 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
204 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
205 sam_task_attr = MSG_SIMPLE_TAG;
206 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
207 sam_task_attr = MSG_ORDERED_TAG;
208 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
209 sam_task_attr = MSG_HEAD_TAG;
210 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
211 sam_task_attr = MSG_ACA_TAG;
212 else {
213 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
214 " MSG_SIMPLE_TAG\n", iscsi_task_attr);
215 sam_task_attr = MSG_SIMPLE_TAG;
216 }
217
218 se_cmd = &cmd->se_cmd;
219 /*
220 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
221 */
222 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
223 conn->sess->se_sess, data_length, data_direction,
224 sam_task_attr, &cmd->sense_buffer[0]);
225 return cmd;
226}
227
228struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
229 struct iscsi_conn *conn,
230 u8 function)
231{
232 struct iscsi_cmd *cmd;
233 struct se_cmd *se_cmd;
234 u8 tcm_function;
235
236 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
237 if (!cmd)
238 return NULL;
239
240 cmd->data_direction = DMA_NONE;
241
242 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
243 if (!cmd->tmr_req) {
244 pr_err("Unable to allocate memory for"
245 " Task Management command!\n");
246 return NULL;
247 }
248 /*
249 * TASK_REASSIGN for ERL=2 / connection stays inside of
250 * LIO-Target $FABRIC_MOD
251 */
252 if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
253 return cmd;
254
255 se_cmd = &cmd->se_cmd;
256 /*
257 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
258 */
259 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
260 conn->sess->se_sess, 0, DMA_NONE,
261 MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
262
263 switch (function) {
264 case ISCSI_TM_FUNC_ABORT_TASK:
265 tcm_function = TMR_ABORT_TASK;
266 break;
267 case ISCSI_TM_FUNC_ABORT_TASK_SET:
268 tcm_function = TMR_ABORT_TASK_SET;
269 break;
270 case ISCSI_TM_FUNC_CLEAR_ACA:
271 tcm_function = TMR_CLEAR_ACA;
272 break;
273 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
274 tcm_function = TMR_CLEAR_TASK_SET;
275 break;
276 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
277 tcm_function = TMR_LUN_RESET;
278 break;
279 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
280 tcm_function = TMR_TARGET_WARM_RESET;
281 break;
282 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
283 tcm_function = TMR_TARGET_COLD_RESET;
284 break;
285 default:
286 pr_err("Unknown iSCSI TMR Function:"
287 " 0x%02x\n", function);
288 goto out;
289 }
290
291 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
292 (void *)cmd->tmr_req, tcm_function);
293 if (!se_cmd->se_tmr_req)
294 goto out;
295
296 cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
297
298 return cmd;
299out:
300 iscsit_release_cmd(cmd);
301 if (se_cmd)
302 transport_free_se_cmd(se_cmd);
303 return NULL;
304}
305
306int iscsit_decide_list_to_build(
307 struct iscsi_cmd *cmd,
308 u32 immediate_data_length)
309{
310 struct iscsi_build_list bl;
311 struct iscsi_conn *conn = cmd->conn;
312 struct iscsi_session *sess = conn->sess;
313 struct iscsi_node_attrib *na;
314
315 if (sess->sess_ops->DataSequenceInOrder &&
316 sess->sess_ops->DataPDUInOrder)
317 return 0;
318
319 if (cmd->data_direction == DMA_NONE)
320 return 0;
321
322 na = iscsit_tpg_get_node_attrib(sess);
323 memset(&bl, 0, sizeof(struct iscsi_build_list));
324
325 if (cmd->data_direction == DMA_FROM_DEVICE) {
326 bl.data_direction = ISCSI_PDU_READ;
327 bl.type = PDULIST_NORMAL;
328 if (na->random_datain_pdu_offsets)
329 bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
330 if (na->random_datain_seq_offsets)
331 bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
332 } else {
333 bl.data_direction = ISCSI_PDU_WRITE;
334 bl.immediate_data_length = immediate_data_length;
335 if (na->random_r2t_offsets)
336 bl.randomize |= RANDOM_R2T_OFFSETS;
337
338 if (!cmd->immediate_data && !cmd->unsolicited_data)
339 bl.type = PDULIST_NORMAL;
340 else if (cmd->immediate_data && !cmd->unsolicited_data)
341 bl.type = PDULIST_IMMEDIATE;
342 else if (!cmd->immediate_data && cmd->unsolicited_data)
343 bl.type = PDULIST_UNSOLICITED;
344 else if (cmd->immediate_data && cmd->unsolicited_data)
345 bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
346 }
347
348 return iscsit_do_build_list(cmd, &bl);
349}
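
When ordering constraints are relaxed, the write-side branch above picks one of four build-list types from the immediate/unsolicited data flags. A small standalone sketch of that decision table (the PDULIST_* names are local stand-ins mirroring the driver's constants, not its actual values):

#include <stdio.h>

/* Local stand-ins for the driver's PDULIST_* build-list types. */
enum pdulist_type {
	PDULIST_NORMAL,
	PDULIST_IMMEDIATE,
	PDULIST_UNSOLICITED,
	PDULIST_IMMEDIATE_AND_UNSOLICITED,
};

static enum pdulist_type write_list_type(int immediate, int unsolicited)
{
	if (immediate && unsolicited)
		return PDULIST_IMMEDIATE_AND_UNSOLICITED;
	if (immediate)
		return PDULIST_IMMEDIATE;
	if (unsolicited)
		return PDULIST_UNSOLICITED;
	return PDULIST_NORMAL;
}

int main(void)
{
	static const char *names[] = {
		"PDULIST_NORMAL", "PDULIST_IMMEDIATE",
		"PDULIST_UNSOLICITED", "PDULIST_IMMEDIATE_AND_UNSOLICITED",
	};
	int imm, unsol;

	for (imm = 0; imm <= 1; imm++)
		for (unsol = 0; unsol <= 1; unsol++)
			printf("immediate=%d unsolicited=%d -> %s\n",
			       imm, unsol, names[write_list_type(imm, unsol)]);
	return 0;
}
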
350
351struct iscsi_seq *iscsit_get_seq_holder_for_datain(
352 struct iscsi_cmd *cmd,
353 u32 seq_send_order)
354{
355 u32 i;
356
357 for (i = 0; i < cmd->seq_count; i++)
358 if (cmd->seq_list[i].seq_send_order == seq_send_order)
359 return &cmd->seq_list[i];
360
361 return NULL;
362}
363
364struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
365{
366 u32 i;
367
368 if (!cmd->seq_list) {
369 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
370 return NULL;
371 }
372
373 for (i = 0; i < cmd->seq_count; i++) {
374 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
375 continue;
376 if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
377 cmd->seq_send_order++;
378 return &cmd->seq_list[i];
379 }
380 }
381
382 return NULL;
383}
384
385struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
386 struct iscsi_cmd *cmd,
387 u32 r2t_sn)
388{
389 struct iscsi_r2t *r2t;
390
391 spin_lock_bh(&cmd->r2t_lock);
392 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
393 if (r2t->r2t_sn == r2t_sn) {
394 spin_unlock_bh(&cmd->r2t_lock);
395 return r2t;
396 }
397 }
398 spin_unlock_bh(&cmd->r2t_lock);
399
400 return NULL;
401}
402
403static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
404{
405 int ret;
406
407 /*
408 * This is the proper method of checking received CmdSN against
409 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
410 * of order CmdSNs due to multiple connection sessions and/or
411 * CRC failures.
412 */
413 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
414 pr_err("Received CmdSN: 0x%08x is greater than"
415 " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
416 sess->max_cmd_sn);
417 ret = CMDSN_ERROR_CANNOT_RECOVER;
418
419 } else if (cmdsn == sess->exp_cmd_sn) {
420 sess->exp_cmd_sn++;
421 pr_debug("Received CmdSN matches ExpCmdSN,"
422 " incremented ExpCmdSN to: 0x%08x\n",
423 sess->exp_cmd_sn);
424 ret = CMDSN_NORMAL_OPERATION;
425
426 } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
427 pr_debug("Received CmdSN: 0x%08x is greater"
428 " than ExpCmdSN: 0x%08x, not acknowledging.\n",
429 cmdsn, sess->exp_cmd_sn);
430 ret = CMDSN_HIGHER_THAN_EXP;
431
432 } else {
433 pr_err("Received CmdSN: 0x%08x is less than"
434 " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
435 sess->exp_cmd_sn);
436 ret = CMDSN_LOWER_THAN_EXP;
437 }
438
439 return ret;
440}
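
The checks above rely on serial number arithmetic so that CmdSN comparisons stay correct when the 32-bit counter wraps. A standalone sketch of the same classification; sna_gt() is a local stand-in for the kernel's iscsi_sna_gt() helper (assumed to do an RFC 1982-style comparison), and the example values are made up:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for iscsi_sna_gt(): 32-bit serial number
 * comparison, so "greater than" remains meaningful across wrap. */
static int sna_gt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t exp_cmd_sn = 0xfffffffe, max_cmd_sn = 0x0000000c;

	/* 0x00000003 has wrapped past zero but is still inside the window. */
	uint32_t cmdsn = 0x00000003;

	if (sna_gt(cmdsn, max_cmd_sn))
		puts("protocol error: beyond MaxCmdSN");
	else if (cmdsn == exp_cmd_sn)
		puts("in order: matches ExpCmdSN");
	else if (sna_gt(cmdsn, exp_cmd_sn))
		puts("out of order: higher than ExpCmdSN, queue it");
	else
		puts("duplicate: lower than ExpCmdSN, drop it");
	return 0;
}
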
441
442/*
443 * Commands may be received out of order if MC/S is in use.
444 * Ensure they are executed in CmdSN order.
445 */
446int iscsit_sequence_cmd(
447 struct iscsi_conn *conn,
448 struct iscsi_cmd *cmd,
449 u32 cmdsn)
450{
451 int ret;
452 int cmdsn_ret;
453
454 mutex_lock(&conn->sess->cmdsn_mutex);
455
456 cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
457 switch (cmdsn_ret) {
458 case CMDSN_NORMAL_OPERATION:
459 ret = iscsit_execute_cmd(cmd, 0);
460 if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
461 iscsit_execute_ooo_cmdsns(conn->sess);
462 break;
463 case CMDSN_HIGHER_THAN_EXP:
464 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
465 break;
466 case CMDSN_LOWER_THAN_EXP:
467 cmd->i_state = ISTATE_REMOVE;
468 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
469 ret = cmdsn_ret;
470 break;
471 default:
472 ret = cmdsn_ret;
473 break;
474 }
475 mutex_unlock(&conn->sess->cmdsn_mutex);
476
477 return ret;
478}
479
480int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
481{
482 struct iscsi_conn *conn = cmd->conn;
483 struct se_cmd *se_cmd = &cmd->se_cmd;
484 struct iscsi_data *hdr = (struct iscsi_data *) buf;
485 u32 payload_length = ntoh24(hdr->dlength);
486
487 if (conn->sess->sess_ops->InitialR2T) {
488 pr_err("Received unexpected unsolicited data"
489 " while InitialR2T=Yes, protocol error.\n");
490 transport_send_check_condition_and_sense(se_cmd,
491 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
492 return -1;
493 }
494
495 if ((cmd->first_burst_len + payload_length) >
496 conn->sess->sess_ops->FirstBurstLength) {
497 pr_err("Total %u bytes exceeds FirstBurstLength: %u"
498 " for this Unsolicited DataOut Burst.\n",
499 (cmd->first_burst_len + payload_length),
500 conn->sess->sess_ops->FirstBurstLength);
501 transport_send_check_condition_and_sense(se_cmd,
502 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
503 return -1;
504 }
505
506 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
507 return 0;
508
509 if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
510 ((cmd->first_burst_len + payload_length) !=
511 conn->sess->sess_ops->FirstBurstLength)) {
512 pr_err("Unsolicited non-immediate data received %u"
513 " does not equal FirstBurstLength: %u, and does"
514 " not equal ExpXferLen %u.\n",
515 (cmd->first_burst_len + payload_length),
516 conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
517 transport_send_check_condition_and_sense(se_cmd,
518 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
519 return -1;
520 }
521 return 0;
522}
523
524struct iscsi_cmd *iscsit_find_cmd_from_itt(
525 struct iscsi_conn *conn,
526 u32 init_task_tag)
527{
528 struct iscsi_cmd *cmd;
529
530 spin_lock_bh(&conn->cmd_lock);
531 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
532 if (cmd->init_task_tag == init_task_tag) {
533 spin_unlock_bh(&conn->cmd_lock);
534 return cmd;
535 }
536 }
537 spin_unlock_bh(&conn->cmd_lock);
538
539 pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
540 init_task_tag, conn->cid);
541 return NULL;
542}
543
544struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
545 struct iscsi_conn *conn,
546 u32 init_task_tag,
547 u32 length)
548{
549 struct iscsi_cmd *cmd;
550
551 spin_lock_bh(&conn->cmd_lock);
552 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
553 if (cmd->init_task_tag == init_task_tag) {
554 spin_unlock_bh(&conn->cmd_lock);
555 return cmd;
556 }
557 }
558 spin_unlock_bh(&conn->cmd_lock);
559
560 pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
561 " dumping payload\n", init_task_tag, conn->cid);
562 if (length)
563 iscsit_dump_data_payload(conn, length, 1);
564
565 return NULL;
566}
567
568struct iscsi_cmd *iscsit_find_cmd_from_ttt(
569 struct iscsi_conn *conn,
570 u32 targ_xfer_tag)
571{
572 struct iscsi_cmd *cmd = NULL;
573
574 spin_lock_bh(&conn->cmd_lock);
575 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
576 if (cmd->targ_xfer_tag == targ_xfer_tag) {
577 spin_unlock_bh(&conn->cmd_lock);
578 return cmd;
579 }
580 }
581 spin_unlock_bh(&conn->cmd_lock);
582
583 pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
584 targ_xfer_tag, conn->cid);
585 return NULL;
586}
587
588int iscsit_find_cmd_for_recovery(
589 struct iscsi_session *sess,
590 struct iscsi_cmd **cmd_ptr,
591 struct iscsi_conn_recovery **cr_ptr,
592 u32 init_task_tag)
593{
594 struct iscsi_cmd *cmd = NULL;
595 struct iscsi_conn_recovery *cr;
596 /*
597 * Scan through the inactive connection recovery list's command list.
598 * If init_task_tag matches, the command is still tied to the inactive connection.
599 */
600 spin_lock(&sess->cr_i_lock);
601 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
602 spin_lock(&cr->conn_recovery_cmd_lock);
603 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
604 if (cmd->init_task_tag == init_task_tag) {
605 spin_unlock(&cr->conn_recovery_cmd_lock);
606 spin_unlock(&sess->cr_i_lock);
607
608 *cr_ptr = cr;
609 *cmd_ptr = cmd;
610 return -2;
611 }
612 }
613 spin_unlock(&cr->conn_recovery_cmd_lock);
614 }
615 spin_unlock(&sess->cr_i_lock);
616 /*
617 * Scan through the active connection recovery list's command list.
618 * If init_task_tag matches, the command is ready to be reassigned.
619 */
620 spin_lock(&sess->cr_a_lock);
621 list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
622 spin_lock(&cr->conn_recovery_cmd_lock);
623 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
624 if (cmd->init_task_tag == init_task_tag) {
625 spin_unlock(&cr->conn_recovery_cmd_lock);
626 spin_unlock(&sess->cr_a_lock);
627
628 *cr_ptr = cr;
629 *cmd_ptr = cmd;
630 return 0;
631 }
632 }
633 spin_unlock(&cr->conn_recovery_cmd_lock);
634 }
635 spin_unlock(&sess->cr_a_lock);
636
637 return -1;
638}
639
640void iscsit_add_cmd_to_immediate_queue(
641 struct iscsi_cmd *cmd,
642 struct iscsi_conn *conn,
643 u8 state)
644{
645 struct iscsi_queue_req *qr;
646
647 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
648 if (!qr) {
649 pr_err("Unable to allocate memory for"
650 " struct iscsi_queue_req\n");
651 return;
652 }
653 INIT_LIST_HEAD(&qr->qr_list);
654 qr->cmd = cmd;
655 qr->state = state;
656
657 spin_lock_bh(&conn->immed_queue_lock);
658 list_add_tail(&qr->qr_list, &conn->immed_queue_list);
659 atomic_inc(&cmd->immed_queue_count);
660 atomic_set(&conn->check_immediate_queue, 1);
661 spin_unlock_bh(&conn->immed_queue_lock);
662
663 wake_up_process(conn->thread_set->tx_thread);
664}
665
666struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
667{
668 struct iscsi_queue_req *qr;
669
670 spin_lock_bh(&conn->immed_queue_lock);
671 if (list_empty(&conn->immed_queue_list)) {
672 spin_unlock_bh(&conn->immed_queue_lock);
673 return NULL;
674 }
675 list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
676 break;
677
678 list_del(&qr->qr_list);
679 if (qr->cmd)
680 atomic_dec(&qr->cmd->immed_queue_count);
681 spin_unlock_bh(&conn->immed_queue_lock);
682
683 return qr;
684}
685
686static void iscsit_remove_cmd_from_immediate_queue(
687 struct iscsi_cmd *cmd,
688 struct iscsi_conn *conn)
689{
690 struct iscsi_queue_req *qr, *qr_tmp;
691
692 spin_lock_bh(&conn->immed_queue_lock);
693 if (!atomic_read(&cmd->immed_queue_count)) {
694 spin_unlock_bh(&conn->immed_queue_lock);
695 return;
696 }
697
698 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
699 if (qr->cmd != cmd)
700 continue;
701
702 atomic_dec(&qr->cmd->immed_queue_count);
703 list_del(&qr->qr_list);
704 kmem_cache_free(lio_qr_cache, qr);
705 }
706 spin_unlock_bh(&conn->immed_queue_lock);
707
708 if (atomic_read(&cmd->immed_queue_count)) {
709 pr_err("ITT: 0x%08x immed_queue_count: %d\n",
710 cmd->init_task_tag,
711 atomic_read(&cmd->immed_queue_count));
712 }
713}
714
715void iscsit_add_cmd_to_response_queue(
716 struct iscsi_cmd *cmd,
717 struct iscsi_conn *conn,
718 u8 state)
719{
720 struct iscsi_queue_req *qr;
721
722 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
723 if (!qr) {
724 pr_err("Unable to allocate memory for"
725 " struct iscsi_queue_req\n");
726 return;
727 }
728 INIT_LIST_HEAD(&qr->qr_list);
729 qr->cmd = cmd;
730 qr->state = state;
731
732 spin_lock_bh(&conn->response_queue_lock);
733 list_add_tail(&qr->qr_list, &conn->response_queue_list);
734 atomic_inc(&cmd->response_queue_count);
735 spin_unlock_bh(&conn->response_queue_lock);
736
737 wake_up_process(conn->thread_set->tx_thread);
738}
739
740struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
741{
742 struct iscsi_queue_req *qr;
743
744 spin_lock_bh(&conn->response_queue_lock);
745 if (list_empty(&conn->response_queue_list)) {
746 spin_unlock_bh(&conn->response_queue_lock);
747 return NULL;
748 }
749
750 list_for_each_entry(qr, &conn->response_queue_list, qr_list)
751 break;
752
753 list_del(&qr->qr_list);
754 if (qr->cmd)
755 atomic_dec(&qr->cmd->response_queue_count);
756 spin_unlock_bh(&conn->response_queue_lock);
757
758 return qr;
759}
760
761static void iscsit_remove_cmd_from_response_queue(
762 struct iscsi_cmd *cmd,
763 struct iscsi_conn *conn)
764{
765 struct iscsi_queue_req *qr, *qr_tmp;
766
767 spin_lock_bh(&conn->response_queue_lock);
768 if (!atomic_read(&cmd->response_queue_count)) {
769 spin_unlock_bh(&conn->response_queue_lock);
770 return;
771 }
772
773 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
774 qr_list) {
775 if (qr->cmd != cmd)
776 continue;
777
778 atomic_dec(&qr->cmd->response_queue_count);
779 list_del(&qr->qr_list);
780 kmem_cache_free(lio_qr_cache, qr);
781 }
782 spin_unlock_bh(&conn->response_queue_lock);
783
784 if (atomic_read(&cmd->response_queue_count)) {
785 pr_err("ITT: 0x%08x response_queue_count: %d\n",
786 cmd->init_task_tag,
787 atomic_read(&cmd->response_queue_count));
788 }
789}
790
791void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
792{
793 struct iscsi_queue_req *qr, *qr_tmp;
794
795 spin_lock_bh(&conn->immed_queue_lock);
796 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
797 list_del(&qr->qr_list);
798 if (qr->cmd)
799 atomic_dec(&qr->cmd->immed_queue_count);
800
801 kmem_cache_free(lio_qr_cache, qr);
802 }
803 spin_unlock_bh(&conn->immed_queue_lock);
804
805 spin_lock_bh(&conn->response_queue_lock);
806 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
807 qr_list) {
808 list_del(&qr->qr_list);
809 if (qr->cmd)
810 atomic_dec(&qr->cmd->response_queue_count);
811
812 kmem_cache_free(lio_qr_cache, qr);
813 }
814 spin_unlock_bh(&conn->response_queue_lock);
815}
816
817void iscsit_release_cmd(struct iscsi_cmd *cmd)
818{
819 struct iscsi_conn *conn = cmd->conn;
820 int i;
821
822 iscsit_free_r2ts_from_list(cmd);
823 iscsit_free_all_datain_reqs(cmd);
824
825 kfree(cmd->buf_ptr);
826 kfree(cmd->pdu_list);
827 kfree(cmd->seq_list);
828 kfree(cmd->tmr_req);
829 kfree(cmd->iov_data);
830
831 for (i = 0; i < cmd->t_mem_sg_nents; i++)
832 __free_page(sg_page(&cmd->t_mem_sg[i]));
833
834 kfree(cmd->t_mem_sg);
835
836 if (conn) {
837 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
838 iscsit_remove_cmd_from_response_queue(cmd, conn);
839 }
840
841 kmem_cache_free(lio_cmd_cache, cmd);
842}
843
844int iscsit_check_session_usage_count(struct iscsi_session *sess)
845{
846 spin_lock_bh(&sess->session_usage_lock);
847 if (sess->session_usage_count != 0) {
848 sess->session_waiting_on_uc = 1;
849 spin_unlock_bh(&sess->session_usage_lock);
850 if (in_interrupt())
851 return 2;
852
853 wait_for_completion(&sess->session_waiting_on_uc_comp);
854 return 1;
855 }
856 spin_unlock_bh(&sess->session_usage_lock);
857
858 return 0;
859}
860
861void iscsit_dec_session_usage_count(struct iscsi_session *sess)
862{
863 spin_lock_bh(&sess->session_usage_lock);
864 sess->session_usage_count--;
865
866 if (!sess->session_usage_count && sess->session_waiting_on_uc)
867 complete(&sess->session_waiting_on_uc_comp);
868
869 spin_unlock_bh(&sess->session_usage_lock);
870}
871
872void iscsit_inc_session_usage_count(struct iscsi_session *sess)
873{
874 spin_lock_bh(&sess->session_usage_lock);
875 sess->session_usage_count++;
876 spin_unlock_bh(&sess->session_usage_lock);
877}
878
879/*
880 * Used before iscsit_do_{rx,tx}_data() to determine the iov and {rx,tx}_marker
881 * array counts needed for sync and steering.
882 */
883static int iscsit_determine_sync_and_steering_counts(
884 struct iscsi_conn *conn,
885 struct iscsi_data_count *count)
886{
887 u32 length = count->data_length;
888 u32 marker, markint;
889
890 count->sync_and_steering = 1;
891
892 marker = (count->type == ISCSI_RX_DATA) ?
893 conn->of_marker : conn->if_marker;
894 markint = (count->type == ISCSI_RX_DATA) ?
895 (conn->conn_ops->OFMarkInt * 4) :
896 (conn->conn_ops->IFMarkInt * 4);
897 count->ss_iov_count = count->iov_count;
898
899 while (length > 0) {
900 if (length >= marker) {
901 count->ss_iov_count += 3;
902 count->ss_marker_count += 2;
903
904 length -= marker;
905 marker = markint;
906 } else
907 length = 0;
908 }
909
910 return 0;
911}
912
913/*
914 * Set up conn->if_marker and conn->of_marker values based upon
915 * the initial marker-less interval. (see iSCSI v19 A.2)
916 */
917int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
918{
919 int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
920 /*
921 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
922 */
923 u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
924 u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
925
926 if (conn->conn_ops->OFMarker) {
927 /*
928 * Account for the first Login Command received not
929 * via iscsi_recv_msg().
930 */
931 conn->of_marker += ISCSI_HDR_LEN;
932 if (conn->of_marker <= OFMarkInt) {
933 conn->of_marker = (OFMarkInt - conn->of_marker);
934 } else {
935 login_ofmarker_count = (conn->of_marker / OFMarkInt);
936 next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
937 (login_ofmarker_count * MARKER_SIZE);
938 conn->of_marker = (next_marker - conn->of_marker);
939 }
940 conn->of_marker_offset = 0;
941 pr_debug("Setting OFMarker value to %u based on Initial"
942 " Markerless Interval.\n", conn->of_marker);
943 }
944
945 if (conn->conn_ops->IFMarker) {
946 if (conn->if_marker <= IFMarkInt) {
947 conn->if_marker = (IFMarkInt - conn->if_marker);
948 } else {
949 login_ifmarker_count = (conn->if_marker / IFMarkInt);
950 next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
951 (login_ifmarker_count * MARKER_SIZE);
952 conn->if_marker = (next_marker - conn->if_marker);
953 }
954 pr_debug("Setting IFMarker value to %u based on Initial"
955 " Markerless Interval.\n", conn->if_marker);
956 }
957
958 return 0;
959}
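
As a worked example of the OFMarker branch above (all numbers are made up for illustration): with OFMarkInt negotiated as 512 words, i.e. 2048 bytes, and 3000 bytes plus the 48-byte Login header already consumed before markers start, the first marker lands 1056 bytes into the full-feature stream. A standalone sketch of that arithmetic:

#include <stdio.h>

#define ISCSI_HDR_LEN	48	/* basic header segment size */
#define MARKER_SIZE	8	/* bytes inserted per marker */

int main(void)
{
	/* Hypothetical negotiation: OFMarkInt=512 words => 2048 bytes. */
	unsigned int ofmarkint_bytes = 512 * 4;
	/* Bytes consumed during login, before the marker stream begins. */
	unsigned int of_marker = 3000 + ISCSI_HDR_LEN;
	unsigned int next_marker, count;

	if (of_marker <= ofmarkint_bytes) {
		of_marker = ofmarkint_bytes - of_marker;
	} else {
		count = of_marker / ofmarkint_bytes;
		next_marker = ofmarkint_bytes * (count + 1) + count * MARKER_SIZE;
		of_marker = next_marker - of_marker;
	}
	printf("first OFMarker due after %u more bytes\n", of_marker);
	return 0;
}
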
960
961struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
962{
963 struct iscsi_conn *conn;
964
965 spin_lock_bh(&sess->conn_lock);
966 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
967 if ((conn->cid == cid) &&
968 (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
969 iscsit_inc_conn_usage_count(conn);
970 spin_unlock_bh(&sess->conn_lock);
971 return conn;
972 }
973 }
974 spin_unlock_bh(&sess->conn_lock);
975
976 return NULL;
977}
978
979struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
980{
981 struct iscsi_conn *conn;
982
983 spin_lock_bh(&sess->conn_lock);
984 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
985 if (conn->cid == cid) {
986 iscsit_inc_conn_usage_count(conn);
987 spin_lock(&conn->state_lock);
988 atomic_set(&conn->connection_wait_rcfr, 1);
989 spin_unlock(&conn->state_lock);
990 spin_unlock_bh(&sess->conn_lock);
991 return conn;
992 }
993 }
994 spin_unlock_bh(&sess->conn_lock);
995
996 return NULL;
997}
998
999void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
1000{
1001 spin_lock_bh(&conn->conn_usage_lock);
1002 if (conn->conn_usage_count != 0) {
1003 conn->conn_waiting_on_uc = 1;
1004 spin_unlock_bh(&conn->conn_usage_lock);
1005
1006 wait_for_completion(&conn->conn_waiting_on_uc_comp);
1007 return;
1008 }
1009 spin_unlock_bh(&conn->conn_usage_lock);
1010}
1011
1012void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
1013{
1014 spin_lock_bh(&conn->conn_usage_lock);
1015 conn->conn_usage_count--;
1016
1017 if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
1018 complete(&conn->conn_waiting_on_uc_comp);
1019
1020 spin_unlock_bh(&conn->conn_usage_lock);
1021}
1022
1023void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
1024{
1025 spin_lock_bh(&conn->conn_usage_lock);
1026 conn->conn_usage_count++;
1027 spin_unlock_bh(&conn->conn_usage_lock);
1028}
1029
1030static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
1031{
1032 u8 state;
1033 struct iscsi_cmd *cmd;
1034
1035 cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
1036 if (!cmd)
1037 return -1;
1038
1039 cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
1040 state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
1041 ISTATE_SEND_NOPIN_NO_RESPONSE;
1042 cmd->init_task_tag = 0xFFFFFFFF;
1043 spin_lock_bh(&conn->sess->ttt_lock);
1044 cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
1045 0xFFFFFFFF;
1046 if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
1047 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
1048 spin_unlock_bh(&conn->sess->ttt_lock);
1049
1050 spin_lock_bh(&conn->cmd_lock);
1051 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1052 spin_unlock_bh(&conn->cmd_lock);
1053
1054 if (want_response)
1055 iscsit_start_nopin_response_timer(conn);
1056 iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
1057
1058 return 0;
1059}
1060
1061static void iscsit_handle_nopin_response_timeout(unsigned long data)
1062{
1063 struct iscsi_conn *conn = (struct iscsi_conn *) data;
1064
1065 iscsit_inc_conn_usage_count(conn);
1066
1067 spin_lock_bh(&conn->nopin_timer_lock);
1068 if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
1069 spin_unlock_bh(&conn->nopin_timer_lock);
1070 iscsit_dec_conn_usage_count(conn);
1071 return;
1072 }
1073
1074 pr_debug("Did not receive response to NOPIN on CID: %hu on"
1075 " SID: %u, failing connection.\n", conn->cid,
1076 conn->sess->sid);
1077 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1078 spin_unlock_bh(&conn->nopin_timer_lock);
1079
1080 {
1081 struct iscsi_portal_group *tpg = conn->sess->tpg;
1082 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
1083
1084 if (tiqn) {
1085 spin_lock_bh(&tiqn->sess_err_stats.lock);
1086 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
1087 (void *)conn->sess->sess_ops->InitiatorName);
1088 tiqn->sess_err_stats.last_sess_failure_type =
1089 ISCSI_SESS_ERR_CXN_TIMEOUT;
1090 tiqn->sess_err_stats.cxn_timeout_errors++;
1091 conn->sess->conn_timeout_errors++;
1092 spin_unlock_bh(&tiqn->sess_err_stats.lock);
1093 }
1094 }
1095
1096 iscsit_cause_connection_reinstatement(conn, 0);
1097 iscsit_dec_conn_usage_count(conn);
1098}
1099
1100void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
1101{
1102 struct iscsi_session *sess = conn->sess;
1103 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1104
1105 spin_lock_bh(&conn->nopin_timer_lock);
1106 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1107 spin_unlock_bh(&conn->nopin_timer_lock);
1108 return;
1109 }
1110
1111 mod_timer(&conn->nopin_response_timer,
1112 (get_jiffies_64() + na->nopin_response_timeout * HZ));
1113 spin_unlock_bh(&conn->nopin_timer_lock);
1114}
1115
1116/*
1117 * Called with conn->nopin_timer_lock held.
1118 */
1119void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
1120{
1121 struct iscsi_session *sess = conn->sess;
1122 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1123
1124 spin_lock_bh(&conn->nopin_timer_lock);
1125 if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
1126 spin_unlock_bh(&conn->nopin_timer_lock);
1127 return;
1128 }
1129
1130 init_timer(&conn->nopin_response_timer);
1131 conn->nopin_response_timer.expires =
1132 (get_jiffies_64() + na->nopin_response_timeout * HZ);
1133 conn->nopin_response_timer.data = (unsigned long)conn;
1134 conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
1135 conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
1136 conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
1137 add_timer(&conn->nopin_response_timer);
1138
1139 pr_debug("Started NOPIN Response Timer on CID: %d to %u"
1140 " seconds\n", conn->cid, na->nopin_response_timeout);
1141 spin_unlock_bh(&conn->nopin_timer_lock);
1142}
1143
1144void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
1145{
1146 spin_lock_bh(&conn->nopin_timer_lock);
1147 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1148 spin_unlock_bh(&conn->nopin_timer_lock);
1149 return;
1150 }
1151 conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
1152 spin_unlock_bh(&conn->nopin_timer_lock);
1153
1154 del_timer_sync(&conn->nopin_response_timer);
1155
1156 spin_lock_bh(&conn->nopin_timer_lock);
1157 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1158 spin_unlock_bh(&conn->nopin_timer_lock);
1159}
1160
1161static void iscsit_handle_nopin_timeout(unsigned long data)
1162{
1163 struct iscsi_conn *conn = (struct iscsi_conn *) data;
1164
1165 iscsit_inc_conn_usage_count(conn);
1166
1167 spin_lock_bh(&conn->nopin_timer_lock);
1168 if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
1169 spin_unlock_bh(&conn->nopin_timer_lock);
1170 iscsit_dec_conn_usage_count(conn);
1171 return;
1172 }
1173 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1174 spin_unlock_bh(&conn->nopin_timer_lock);
1175
1176 iscsit_add_nopin(conn, 1);
1177 iscsit_dec_conn_usage_count(conn);
1178}
1179
1180/*
1181 * Called with conn->nopin_timer_lock held.
1182 */
1183void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
1184{
1185 struct iscsi_session *sess = conn->sess;
1186 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1187 /*
1188 * NOPIN timeout is disabled.
1189 */
1190 if (!na->nopin_timeout)
1191 return;
1192
1193 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
1194 return;
1195
1196 init_timer(&conn->nopin_timer);
1197 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1198 conn->nopin_timer.data = (unsigned long)conn;
1199 conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1200 conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1201 conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1202 add_timer(&conn->nopin_timer);
1203
1204 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1205 " interval\n", conn->cid, na->nopin_timeout);
1206}
1207
1208void iscsit_start_nopin_timer(struct iscsi_conn *conn)
1209{
1210 struct iscsi_session *sess = conn->sess;
1211 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1212 /*
1213 * NOPIN timeout is disabled.
1214 */
1215 if (!na->nopin_timeout)
1216 return;
1217
1218 spin_lock_bh(&conn->nopin_timer_lock);
1219 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
1220 spin_unlock_bh(&conn->nopin_timer_lock);
1221 return;
1222 }
1223
1224 init_timer(&conn->nopin_timer);
1225 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1226 conn->nopin_timer.data = (unsigned long)conn;
1227 conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1228 conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1229 conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1230 add_timer(&conn->nopin_timer);
1231
1232 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1233 " interval\n", conn->cid, na->nopin_timeout);
1234 spin_unlock_bh(&conn->nopin_timer_lock);
1235}
1236
1237void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
1238{
1239 spin_lock_bh(&conn->nopin_timer_lock);
1240 if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
1241 spin_unlock_bh(&conn->nopin_timer_lock);
1242 return;
1243 }
1244 conn->nopin_timer_flags |= ISCSI_TF_STOP;
1245 spin_unlock_bh(&conn->nopin_timer_lock);
1246
1247 del_timer_sync(&conn->nopin_timer);
1248
1249 spin_lock_bh(&conn->nopin_timer_lock);
1250 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1251 spin_unlock_bh(&conn->nopin_timer_lock);
1252}
1253
1254int iscsit_send_tx_data(
1255 struct iscsi_cmd *cmd,
1256 struct iscsi_conn *conn,
1257 int use_misc)
1258{
1259 int tx_sent, tx_size;
1260 u32 iov_count;
1261 struct kvec *iov;
1262
1263send_data:
1264 tx_size = cmd->tx_size;
1265
1266 if (!use_misc) {
1267 iov = &cmd->iov_data[0];
1268 iov_count = cmd->iov_data_count;
1269 } else {
1270 iov = &cmd->iov_misc[0];
1271 iov_count = cmd->iov_misc_count;
1272 }
1273
1274 tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
1275 if (tx_size != tx_sent) {
1276 if (tx_sent == -EAGAIN) {
1277 pr_err("tx_data() returned -EAGAIN\n");
1278 goto send_data;
1279 } else
1280 return -1;
1281 }
1282 cmd->tx_size = 0;
1283
1284 return 0;
1285}
1286
1287int iscsit_fe_sendpage_sg(
1288 struct iscsi_cmd *cmd,
1289 struct iscsi_conn *conn)
1290{
1291 struct scatterlist *sg = cmd->first_data_sg;
1292 struct kvec iov;
1293 u32 tx_hdr_size, data_len;
1294 u32 offset = cmd->first_data_sg_off;
1295 int tx_sent;
1296
1297send_hdr:
1298 tx_hdr_size = ISCSI_HDR_LEN;
1299 if (conn->conn_ops->HeaderDigest)
1300 tx_hdr_size += ISCSI_CRC_LEN;
1301
1302 iov.iov_base = cmd->pdu;
1303 iov.iov_len = tx_hdr_size;
1304
1305 tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
1306 if (tx_hdr_size != tx_sent) {
1307 if (tx_sent == -EAGAIN) {
1308 pr_err("tx_data() returned -EAGAIN\n");
1309 goto send_hdr;
1310 }
1311 return -1;
1312 }
1313
1314 data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
1315 if (conn->conn_ops->DataDigest)
1316 data_len -= ISCSI_CRC_LEN;
1317
1318 /*
1319 * Perform sendpage() for each page in the scatterlist
1320 */
1321 while (data_len) {
1322 u32 space = (sg->length - offset);
1323 u32 sub_len = min_t(u32, data_len, space);
1324send_pg:
1325 tx_sent = conn->sock->ops->sendpage(conn->sock,
1326 sg_page(sg), sg->offset + offset, sub_len, 0);
1327 if (tx_sent != sub_len) {
1328 if (tx_sent == -EAGAIN) {
1329 pr_err("tcp_sendpage() returned"
1330 " -EAGAIN\n");
1331 goto send_pg;
1332 }
1333
1334 pr_err("tcp_sendpage() failure: %d\n",
1335 tx_sent);
1336 return -1;
1337 }
1338
1339 data_len -= sub_len;
1340 offset = 0;
1341 sg = sg_next(sg);
1342 }
1343
1344send_padding:
1345 if (cmd->padding) {
1346 struct kvec *iov_p =
1347 &cmd->iov_data[cmd->iov_data_count-1];
1348
1349 tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1350 if (cmd->padding != tx_sent) {
1351 if (tx_sent == -EAGAIN) {
1352 pr_err("tx_data() returned -EAGAIN\n");
1353 goto send_padding;
1354 }
1355 return -1;
1356 }
1357 }
1358
1359send_datacrc:
1360 if (conn->conn_ops->DataDigest) {
1361 struct kvec *iov_d =
1362 &cmd->iov_data[cmd->iov_data_count];
1363
1364 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1365 if (ISCSI_CRC_LEN != tx_sent) {
1366 if (tx_sent == -EAGAIN) {
1367 pr_err("tx_data() returned -EAGAIN\n");
1368 goto send_datacrc;
1369 }
1370 return -1;
1371 }
1372 }
1373
1374 return 0;
1375}
1376
1377/*
1378 * This function is mainly used to send an ISCSI_TARG_LOGIN_RSP PDU
1379 * back to the Initiator when an exception condition occurs, with the
1380 * errors set in status_class and status_detail.
1381 *
1382 * Parameters: iSCSI Connection, Status Class, Status Detail.
1383 * Returns: 0 on success, -1 on error.
1384 */
1385int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
1386{
1387 u8 iscsi_hdr[ISCSI_HDR_LEN];
1388 int err;
1389 struct kvec iov;
1390 struct iscsi_login_rsp *hdr;
1391
1392 iscsit_collect_login_stats(conn, status_class, status_detail);
1393
1394 memset(&iov, 0, sizeof(struct kvec));
1395 memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
1396
1397 hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
1398 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1399 hdr->status_class = status_class;
1400 hdr->status_detail = status_detail;
1401 hdr->itt = cpu_to_be32(conn->login_itt);
1402
1403 iov.iov_base = &iscsi_hdr;
1404 iov.iov_len = ISCSI_HDR_LEN;
1405
1406 PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
1407
1408 err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
1409 if (err != ISCSI_HDR_LEN) {
1410 pr_err("tx_data returned less than expected\n");
1411 return -1;
1412 }
1413
1414 return 0;
1415}
1416
1417void iscsit_print_session_params(struct iscsi_session *sess)
1418{
1419 struct iscsi_conn *conn;
1420
1421 pr_debug("-----------------------------[Session Params for"
1422 " SID: %u]-----------------------------\n", sess->sid);
1423 spin_lock_bh(&sess->conn_lock);
1424 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1425 iscsi_dump_conn_ops(conn->conn_ops);
1426 spin_unlock_bh(&sess->conn_lock);
1427
1428 iscsi_dump_sess_ops(sess->sess_ops);
1429}
1430
1431static int iscsit_do_rx_data(
1432 struct iscsi_conn *conn,
1433 struct iscsi_data_count *count)
1434{
1435 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
1436 u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
1437 struct kvec iov[count->ss_iov_count], *iov_p;
1438 struct msghdr msg;
1439
1440 if (!conn || !conn->sock || !conn->conn_ops)
1441 return -1;
1442
1443 memset(&msg, 0, sizeof(struct msghdr));
1444
1445 if (count->sync_and_steering) {
1446 int size = 0;
1447 u32 i, orig_iov_count = 0;
1448 u32 orig_iov_len = 0, orig_iov_loc = 0;
1449 u32 iov_count = 0, per_iov_bytes = 0;
1450 u32 *rx_marker, old_rx_marker = 0;
1451 struct kvec *iov_record;
1452
1453 memset(&rx_marker_val, 0,
1454 count->ss_marker_count * sizeof(u32));
1455 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1456
1457 iov_record = count->iov;
1458 orig_iov_count = count->iov_count;
1459 rx_marker = &conn->of_marker;
1460
1461 i = 0;
1462 size = data;
1463 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1464 while (size > 0) {
1465 pr_debug("rx_data: #1 orig_iov_len %u,"
1466 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1467 pr_debug("rx_data: #2 rx_marker %u, size"
1468 " %u\n", *rx_marker, size);
1469
1470 if (orig_iov_len >= *rx_marker) {
1471 iov[iov_count].iov_len = *rx_marker;
1472 iov[iov_count++].iov_base =
1473 (iov_record[orig_iov_loc].iov_base +
1474 per_iov_bytes);
1475
1476 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1477 iov[iov_count++].iov_base =
1478 &rx_marker_val[rx_marker_iov++];
1479 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1480 iov[iov_count++].iov_base =
1481 &rx_marker_val[rx_marker_iov++];
1482 old_rx_marker = *rx_marker;
1483
1484 /*
1485 * OFMarkInt is in 32-bit words.
1486 */
1487 *rx_marker = (conn->conn_ops->OFMarkInt * 4);
1488 size -= old_rx_marker;
1489 orig_iov_len -= old_rx_marker;
1490 per_iov_bytes += old_rx_marker;
1491
1492 pr_debug("rx_data: #3 new_rx_marker"
1493 " %u, size %u\n", *rx_marker, size);
1494 } else {
1495 iov[iov_count].iov_len = orig_iov_len;
1496 iov[iov_count++].iov_base =
1497 (iov_record[orig_iov_loc].iov_base +
1498 per_iov_bytes);
1499
1500 per_iov_bytes = 0;
1501 *rx_marker -= orig_iov_len;
1502 size -= orig_iov_len;
1503
1504 if (size)
1505 orig_iov_len =
1506 iov_record[++orig_iov_loc].iov_len;
1507
1508 pr_debug("rx_data: #4 new_rx_marker"
1509 " %u, size %u\n", *rx_marker, size);
1510 }
1511 }
1512 data += (rx_marker_iov * (MARKER_SIZE / 2));
1513
1514 iov_p = &iov[0];
1515 iov_len = iov_count;
1516
1517 if (iov_count > count->ss_iov_count) {
1518 pr_err("iov_count: %d, count->ss_iov_count:"
1519 " %d\n", iov_count, count->ss_iov_count);
1520 return -1;
1521 }
1522 if (rx_marker_iov > count->ss_marker_count) {
1523 pr_err("rx_marker_iov: %d, count->ss_marker"
1524 "_count: %d\n", rx_marker_iov,
1525 count->ss_marker_count);
1526 return -1;
1527 }
1528 } else {
1529 iov_p = count->iov;
1530 iov_len = count->iov_count;
1531 }
1532
1533 while (total_rx < data) {
1534 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
1535 (data - total_rx), MSG_WAITALL);
1536 if (rx_loop <= 0) {
1537 pr_debug("rx_loop: %d total_rx: %d\n",
1538 rx_loop, total_rx);
1539 return rx_loop;
1540 }
1541 total_rx += rx_loop;
1542 pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1543 rx_loop, total_rx, data);
1544 }
1545
1546 if (count->sync_and_steering) {
1547 int j;
1548 for (j = 0; j < rx_marker_iov; j++) {
1549 pr_debug("rx_data: #5 j: %d, offset: %d\n",
1550 j, rx_marker_val[j]);
1551 conn->of_marker_offset = rx_marker_val[j];
1552 }
1553 total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
1554 }
1555
1556 return total_rx;
1557}
1558
1559static int iscsit_do_tx_data(
1560 struct iscsi_conn *conn,
1561 struct iscsi_data_count *count)
1562{
1563 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
1564 u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
1565 struct kvec iov[count->ss_iov_count], *iov_p;
1566 struct msghdr msg;
1567
1568 if (!conn || !conn->sock || !conn->conn_ops)
1569 return -1;
1570
1571 if (data <= 0) {
1572 pr_err("Data length is: %d\n", data);
1573 return -1;
1574 }
1575
1576 memset(&msg, 0, sizeof(struct msghdr));
1577
1578 if (count->sync_and_steering) {
1579 int size = 0;
1580 u32 i, orig_iov_count = 0;
1581 u32 orig_iov_len = 0, orig_iov_loc = 0;
1582 u32 iov_count = 0, per_iov_bytes = 0;
1583 u32 *tx_marker, old_tx_marker = 0;
1584 struct kvec *iov_record;
1585
1586 memset(&tx_marker_val, 0,
1587 count->ss_marker_count * sizeof(u32));
1588 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1589
1590 iov_record = count->iov;
1591 orig_iov_count = count->iov_count;
1592 tx_marker = &conn->if_marker;
1593
1594 i = 0;
1595 size = data;
1596 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1597 while (size > 0) {
1598 pr_debug("tx_data: #1 orig_iov_len %u,"
1599 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1600 pr_debug("tx_data: #2 tx_marker %u, size"
1601 " %u\n", *tx_marker, size);
1602
1603 if (orig_iov_len >= *tx_marker) {
1604 iov[iov_count].iov_len = *tx_marker;
1605 iov[iov_count++].iov_base =
1606 (iov_record[orig_iov_loc].iov_base +
1607 per_iov_bytes);
1608
1609 tx_marker_val[tx_marker_iov] =
1610 (size - *tx_marker);
1611 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1612 iov[iov_count++].iov_base =
1613 &tx_marker_val[tx_marker_iov++];
1614 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1615 iov[iov_count++].iov_base =
1616 &tx_marker_val[tx_marker_iov++];
1617 old_tx_marker = *tx_marker;
1618
1619 /*
1620 * IFMarkInt is in 32-bit words.
1621 */
1622 *tx_marker = (conn->conn_ops->IFMarkInt * 4);
1623 size -= old_tx_marker;
1624 orig_iov_len -= old_tx_marker;
1625 per_iov_bytes += old_tx_marker;
1626
1627 pr_debug("tx_data: #3 new_tx_marker"
1628 " %u, size %u\n", *tx_marker, size);
1629 pr_debug("tx_data: #4 offset %u\n",
1630 tx_marker_val[tx_marker_iov-1]);
1631 } else {
1632 iov[iov_count].iov_len = orig_iov_len;
1633 iov[iov_count++].iov_base
1634 = (iov_record[orig_iov_loc].iov_base +
1635 per_iov_bytes);
1636
1637 per_iov_bytes = 0;
1638 *tx_marker -= orig_iov_len;
1639 size -= orig_iov_len;
1640
1641 if (size)
1642 orig_iov_len =
1643 iov_record[++orig_iov_loc].iov_len;
1644
1645 pr_debug("tx_data: #5 new_tx_marker"
1646 " %u, size %u\n", *tx_marker, size);
1647 }
1648 }
1649
1650 data += (tx_marker_iov * (MARKER_SIZE / 2));
1651
1652 iov_p = &iov[0];
1653 iov_len = iov_count;
1654
1655 if (iov_count > count->ss_iov_count) {
1656 pr_err("iov_count: %d, count->ss_iov_count:"
1657 " %d\n", iov_count, count->ss_iov_count);
1658 return -1;
1659 }
1660 if (tx_marker_iov > count->ss_marker_count) {
1661 pr_err("tx_marker_iov: %d, count->ss_marker"
1662 "_count: %d\n", tx_marker_iov,
1663 count->ss_marker_count);
1664 return -1;
1665 }
1666 } else {
1667 iov_p = count->iov;
1668 iov_len = count->iov_count;
1669 }
1670
1671 while (total_tx < data) {
1672 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
1673 (data - total_tx));
1674 if (tx_loop <= 0) {
1675 pr_debug("tx_loop: %d total_tx %d\n",
1676 tx_loop, total_tx);
1677 return tx_loop;
1678 }
1679 total_tx += tx_loop;
1680 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1681 tx_loop, total_tx, data);
1682 }
1683
1684 if (count->sync_and_steering)
1685 total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
1686
1687 return total_tx;
1688}
1689
1690int rx_data(
1691 struct iscsi_conn *conn,
1692 struct kvec *iov,
1693 int iov_count,
1694 int data)
1695{
1696 struct iscsi_data_count c;
1697
1698 if (!conn || !conn->sock || !conn->conn_ops)
1699 return -1;
1700
1701 memset(&c, 0, sizeof(struct iscsi_data_count));
1702 c.iov = iov;
1703 c.iov_count = iov_count;
1704 c.data_length = data;
1705 c.type = ISCSI_RX_DATA;
1706
1707 if (conn->conn_ops->OFMarker &&
1708 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1709 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1710 return -1;
1711 }
1712
1713 return iscsit_do_rx_data(conn, &c);
1714}
1715
1716int tx_data(
1717 struct iscsi_conn *conn,
1718 struct kvec *iov,
1719 int iov_count,
1720 int data)
1721{
1722 struct iscsi_data_count c;
1723
1724 if (!conn || !conn->sock || !conn->conn_ops)
1725 return -1;
1726
1727 memset(&c, 0, sizeof(struct iscsi_data_count));
1728 c.iov = iov;
1729 c.iov_count = iov_count;
1730 c.data_length = data;
1731 c.type = ISCSI_TX_DATA;
1732
1733 if (conn->conn_ops->IFMarker &&
1734 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1735 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1736 return -1;
1737 }
1738
1739 return iscsit_do_tx_data(conn, &c);
1740}
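
Both rx_data() and tx_data() end up handing a kvec array to kernel_recvmsg()/kernel_sendmsg(), i.e. one vectored socket transfer covering the header, payload, padding and digest pieces. A userspace analogue of that gather-write pattern using writev(2), writing to stdout so it runs standalone (the driver writes to conn->sock instead):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char hdr[48];			/* stand-in for a 48-byte iSCSI BHS */
	char payload[] = "payload bytes\n";
	struct iovec iov[2];
	ssize_t sent;

	memset(hdr, 0, sizeof(hdr));
	iov[0].iov_base = hdr;
	iov[0].iov_len  = sizeof(hdr);
	iov[1].iov_base = payload;
	iov[1].iov_len  = sizeof(payload) - 1;

	/* Header and payload go out in a single vectored write. */
	sent = writev(STDOUT_FILENO, iov, 2);
	if (sent < 0)
		perror("writev");
	else
		fprintf(stderr, "sent %zd of %zu bytes\n",
			sent, sizeof(hdr) + sizeof(payload) - 1);
	return 0;
}
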
1741
1742void iscsit_collect_login_stats(
1743 struct iscsi_conn *conn,
1744 u8 status_class,
1745 u8 status_detail)
1746{
1747 struct iscsi_param *intrname = NULL;
1748 struct iscsi_tiqn *tiqn;
1749 struct iscsi_login_stats *ls;
1750
1751 tiqn = iscsit_snmp_get_tiqn(conn);
1752 if (!tiqn)
1753 return;
1754
1755 ls = &tiqn->login_stats;
1756
1757 spin_lock(&ls->lock);
1758 if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
1759 ((get_jiffies_64() - ls->last_fail_time) < 10)) {
1760 /* We already have the failure info for this login */
1761 spin_unlock(&ls->lock);
1762 return;
1763 }
1764
1765 if (status_class == ISCSI_STATUS_CLS_SUCCESS)
1766 ls->accepts++;
1767 else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
1768 ls->redirects++;
1769 ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
1770 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1771 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
1772 ls->authenticate_fails++;
1773 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
1774 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1775 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
1776 ls->authorize_fails++;
1777 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
1778 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1779 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
1780 ls->negotiate_fails++;
1781 ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
1782 } else {
1783 ls->other_fails++;
1784 ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
1785 }
1786
1787 /* Save initiator name, ip address and time, if it is a failed login */
1788 if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
1789 if (conn->param_list)
1790 intrname = iscsi_find_param_from_key(INITIATORNAME,
1791 conn->param_list);
1792 strcpy(ls->last_intr_fail_name,
1793 (intrname ? intrname->value : "Unknown"));
1794
1795 ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
1796 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
1797 "%s", conn->login_ip);
1798 ls->last_fail_time = get_jiffies_64();
1799 }
1800
1801 spin_unlock(&ls->lock);
1802}
1803
1804struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
1805{
1806 struct iscsi_portal_group *tpg;
1807
1808 if (!conn || !conn->sess)
1809 return NULL;
1810
1811 tpg = conn->sess->tpg;
1812 if (!tpg)
1813 return NULL;
1814
1815 if (!tpg->tpg_tiqn)
1816 return NULL;
1817
1818 return tpg->tpg_tiqn;
1819}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
new file mode 100644
index 000000000000..2cd49d607bda
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -0,0 +1,60 @@
1#ifndef ISCSI_TARGET_UTIL_H
2#define ISCSI_TARGET_UTIL_H
3
4#define MARKER_SIZE 8
5
6extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
7extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
8extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
9extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
10extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
11extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
12extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
13extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
14extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
15extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
16extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
17extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
18int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn);
19extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
20extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32);
21extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
22 u32, u32);
23extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
24extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
25 struct iscsi_conn_recovery **, u32);
26extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
27extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
28extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
29extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
30extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
31extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
32extern void iscsit_release_cmd(struct iscsi_cmd *);
33extern int iscsit_check_session_usage_count(struct iscsi_session *);
34extern void iscsit_dec_session_usage_count(struct iscsi_session *);
35extern void iscsit_inc_session_usage_count(struct iscsi_session *);
36extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
37extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
38extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
39extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
40extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
41extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
42extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
43extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
44extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
45extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
46extern void iscsit_start_nopin_timer(struct iscsi_conn *);
47extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
48extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
49extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
50extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
51extern void iscsit_print_session_params(struct iscsi_session *);
52extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
53extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
54extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
55extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
56extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
57extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
58extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
59
60#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 46352d658e35..c75a01a1c475 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4052,17 +4052,16 @@ static int transport_allocate_data_tasks(
4052 struct se_task *task; 4052 struct se_task *task;
4053 struct se_device *dev = cmd->se_dev; 4053 struct se_device *dev = cmd->se_dev;
4054 unsigned long flags; 4054 unsigned long flags;
4055 sector_t sectors;
4056 int task_count, i, ret; 4055 int task_count, i, ret;
4057 sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; 4056 sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
4058 u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; 4057 u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
4059 struct scatterlist *sg; 4058 struct scatterlist *sg;
4060 struct scatterlist *cmd_sg; 4059 struct scatterlist *cmd_sg;
4061 4060
4062 WARN_ON(cmd->data_length % sector_size); 4061 WARN_ON(cmd->data_length % sector_size);
4063 sectors = DIV_ROUND_UP(cmd->data_length, sector_size); 4062 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
4064 task_count = DIV_ROUND_UP(sectors, dev_max_sectors); 4063 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
4065 4064
4066 cmd_sg = sgl; 4065 cmd_sg = sgl;
4067 for (i = 0; i < task_count; i++) { 4066 for (i = 0; i < task_count; i++) {
4068 unsigned int task_size; 4067 unsigned int task_size;
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 4d433d34736f..f11e43ed907d 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
187 */ 187 */
188static struct inode *anon_inode_mkinode(void) 188static struct inode *anon_inode_mkinode(void)
189{ 189{
190 struct inode *inode = new_inode(anon_inode_mnt->mnt_sb); 190 struct inode *inode = new_inode_pseudo(anon_inode_mnt->mnt_sb);
191 191
192 if (!inode) 192 if (!inode)
193 return ERR_PTR(-ENOMEM); 193 return ERR_PTR(-ENOMEM);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e91b097e7252..caa26ab5ed68 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4467,7 +4467,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4467 inode->i_generation = BTRFS_I(inode)->generation; 4467 inode->i_generation = BTRFS_I(inode)->generation;
4468 btrfs_set_inode_space_info(root, inode); 4468 btrfs_set_inode_space_info(root, inode);
4469 4469
4470 if (mode & S_IFDIR) 4470 if (S_ISDIR(mode))
4471 owner = 0; 4471 owner = 0;
4472 else 4472 else
4473 owner = 1; 4473 owner = 1;
@@ -4512,7 +4512,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4512 4512
4513 btrfs_inherit_iflags(inode, dir); 4513 btrfs_inherit_iflags(inode, dir);
4514 4514
4515 if ((mode & S_IFREG)) { 4515 if (S_ISREG(mode)) {
4516 if (btrfs_test_opt(root, NODATASUM)) 4516 if (btrfs_test_opt(root, NODATASUM))
4517 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 4517 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4518 if (btrfs_test_opt(root, NODATACOW) || 4518 if (btrfs_test_opt(root, NODATACOW) ||
diff --git a/fs/dcache.c b/fs/dcache.c
index be18598c7fd7..b05aac3a8cfc 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2138,8 +2138,9 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
2138 * @target: new dentry 2138 * @target: new dentry
2139 * 2139 *
2140 * Update the dcache to reflect the move of a file name. Negative 2140 * Update the dcache to reflect the move of a file name. Negative
2141 * dcache entries should not be moved in this way. Caller hold 2141 * dcache entries should not be moved in this way. Caller must hold
2142 * rename_lock. 2142 * rename_lock, the i_mutex of the source and target directories,
2143 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2143 */ 2144 */
2144static void __d_move(struct dentry * dentry, struct dentry * target) 2145static void __d_move(struct dentry * dentry, struct dentry * target)
2145{ 2146{
@@ -2202,7 +2203,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
2202 * @target: new dentry 2203 * @target: new dentry
2203 * 2204 *
2204 * Update the dcache to reflect the move of a file name. Negative 2205 * Update the dcache to reflect the move of a file name. Negative
2205 * dcache entries should not be moved in this way. 2206 * dcache entries should not be moved in this way. See the locking
2207 * requirements for __d_move.
2206 */ 2208 */
2207void d_move(struct dentry *dentry, struct dentry *target) 2209void d_move(struct dentry *dentry, struct dentry *target)
2208{ 2210{
@@ -2320,7 +2322,8 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2320 * @inode: inode to bind to the dentry, to which aliases may be attached 2322 * @inode: inode to bind to the dentry, to which aliases may be attached
2321 * 2323 *
2322 * Introduces an dentry into the tree, substituting an extant disconnected 2324 * Introduces an dentry into the tree, substituting an extant disconnected
2323 * root directory alias in its place if there is one 2325 * root directory alias in its place if there is one. Caller must hold the
2326 * i_mutex of the parent directory.
2324 */ 2327 */
2325struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) 2328struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2326{ 2329{
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 516516e0c2a2..3bc073a4cf82 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1018,13 +1018,13 @@ hostdata_error:
1018 fsname++; 1018 fsname++;
1019 if (lm->lm_mount == NULL) { 1019 if (lm->lm_mount == NULL) {
1020 fs_info(sdp, "Now mounting FS...\n"); 1020 fs_info(sdp, "Now mounting FS...\n");
1021 complete(&sdp->sd_locking_init); 1021 complete_all(&sdp->sd_locking_init);
1022 return 0; 1022 return 0;
1023 } 1023 }
1024 ret = lm->lm_mount(sdp, fsname); 1024 ret = lm->lm_mount(sdp, fsname);
1025 if (ret == 0) 1025 if (ret == 0)
1026 fs_info(sdp, "Joined cluster. Now mounting FS...\n"); 1026 fs_info(sdp, "Joined cluster. Now mounting FS...\n");
1027 complete(&sdp->sd_locking_init); 1027 complete_all(&sdp->sd_locking_init);
1028 return ret; 1028 return ret;
1029} 1029}
1030 1030
diff --git a/fs/inode.c b/fs/inode.c
index a48fa5355fb4..d0c72ff6b30e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -361,9 +361,11 @@ EXPORT_SYMBOL_GPL(inode_sb_list_add);
361 361
362static inline void inode_sb_list_del(struct inode *inode) 362static inline void inode_sb_list_del(struct inode *inode)
363{ 363{
364 spin_lock(&inode_sb_list_lock); 364 if (!list_empty(&inode->i_sb_list)) {
365 list_del_init(&inode->i_sb_list); 365 spin_lock(&inode_sb_list_lock);
366 spin_unlock(&inode_sb_list_lock); 366 list_del_init(&inode->i_sb_list);
367 spin_unlock(&inode_sb_list_lock);
368 }
367} 369}
368 370
369static unsigned long hash(struct super_block *sb, unsigned long hashval) 371static unsigned long hash(struct super_block *sb, unsigned long hashval)
@@ -796,6 +798,29 @@ unsigned int get_next_ino(void)
796EXPORT_SYMBOL(get_next_ino); 798EXPORT_SYMBOL(get_next_ino);
797 799
798/** 800/**
801 * new_inode_pseudo - obtain an inode
802 * @sb: superblock
803 *
 804 * Allocates a new inode for the given superblock.
 805 * The inode won't be chained on the superblock's s_inodes list.
 806 * This means:
 807 * - fs can't be unmounted
 808 * - quotas, fsnotify, writeback can't work
809 */
810struct inode *new_inode_pseudo(struct super_block *sb)
811{
812 struct inode *inode = alloc_inode(sb);
813
814 if (inode) {
815 spin_lock(&inode->i_lock);
816 inode->i_state = 0;
817 spin_unlock(&inode->i_lock);
818 INIT_LIST_HEAD(&inode->i_sb_list);
819 }
820 return inode;
821}
822
823/**
799 * new_inode - obtain an inode 824 * new_inode - obtain an inode
800 * @sb: superblock 825 * @sb: superblock
801 * 826 *
@@ -813,13 +838,9 @@ struct inode *new_inode(struct super_block *sb)
813 838
814 spin_lock_prefetch(&inode_sb_list_lock); 839 spin_lock_prefetch(&inode_sb_list_lock);
815 840
816 inode = alloc_inode(sb); 841 inode = new_inode_pseudo(sb);
817 if (inode) { 842 if (inode)
818 spin_lock(&inode->i_lock);
819 inode->i_state = 0;
820 spin_unlock(&inode->i_lock);
821 inode_sb_list_add(inode); 843 inode_sb_list_add(inode);
822 }
823 return inode; 844 return inode;
824} 845}
825EXPORT_SYMBOL(new_inode); 846EXPORT_SYMBOL(new_inode);
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index eeead33d8ef0..b81b35ddf4e4 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -80,7 +80,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
80 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 80 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
81 if (ret) { 81 if (ret) {
82 jffs2_free_raw_inode(ri); 82 jffs2_free_raw_inode(ri);
83 if (S_ISLNK(inode->i_mode & S_IFMT)) 83 if (S_ISLNK(inode->i_mode))
84 kfree(mdata); 84 kfree(mdata);
85 return ret; 85 return ret;
86 } 86 }
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 4496872cf4e7..9cbd11a3f804 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -3161,7 +3161,7 @@ static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
3161{ 3161{
3162 int rc; 3162 int rc;
3163 int dbitno, word, rembits, nb, nwords, wbitno, agno; 3163 int dbitno, word, rembits, nb, nwords, wbitno, agno;
3164 s8 oldroot, *leaf; 3164 s8 oldroot;
3165 struct dmaptree *tp = (struct dmaptree *) & dp->tree; 3165 struct dmaptree *tp = (struct dmaptree *) & dp->tree;
3166 3166
3167 /* save the current value of the root (i.e. maximum free string) 3167 /* save the current value of the root (i.e. maximum free string)
@@ -3169,9 +3169,6 @@ static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
3169 */ 3169 */
3170 oldroot = tp->stree[ROOT]; 3170 oldroot = tp->stree[ROOT];
3171 3171
3172 /* pick up a pointer to the leaves of the dmap tree */
3173 leaf = tp->stree + LEAFIND;
3174
3175 /* determine the bit number and word within the dmap of the 3172 /* determine the bit number and word within the dmap of the
3176 * starting block. 3173 * starting block.
3177 */ 3174 */
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index f6cc0c09ec63..af9606057dde 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1143,7 +1143,6 @@ int txCommit(tid_t tid, /* transaction identifier */
1143 struct jfs_log *log; 1143 struct jfs_log *log;
1144 struct tblock *tblk; 1144 struct tblock *tblk;
1145 struct lrd *lrd; 1145 struct lrd *lrd;
1146 int lsn;
1147 struct inode *ip; 1146 struct inode *ip;
1148 struct jfs_inode_info *jfs_ip; 1147 struct jfs_inode_info *jfs_ip;
1149 int k, n; 1148 int k, n;
@@ -1310,7 +1309,7 @@ int txCommit(tid_t tid, /* transaction identifier */
1310 */ 1309 */
1311 lrd->type = cpu_to_le16(LOG_COMMIT); 1310 lrd->type = cpu_to_le16(LOG_COMMIT);
1312 lrd->length = 0; 1311 lrd->length = 0;
1313 lsn = lmLog(log, tblk, lrd, NULL); 1312 lmLog(log, tblk, lrd, NULL);
1314 1313
1315 lmGroupCommit(log, tblk); 1314 lmGroupCommit(log, tblk);
1316 1315
@@ -2935,7 +2934,6 @@ int jfs_sync(void *arg)
2935{ 2934{
2936 struct inode *ip; 2935 struct inode *ip;
2937 struct jfs_inode_info *jfs_ip; 2936 struct jfs_inode_info *jfs_ip;
2938 int rc;
2939 tid_t tid; 2937 tid_t tid;
2940 2938
2941 do { 2939 do {
@@ -2961,7 +2959,7 @@ int jfs_sync(void *arg)
2961 */ 2959 */
2962 TXN_UNLOCK(); 2960 TXN_UNLOCK();
2963 tid = txBegin(ip->i_sb, COMMIT_INODE); 2961 tid = txBegin(ip->i_sb, COMMIT_INODE);
2964 rc = txCommit(tid, 1, &ip, 0); 2962 txCommit(tid, 1, &ip, 0);
2965 txEnd(tid); 2963 txEnd(tid);
2966 mutex_unlock(&jfs_ip->commit_mutex); 2964 mutex_unlock(&jfs_ip->commit_mutex);
2967 2965
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 29b1f1a21142..e17545e15664 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -893,7 +893,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
893 unchar *i_fastsymlink; 893 unchar *i_fastsymlink;
894 s64 xlen = 0; 894 s64 xlen = 0;
895 int bmask = 0, xsize; 895 int bmask = 0, xsize;
896 s64 extent = 0, xaddr; 896 s64 xaddr;
897 struct metapage *mp; 897 struct metapage *mp;
898 struct super_block *sb; 898 struct super_block *sb;
899 struct tblock *tblk; 899 struct tblock *tblk;
@@ -993,7 +993,6 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
993 txAbort(tid, 0); 993 txAbort(tid, 0);
994 goto out3; 994 goto out3;
995 } 995 }
996 extent = xaddr;
997 ip->i_size = ssize - 1; 996 ip->i_size = ssize - 1;
998 while (ssize) { 997 while (ssize) {
999 /* This is kind of silly since PATH_MAX == 4K */ 998 /* This is kind of silly since PATH_MAX == 4K */
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index e374050a911c..8392cb85bd54 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -302,7 +302,8 @@ nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
302 /* We appear to be out of the grace period */ 302 /* We appear to be out of the grace period */
303 wake_up_all(&host->h_gracewait); 303 wake_up_all(&host->h_gracewait);
304 } 304 }
305 dprintk("lockd: server returns status %d\n", resp->status); 305 dprintk("lockd: server returns status %d\n",
306 ntohl(resp->status));
306 return 0; /* Okay, call complete */ 307 return 0; /* Okay, call complete */
307 } 308 }
308 309
@@ -690,7 +691,8 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
690 goto out; 691 goto out;
691 692
692 if (resp->status != nlm_lck_denied_nolocks) 693 if (resp->status != nlm_lck_denied_nolocks)
693 printk("lockd: unexpected unlock status: %d\n", resp->status); 694 printk("lockd: unexpected unlock status: %d\n",
695 ntohl(resp->status));
694 /* What to do now? I'm out of my depth... */ 696 /* What to do now? I'm out of my depth... */
695 status = -ENOLCK; 697 status = -ENOLCK;
696out: 698out:
@@ -843,6 +845,7 @@ nlm_stat_to_errno(__be32 status)
843 return -ENOLCK; 845 return -ENOLCK;
844#endif 846#endif
845 } 847 }
846 printk(KERN_NOTICE "lockd: unexpected server status %d\n", status); 848 printk(KERN_NOTICE "lockd: unexpected server status %d\n",
849 ntohl(status));
847 return -ENOLCK; 850 return -ENOLCK;
848} 851}
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 81515545ba75..2cde5d954750 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -77,6 +77,7 @@ config NFS_V4
77config NFS_V4_1 77config NFS_V4_1
78 bool "NFS client support for NFSv4.1 (EXPERIMENTAL)" 78 bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
79 depends on NFS_FS && NFS_V4 && EXPERIMENTAL 79 depends on NFS_FS && NFS_V4 && EXPERIMENTAL
80 select SUNRPC_BACKCHANNEL
80 select PNFS_FILE_LAYOUT 81 select PNFS_FILE_LAYOUT
81 help 82 help
82 This option enables support for minor version 1 of the NFSv4 protocol 83 This option enables support for minor version 1 of the NFSv4 protocol
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index d4d1954e9bb9..74780f9f852c 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -111,6 +111,7 @@ int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nf
111static u32 initiate_file_draining(struct nfs_client *clp, 111static u32 initiate_file_draining(struct nfs_client *clp,
112 struct cb_layoutrecallargs *args) 112 struct cb_layoutrecallargs *args)
113{ 113{
114 struct nfs_server *server;
114 struct pnfs_layout_hdr *lo; 115 struct pnfs_layout_hdr *lo;
115 struct inode *ino; 116 struct inode *ino;
116 bool found = false; 117 bool found = false;
@@ -118,21 +119,28 @@ static u32 initiate_file_draining(struct nfs_client *clp,
118 LIST_HEAD(free_me_list); 119 LIST_HEAD(free_me_list);
119 120
120 spin_lock(&clp->cl_lock); 121 spin_lock(&clp->cl_lock);
121 list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) { 122 rcu_read_lock();
122 if (nfs_compare_fh(&args->cbl_fh, 123 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
123 &NFS_I(lo->plh_inode)->fh)) 124 list_for_each_entry(lo, &server->layouts, plh_layouts) {
124 continue; 125 if (nfs_compare_fh(&args->cbl_fh,
125 ino = igrab(lo->plh_inode); 126 &NFS_I(lo->plh_inode)->fh))
126 if (!ino) 127 continue;
127 continue; 128 ino = igrab(lo->plh_inode);
128 found = true; 129 if (!ino)
129 /* Without this, layout can be freed as soon 130 continue;
130 * as we release cl_lock. 131 found = true;
131 */ 132 /* Without this, layout can be freed as soon
132 get_layout_hdr(lo); 133 * as we release cl_lock.
133 break; 134 */
135 get_layout_hdr(lo);
136 break;
137 }
138 if (found)
139 break;
134 } 140 }
141 rcu_read_unlock();
135 spin_unlock(&clp->cl_lock); 142 spin_unlock(&clp->cl_lock);
143
136 if (!found) 144 if (!found)
137 return NFS4ERR_NOMATCHING_LAYOUT; 145 return NFS4ERR_NOMATCHING_LAYOUT;
138 146
@@ -154,6 +162,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
154static u32 initiate_bulk_draining(struct nfs_client *clp, 162static u32 initiate_bulk_draining(struct nfs_client *clp,
155 struct cb_layoutrecallargs *args) 163 struct cb_layoutrecallargs *args)
156{ 164{
165 struct nfs_server *server;
157 struct pnfs_layout_hdr *lo; 166 struct pnfs_layout_hdr *lo;
158 struct inode *ino; 167 struct inode *ino;
159 u32 rv = NFS4ERR_NOMATCHING_LAYOUT; 168 u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
@@ -167,18 +176,24 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
167 }; 176 };
168 177
169 spin_lock(&clp->cl_lock); 178 spin_lock(&clp->cl_lock);
170 list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) { 179 rcu_read_lock();
180 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
171 if ((args->cbl_recall_type == RETURN_FSID) && 181 if ((args->cbl_recall_type == RETURN_FSID) &&
172 memcmp(&NFS_SERVER(lo->plh_inode)->fsid, 182 memcmp(&server->fsid, &args->cbl_fsid,
173 &args->cbl_fsid, sizeof(struct nfs_fsid))) 183 sizeof(struct nfs_fsid)))
174 continue;
175 if (!igrab(lo->plh_inode))
176 continue; 184 continue;
177 get_layout_hdr(lo); 185
178 BUG_ON(!list_empty(&lo->plh_bulk_recall)); 186 list_for_each_entry(lo, &server->layouts, plh_layouts) {
179 list_add(&lo->plh_bulk_recall, &recall_list); 187 if (!igrab(lo->plh_inode))
188 continue;
189 get_layout_hdr(lo);
190 BUG_ON(!list_empty(&lo->plh_bulk_recall));
191 list_add(&lo->plh_bulk_recall, &recall_list);
192 }
180 } 193 }
194 rcu_read_unlock();
181 spin_unlock(&clp->cl_lock); 195 spin_unlock(&clp->cl_lock);
196
182 list_for_each_entry_safe(lo, tmp, 197 list_for_each_entry_safe(lo, tmp,
183 &recall_list, plh_bulk_recall) { 198 &recall_list, plh_bulk_recall) {
184 ino = lo->plh_inode; 199 ino = lo->plh_inode;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index b3dc2b88b65b..19ea7d9c75e6 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -188,9 +188,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
188 cred = rpc_lookup_machine_cred(); 188 cred = rpc_lookup_machine_cred();
189 if (!IS_ERR(cred)) 189 if (!IS_ERR(cred))
190 clp->cl_machine_cred = cred; 190 clp->cl_machine_cred = cred;
191#if defined(CONFIG_NFS_V4_1)
192 INIT_LIST_HEAD(&clp->cl_layouts);
193#endif
194 nfs_fscache_get_client_cookie(clp); 191 nfs_fscache_get_client_cookie(clp);
195 192
196 return clp; 193 return clp;
@@ -293,6 +290,7 @@ static void nfs_free_client(struct nfs_client *clp)
293 nfs4_deviceid_purge_client(clp); 290 nfs4_deviceid_purge_client(clp);
294 291
295 kfree(clp->cl_hostname); 292 kfree(clp->cl_hostname);
293 kfree(clp->server_scope);
296 kfree(clp); 294 kfree(clp);
297 295
298 dprintk("<-- nfs_free_client()\n"); 296 dprintk("<-- nfs_free_client()\n");
@@ -1062,6 +1060,7 @@ static struct nfs_server *nfs_alloc_server(void)
1062 INIT_LIST_HEAD(&server->client_link); 1060 INIT_LIST_HEAD(&server->client_link);
1063 INIT_LIST_HEAD(&server->master_link); 1061 INIT_LIST_HEAD(&server->master_link);
1064 INIT_LIST_HEAD(&server->delegations); 1062 INIT_LIST_HEAD(&server->delegations);
1063 INIT_LIST_HEAD(&server->layouts);
1065 1064
1066 atomic_set(&server->active, 0); 1065 atomic_set(&server->active, 0);
1067 1066
@@ -1464,7 +1463,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
1464 dprintk("<-- %s %p\n", __func__, clp); 1463 dprintk("<-- %s %p\n", __func__, clp);
1465 return clp; 1464 return clp;
1466} 1465}
1467EXPORT_SYMBOL(nfs4_set_ds_client); 1466EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
1468 1467
1469/* 1468/*
1470 * Session has been established, and the client marked ready. 1469 * Session has been established, and the client marked ready.
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index dd25c2aec375..321a66bc3846 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -398,12 +398,11 @@ int nfs_inode_return_delegation(struct inode *inode)
398 return err; 398 return err;
399} 399}
400 400
401static void nfs_mark_return_delegation(struct nfs_delegation *delegation) 401static void nfs_mark_return_delegation(struct nfs_server *server,
402 struct nfs_delegation *delegation)
402{ 403{
403 struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client;
404
405 set_bit(NFS_DELEGATION_RETURN, &delegation->flags); 404 set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
406 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); 405 set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
407} 406}
408 407
409/** 408/**
@@ -441,7 +440,7 @@ static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
441 if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE)) 440 if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
442 continue; 441 continue;
443 if (delegation->type & flags) 442 if (delegation->type & flags)
444 nfs_mark_return_delegation(delegation); 443 nfs_mark_return_delegation(server, delegation);
445 } 444 }
446} 445}
447 446
@@ -508,7 +507,7 @@ static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
508 list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 507 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
509 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags)) 508 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
510 continue; 509 continue;
511 nfs_mark_return_delegation(delegation); 510 nfs_mark_return_delegation(server, delegation);
512 } 511 }
513} 512}
514 513
@@ -539,7 +538,8 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
539int nfs_async_inode_return_delegation(struct inode *inode, 538int nfs_async_inode_return_delegation(struct inode *inode,
540 const nfs4_stateid *stateid) 539 const nfs4_stateid *stateid)
541{ 540{
542 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 541 struct nfs_server *server = NFS_SERVER(inode);
542 struct nfs_client *clp = server->nfs_client;
543 struct nfs_delegation *delegation; 543 struct nfs_delegation *delegation;
544 544
545 rcu_read_lock(); 545 rcu_read_lock();
@@ -549,7 +549,7 @@ int nfs_async_inode_return_delegation(struct inode *inode,
549 rcu_read_unlock(); 549 rcu_read_unlock();
550 return -ENOENT; 550 return -ENOENT;
551 } 551 }
552 nfs_mark_return_delegation(delegation); 552 nfs_mark_return_delegation(server, delegation);
553 rcu_read_unlock(); 553 rcu_read_unlock();
554 554
555 nfs_delegation_run_state_manager(clp); 555 nfs_delegation_run_state_manager(clp);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 2a55347a2daa..ab12913dd473 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -277,6 +277,9 @@ extern void nfs_sb_deactive(struct super_block *sb);
277extern char *nfs_path(char **p, struct dentry *dentry, 277extern char *nfs_path(char **p, struct dentry *dentry,
278 char *buffer, ssize_t buflen); 278 char *buffer, ssize_t buflen);
279extern struct vfsmount *nfs_d_automount(struct path *path); 279extern struct vfsmount *nfs_d_automount(struct path *path);
280#ifdef CONFIG_NFS_V4
281rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
282#endif
280 283
281/* getroot.c */ 284/* getroot.c */
282extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *, 285extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
@@ -288,12 +291,22 @@ extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
288extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh); 291extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
289#endif 292#endif
290 293
294struct nfs_pageio_descriptor;
291/* read.c */ 295/* read.c */
292extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt, 296extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
293 const struct rpc_call_ops *call_ops); 297 const struct rpc_call_ops *call_ops);
294extern void nfs_read_prepare(struct rpc_task *task, void *calldata); 298extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
299extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
300 struct list_head *head);
301
302extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
303extern void nfs_readdata_release(struct nfs_read_data *rdata);
295 304
296/* write.c */ 305/* write.c */
306extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
307 struct list_head *head);
308extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
309extern void nfs_writedata_release(struct nfs_write_data *wdata);
297extern void nfs_commit_free(struct nfs_write_data *p); 310extern void nfs_commit_free(struct nfs_write_data *p);
298extern int nfs_initiate_write(struct nfs_write_data *data, 311extern int nfs_initiate_write(struct nfs_write_data *data,
299 struct rpc_clnt *clnt, 312 struct rpc_clnt *clnt,
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 1f063bacd285..8102391bb374 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -119,7 +119,7 @@ Elong:
119} 119}
120 120
121#ifdef CONFIG_NFS_V4 121#ifdef CONFIG_NFS_V4
122static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors) 122rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
123{ 123{
124 struct gss_api_mech *mech; 124 struct gss_api_mech *mech;
125 struct xdr_netobj oid; 125 struct xdr_netobj oid;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index b788f2eb1ba0..1909ee8be350 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -48,6 +48,7 @@ enum nfs4_client_state {
48 NFS4CLNT_SESSION_RESET, 48 NFS4CLNT_SESSION_RESET,
49 NFS4CLNT_RECALL_SLOT, 49 NFS4CLNT_RECALL_SLOT,
50 NFS4CLNT_LEASE_CONFIRM, 50 NFS4CLNT_LEASE_CONFIRM,
51 NFS4CLNT_SERVER_SCOPE_MISMATCH,
51}; 52};
52 53
53enum nfs4_session_state { 54enum nfs4_session_state {
@@ -66,6 +67,8 @@ struct nfs4_minor_version_ops {
66 int cache_reply); 67 int cache_reply);
67 int (*validate_stateid)(struct nfs_delegation *, 68 int (*validate_stateid)(struct nfs_delegation *,
68 const nfs4_stateid *); 69 const nfs4_stateid *);
70 int (*find_root_sec)(struct nfs_server *, struct nfs_fh *,
71 struct nfs_fsinfo *);
69 const struct nfs4_state_recovery_ops *reboot_recovery_ops; 72 const struct nfs4_state_recovery_ops *reboot_recovery_ops;
70 const struct nfs4_state_recovery_ops *nograce_recovery_ops; 73 const struct nfs4_state_recovery_ops *nograce_recovery_ops;
71 const struct nfs4_state_maintenance_ops *state_renewal_ops; 74 const struct nfs4_state_maintenance_ops *state_renewal_ops;
@@ -349,6 +352,8 @@ extern void nfs4_schedule_state_manager(struct nfs_client *);
349extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); 352extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
350extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 353extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
351extern void nfs41_handle_recall_slot(struct nfs_client *clp); 354extern void nfs41_handle_recall_slot(struct nfs_client *clp);
355extern void nfs41_handle_server_scope(struct nfs_client *,
356 struct server_scope **);
352extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); 357extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
353extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); 358extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
354extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t); 359extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index f9d03abcd04c..be93a622872c 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -334,6 +334,9 @@ filelayout_read_pagelist(struct nfs_read_data *data)
334 __func__, data->inode->i_ino, 334 __func__, data->inode->i_ino,
335 data->args.pgbase, (size_t)data->args.count, offset); 335 data->args.pgbase, (size_t)data->args.count, offset);
336 336
337 if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
338 return PNFS_NOT_ATTEMPTED;
339
337 /* Retrieve the correct rpc_client for the byte range */ 340 /* Retrieve the correct rpc_client for the byte range */
338 j = nfs4_fl_calc_j_index(lseg, offset); 341 j = nfs4_fl_calc_j_index(lseg, offset);
339 idx = nfs4_fl_calc_ds_index(lseg, j); 342 idx = nfs4_fl_calc_ds_index(lseg, j);
@@ -344,8 +347,7 @@ filelayout_read_pagelist(struct nfs_read_data *data)
344 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); 347 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
345 return PNFS_NOT_ATTEMPTED; 348 return PNFS_NOT_ATTEMPTED;
346 } 349 }
347 dprintk("%s USE DS:ip %x %hu\n", __func__, 350 dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr);
348 ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
349 351
350 /* No multipath support. Use first DS */ 352 /* No multipath support. Use first DS */
351 data->ds_clp = ds->ds_clp; 353 data->ds_clp = ds->ds_clp;
@@ -374,6 +376,9 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
374 struct nfs_fh *fh; 376 struct nfs_fh *fh;
375 int status; 377 int status;
376 378
379 if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
380 return PNFS_NOT_ATTEMPTED;
381
377 /* Retrieve the correct rpc_client for the byte range */ 382 /* Retrieve the correct rpc_client for the byte range */
378 j = nfs4_fl_calc_j_index(lseg, offset); 383 j = nfs4_fl_calc_j_index(lseg, offset);
379 idx = nfs4_fl_calc_ds_index(lseg, j); 384 idx = nfs4_fl_calc_ds_index(lseg, j);
@@ -384,9 +389,9 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
384 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags); 389 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
385 return PNFS_NOT_ATTEMPTED; 390 return PNFS_NOT_ATTEMPTED;
386 } 391 }
387 dprintk("%s ino %lu sync %d req %Zu@%llu DS:%x:%hu\n", __func__, 392 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__,
388 data->inode->i_ino, sync, (size_t) data->args.count, offset, 393 data->inode->i_ino, sync, (size_t) data->args.count, offset,
389 ntohl(ds->ds_ip_addr), ntohs(ds->ds_port)); 394 ds->ds_remotestr);
390 395
391 data->write_done_cb = filelayout_write_done_cb; 396 data->write_done_cb = filelayout_write_done_cb;
392 data->ds_clp = ds->ds_clp; 397 data->ds_clp = ds->ds_clp;
@@ -428,6 +433,14 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
428 433
429 dprintk("--> %s\n", __func__); 434 dprintk("--> %s\n", __func__);
430 435
436 /* FIXME: remove this check when layout segment support is added */
437 if (lgr->range.offset != 0 ||
438 lgr->range.length != NFS4_MAX_UINT64) {
439 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
440 __func__);
441 goto out;
442 }
443
431 if (fl->pattern_offset > lgr->range.offset) { 444 if (fl->pattern_offset > lgr->range.offset) {
432 dprintk("%s pattern_offset %lld too large\n", 445 dprintk("%s pattern_offset %lld too large\n",
433 __func__, fl->pattern_offset); 446 __func__, fl->pattern_offset);
@@ -449,6 +462,10 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
449 goto out; 462 goto out;
450 } else 463 } else
451 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node); 464 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
465 /* Found deviceid is being reaped */
466 if (test_bit(NFS_DEVICEID_INVALID, &dsaddr->id_node.flags))
467 goto out_put;
468
452 fl->dsaddr = dsaddr; 469 fl->dsaddr = dsaddr;
453 470
454 if (fl->first_stripe_index < 0 || 471 if (fl->first_stripe_index < 0 ||
@@ -659,7 +676,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
659 * return true : coalesce page 676 * return true : coalesce page
660 * return false : don't coalesce page 677 * return false : don't coalesce page
661 */ 678 */
662bool 679static bool
663filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, 680filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
664 struct nfs_page *req) 681 struct nfs_page *req)
665{ 682{
@@ -670,8 +687,6 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
670 !nfs_generic_pg_test(pgio, prev, req)) 687 !nfs_generic_pg_test(pgio, prev, req))
671 return false; 688 return false;
672 689
673 if (!pgio->pg_lseg)
674 return 1;
675 p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT; 690 p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
676 r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT; 691 r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
677 stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit; 692 stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
@@ -682,6 +697,52 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
682 return (p_stripe == r_stripe); 697 return (p_stripe == r_stripe);
683} 698}
684 699
700void
701filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
702 struct nfs_page *req)
703{
704 BUG_ON(pgio->pg_lseg != NULL);
705
706 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
707 req->wb_context,
708 0,
709 NFS4_MAX_UINT64,
710 IOMODE_READ,
711 GFP_KERNEL);
712 /* If no lseg, fall back to read through mds */
713 if (pgio->pg_lseg == NULL)
714 nfs_pageio_reset_read_mds(pgio);
715}
716
717void
718filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
719 struct nfs_page *req)
720{
721 BUG_ON(pgio->pg_lseg != NULL);
722
723 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
724 req->wb_context,
725 0,
726 NFS4_MAX_UINT64,
727 IOMODE_RW,
728 GFP_NOFS);
729 /* If no lseg, fall back to write through mds */
730 if (pgio->pg_lseg == NULL)
731 nfs_pageio_reset_write_mds(pgio);
732}
733
734static const struct nfs_pageio_ops filelayout_pg_read_ops = {
735 .pg_init = filelayout_pg_init_read,
736 .pg_test = filelayout_pg_test,
737 .pg_doio = pnfs_generic_pg_readpages,
738};
739
740static const struct nfs_pageio_ops filelayout_pg_write_ops = {
741 .pg_init = filelayout_pg_init_write,
742 .pg_test = filelayout_pg_test,
743 .pg_doio = pnfs_generic_pg_writepages,
744};
745
685static bool filelayout_mark_pnfs_commit(struct pnfs_layout_segment *lseg) 746static bool filelayout_mark_pnfs_commit(struct pnfs_layout_segment *lseg)
686{ 747{
687 return !FILELAYOUT_LSEG(lseg)->commit_through_mds; 748 return !FILELAYOUT_LSEG(lseg)->commit_through_mds;
@@ -879,7 +940,8 @@ static struct pnfs_layoutdriver_type filelayout_type = {
879 .owner = THIS_MODULE, 940 .owner = THIS_MODULE,
880 .alloc_lseg = filelayout_alloc_lseg, 941 .alloc_lseg = filelayout_alloc_lseg,
881 .free_lseg = filelayout_free_lseg, 942 .free_lseg = filelayout_free_lseg,
882 .pg_test = filelayout_pg_test, 943 .pg_read_ops = &filelayout_pg_read_ops,
944 .pg_write_ops = &filelayout_pg_write_ops,
883 .mark_pnfs_commit = filelayout_mark_pnfs_commit, 945 .mark_pnfs_commit = filelayout_mark_pnfs_commit,
884 .choose_commit_list = filelayout_choose_commit_list, 946 .choose_commit_list = filelayout_choose_commit_list,
885 .commit_pagelist = filelayout_commit_pagelist, 947 .commit_pagelist = filelayout_commit_pagelist,
@@ -902,5 +964,7 @@ static void __exit nfs4filelayout_exit(void)
902 pnfs_unregister_layoutdriver(&filelayout_type); 964 pnfs_unregister_layoutdriver(&filelayout_type);
903} 965}
904 966
967MODULE_ALIAS("nfs-layouttype4-1");
968
905module_init(nfs4filelayout_init); 969module_init(nfs4filelayout_init);
906module_exit(nfs4filelayout_exit); 970module_exit(nfs4filelayout_exit);
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index cebe01e3795e..2e42284253fa 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -47,10 +47,17 @@ enum stripetype4 {
47}; 47};
48 48
49/* Individual ip address */ 49/* Individual ip address */
50struct nfs4_pnfs_ds_addr {
51 struct sockaddr_storage da_addr;
52 size_t da_addrlen;
53 struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */
54 char *da_remotestr; /* human readable addr+port */
55};
56
50struct nfs4_pnfs_ds { 57struct nfs4_pnfs_ds {
51 struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ 58 struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
52 u32 ds_ip_addr; 59 char *ds_remotestr; /* comma sep list of addrs */
53 u32 ds_port; 60 struct list_head ds_addrs;
54 struct nfs_client *ds_clp; 61 struct nfs_client *ds_clp;
55 atomic_t ds_count; 62 atomic_t ds_count;
56}; 63};
@@ -89,6 +96,12 @@ FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
89 generic_hdr); 96 generic_hdr);
90} 97}
91 98
99static inline struct nfs4_deviceid_node *
100FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
101{
102 return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
103}
104
92extern struct nfs_fh * 105extern struct nfs_fh *
93nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j); 106nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
94 107
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 3b7bf1377264..ed388aae9689 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -56,54 +56,139 @@ print_ds(struct nfs4_pnfs_ds *ds)
56 printk("%s NULL device\n", __func__); 56 printk("%s NULL device\n", __func__);
57 return; 57 return;
58 } 58 }
59 printk(" ip_addr %x port %hu\n" 59 printk(" ds %s\n"
60 " ref count %d\n" 60 " ref count %d\n"
61 " client %p\n" 61 " client %p\n"
62 " cl_exchange_flags %x\n", 62 " cl_exchange_flags %x\n",
63 ntohl(ds->ds_ip_addr), ntohs(ds->ds_port), 63 ds->ds_remotestr,
64 atomic_read(&ds->ds_count), ds->ds_clp, 64 atomic_read(&ds->ds_count), ds->ds_clp,
65 ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0); 65 ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
66} 66}
67 67
68/* nfs4_ds_cache_lock is held */ 68static bool
69static struct nfs4_pnfs_ds * 69same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
70_data_server_lookup_locked(u32 ip_addr, u32 port)
71{ 70{
72 struct nfs4_pnfs_ds *ds; 71 struct sockaddr_in *a, *b;
72 struct sockaddr_in6 *a6, *b6;
73
74 if (addr1->sa_family != addr2->sa_family)
75 return false;
76
77 switch (addr1->sa_family) {
78 case AF_INET:
79 a = (struct sockaddr_in *)addr1;
80 b = (struct sockaddr_in *)addr2;
81
82 if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
83 a->sin_port == b->sin_port)
84 return true;
85 break;
86
87 case AF_INET6:
88 a6 = (struct sockaddr_in6 *)addr1;
89 b6 = (struct sockaddr_in6 *)addr2;
90
91 /* LINKLOCAL addresses must have matching scope_id */
92 if (ipv6_addr_scope(&a6->sin6_addr) ==
93 IPV6_ADDR_SCOPE_LINKLOCAL &&
94 a6->sin6_scope_id != b6->sin6_scope_id)
95 return false;
96
97 if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
98 a6->sin6_port == b6->sin6_port)
99 return true;
100 break;
101
102 default:
103 dprintk("%s: unhandled address family: %u\n",
104 __func__, addr1->sa_family);
105 return false;
106 }
73 107
74 dprintk("_data_server_lookup: ip_addr=%x port=%hu\n", 108 return false;
75 ntohl(ip_addr), ntohs(port)); 109}
76 110
77 list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) { 111/*
78 if (ds->ds_ip_addr == ip_addr && 112 * Lookup DS by addresses. The first matching address returns true.
79 ds->ds_port == port) { 113 * nfs4_ds_cache_lock is held
80 return ds; 114 */
115static struct nfs4_pnfs_ds *
116_data_server_lookup_locked(struct list_head *dsaddrs)
117{
118 struct nfs4_pnfs_ds *ds;
119 struct nfs4_pnfs_ds_addr *da1, *da2;
120
121 list_for_each_entry(da1, dsaddrs, da_node) {
122 list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) {
123 list_for_each_entry(da2, &ds->ds_addrs, da_node) {
124 if (same_sockaddr(
125 (struct sockaddr *)&da1->da_addr,
126 (struct sockaddr *)&da2->da_addr))
127 return ds;
128 }
81 } 129 }
82 } 130 }
83 return NULL; 131 return NULL;
84} 132}
85 133
86/* 134/*
135 * Compare two lists of addresses.
136 */
137static bool
138_data_server_match_all_addrs_locked(struct list_head *dsaddrs1,
139 struct list_head *dsaddrs2)
140{
141 struct nfs4_pnfs_ds_addr *da1, *da2;
142 size_t count1 = 0,
143 count2 = 0;
144
145 list_for_each_entry(da1, dsaddrs1, da_node)
146 count1++;
147
148 list_for_each_entry(da2, dsaddrs2, da_node) {
149 bool found = false;
150 count2++;
151 list_for_each_entry(da1, dsaddrs1, da_node) {
152 if (same_sockaddr((struct sockaddr *)&da1->da_addr,
153 (struct sockaddr *)&da2->da_addr)) {
154 found = true;
155 break;
156 }
157 }
158 if (!found)
159 return false;
160 }
161
162 return (count1 == count2);
163}
164
165/*
87 * Create an rpc connection to the nfs4_pnfs_ds data server 166 * Create an rpc connection to the nfs4_pnfs_ds data server
88 * Currently only support IPv4 167 * Currently only supports IPv4 and IPv6 addresses
89 */ 168 */
90static int 169static int
91nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds) 170nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
92{ 171{
93 struct nfs_client *clp; 172 struct nfs_client *clp = ERR_PTR(-EIO);
94 struct sockaddr_in sin; 173 struct nfs4_pnfs_ds_addr *da;
95 int status = 0; 174 int status = 0;
96 175
97 dprintk("--> %s ip:port %x:%hu au_flavor %d\n", __func__, 176 dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
98 ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
99 mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor); 177 mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);
100 178
101 sin.sin_family = AF_INET; 179 BUG_ON(list_empty(&ds->ds_addrs));
102 sin.sin_addr.s_addr = ds->ds_ip_addr; 180
103 sin.sin_port = ds->ds_port; 181 list_for_each_entry(da, &ds->ds_addrs, da_node) {
182 dprintk("%s: DS %s: trying address %s\n",
183 __func__, ds->ds_remotestr, da->da_remotestr);
184
185 clp = nfs4_set_ds_client(mds_srv->nfs_client,
186 (struct sockaddr *)&da->da_addr,
187 da->da_addrlen, IPPROTO_TCP);
188 if (!IS_ERR(clp))
189 break;
190 }
104 191
105 clp = nfs4_set_ds_client(mds_srv->nfs_client, (struct sockaddr *)&sin,
106 sizeof(sin), IPPROTO_TCP);
107 if (IS_ERR(clp)) { 192 if (IS_ERR(clp)) {
108 status = PTR_ERR(clp); 193 status = PTR_ERR(clp);
109 goto out; 194 goto out;
@@ -115,8 +200,8 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
115 goto out_put; 200 goto out_put;
116 } 201 }
117 ds->ds_clp = clp; 202 ds->ds_clp = clp;
118 dprintk("%s [existing] ip=%x, port=%hu\n", __func__, 203 dprintk("%s [existing] server=%s\n", __func__,
119 ntohl(ds->ds_ip_addr), ntohs(ds->ds_port)); 204 ds->ds_remotestr);
120 goto out; 205 goto out;
121 } 206 }
122 207
@@ -135,8 +220,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
135 goto out_put; 220 goto out_put;
136 221
137 ds->ds_clp = clp; 222 ds->ds_clp = clp;
138 dprintk("%s [new] ip=%x, port=%hu\n", __func__, ntohl(ds->ds_ip_addr), 223 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
139 ntohs(ds->ds_port));
140out: 224out:
141 return status; 225 return status;
142out_put: 226out_put:
@@ -147,12 +231,25 @@ out_put:
147static void 231static void
148destroy_ds(struct nfs4_pnfs_ds *ds) 232destroy_ds(struct nfs4_pnfs_ds *ds)
149{ 233{
234 struct nfs4_pnfs_ds_addr *da;
235
150 dprintk("--> %s\n", __func__); 236 dprintk("--> %s\n", __func__);
151 ifdebug(FACILITY) 237 ifdebug(FACILITY)
152 print_ds(ds); 238 print_ds(ds);
153 239
154 if (ds->ds_clp) 240 if (ds->ds_clp)
155 nfs_put_client(ds->ds_clp); 241 nfs_put_client(ds->ds_clp);
242
243 while (!list_empty(&ds->ds_addrs)) {
244 da = list_first_entry(&ds->ds_addrs,
245 struct nfs4_pnfs_ds_addr,
246 da_node);
247 list_del_init(&da->da_node);
248 kfree(da->da_remotestr);
249 kfree(da);
250 }
251
252 kfree(ds->ds_remotestr);
156 kfree(ds); 253 kfree(ds);
157} 254}
158 255
@@ -179,31 +276,96 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
179 kfree(dsaddr); 276 kfree(dsaddr);
180} 277}
181 278
279/*
 280 * Create a string with a human-readable address and port to avoid
 281 * complicated setup around many dprintks.
282 */
283static char *
284nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
285{
286 struct nfs4_pnfs_ds_addr *da;
287 char *remotestr;
288 size_t len;
289 char *p;
290
 291	len = 3;        /* '{', '}' and terminating '\0' */
292 list_for_each_entry(da, dsaddrs, da_node) {
293 len += strlen(da->da_remotestr) + 1; /* string plus comma */
294 }
295
296 remotestr = kzalloc(len, gfp_flags);
297 if (!remotestr)
298 return NULL;
299
300 p = remotestr;
301 *(p++) = '{';
302 len--;
303 list_for_each_entry(da, dsaddrs, da_node) {
304 size_t ll = strlen(da->da_remotestr);
305
306 if (ll > len)
307 goto out_err;
308
309 memcpy(p, da->da_remotestr, ll);
310 p += ll;
311 len -= ll;
312
313 if (len < 1)
314 goto out_err;
315 (*p++) = ',';
316 len--;
317 }
318 if (len < 2)
319 goto out_err;
320 *(p++) = '}';
321 *p = '\0';
322 return remotestr;
323out_err:
324 kfree(remotestr);
325 return NULL;
326}
327
182static struct nfs4_pnfs_ds * 328static struct nfs4_pnfs_ds *
183nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags) 329nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
184{ 330{
185 struct nfs4_pnfs_ds *tmp_ds, *ds; 331 struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
332 char *remotestr;
186 333
187 ds = kzalloc(sizeof(*tmp_ds), gfp_flags); 334 if (list_empty(dsaddrs)) {
335 dprintk("%s: no addresses defined\n", __func__);
336 goto out;
337 }
338
339 ds = kzalloc(sizeof(*ds), gfp_flags);
188 if (!ds) 340 if (!ds)
189 goto out; 341 goto out;
190 342
 343	/* this is only used for debugging, so it's ok if it's NULL */
344 remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
345
191 spin_lock(&nfs4_ds_cache_lock); 346 spin_lock(&nfs4_ds_cache_lock);
192 tmp_ds = _data_server_lookup_locked(ip_addr, port); 347 tmp_ds = _data_server_lookup_locked(dsaddrs);
193 if (tmp_ds == NULL) { 348 if (tmp_ds == NULL) {
194 ds->ds_ip_addr = ip_addr; 349 INIT_LIST_HEAD(&ds->ds_addrs);
195 ds->ds_port = port; 350 list_splice_init(dsaddrs, &ds->ds_addrs);
351 ds->ds_remotestr = remotestr;
196 atomic_set(&ds->ds_count, 1); 352 atomic_set(&ds->ds_count, 1);
197 INIT_LIST_HEAD(&ds->ds_node); 353 INIT_LIST_HEAD(&ds->ds_node);
198 ds->ds_clp = NULL; 354 ds->ds_clp = NULL;
199 list_add(&ds->ds_node, &nfs4_data_server_cache); 355 list_add(&ds->ds_node, &nfs4_data_server_cache);
200 dprintk("%s add new data server ip 0x%x\n", __func__, 356 dprintk("%s add new data server %s\n", __func__,
201 ds->ds_ip_addr); 357 ds->ds_remotestr);
202 } else { 358 } else {
359 if (!_data_server_match_all_addrs_locked(&tmp_ds->ds_addrs,
360 dsaddrs)) {
361 dprintk("%s: multipath address mismatch: %s != %s",
362 __func__, tmp_ds->ds_remotestr, remotestr);
363 }
364 kfree(remotestr);
203 kfree(ds); 365 kfree(ds);
204 atomic_inc(&tmp_ds->ds_count); 366 atomic_inc(&tmp_ds->ds_count);
205 dprintk("%s data server found ip 0x%x, inc'ed ds_count to %d\n", 367 dprintk("%s data server %s found, inc'ed ds_count to %d\n",
206 __func__, tmp_ds->ds_ip_addr, 368 __func__, tmp_ds->ds_remotestr,
207 atomic_read(&tmp_ds->ds_count)); 369 atomic_read(&tmp_ds->ds_count));
208 ds = tmp_ds; 370 ds = tmp_ds;
209 } 371 }
@@ -213,18 +375,22 @@ out:
213} 375}
214 376
215/* 377/*
216 * Currently only support ipv4, and one multi-path address. 378 * Currently only supports ipv4, ipv6 and one multi-path address.
217 */ 379 */
218static struct nfs4_pnfs_ds * 380static struct nfs4_pnfs_ds_addr *
219decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags) 381decode_ds_addr(struct xdr_stream *streamp, gfp_t gfp_flags)
220{ 382{
221 struct nfs4_pnfs_ds *ds = NULL; 383 struct nfs4_pnfs_ds_addr *da = NULL;
222 char *buf; 384 char *buf, *portstr;
223 const char *ipend, *pstr; 385 u32 port;
224 u32 ip_addr, port; 386 int nlen, rlen;
225 int nlen, rlen, i;
226 int tmp[2]; 387 int tmp[2];
227 __be32 *p; 388 __be32 *p;
389 char *netid, *match_netid;
390 size_t len, match_netid_len;
391 char *startsep = "";
392 char *endsep = "";
393
228 394
229 /* r_netid */ 395 /* r_netid */
230 p = xdr_inline_decode(streamp, 4); 396 p = xdr_inline_decode(streamp, 4);
@@ -236,64 +402,123 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_fla
236 if (unlikely(!p)) 402 if (unlikely(!p))
237 goto out_err; 403 goto out_err;
238 404
239 /* Check that netid is "tcp" */ 405 netid = kmalloc(nlen+1, gfp_flags);
240 if (nlen != 3 || memcmp((char *)p, "tcp", 3)) { 406 if (unlikely(!netid))
241 dprintk("%s: ERROR: non ipv4 TCP r_netid\n", __func__);
242 goto out_err; 407 goto out_err;
243 }
244 408
245 /* r_addr */ 409 netid[nlen] = '\0';
410 memcpy(netid, p, nlen);
411
412 /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
246 p = xdr_inline_decode(streamp, 4); 413 p = xdr_inline_decode(streamp, 4);
247 if (unlikely(!p)) 414 if (unlikely(!p))
248 goto out_err; 415 goto out_free_netid;
249 rlen = be32_to_cpup(p); 416 rlen = be32_to_cpup(p);
250 417
251 p = xdr_inline_decode(streamp, rlen); 418 p = xdr_inline_decode(streamp, rlen);
252 if (unlikely(!p)) 419 if (unlikely(!p))
253 goto out_err; 420 goto out_free_netid;
254 421
255 /* ipv6 length plus port is legal */ 422 /* port is ".ABC.DEF", 8 chars max */
256 if (rlen > INET6_ADDRSTRLEN + 8) { 423 if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
257 dprintk("%s: Invalid address, length %d\n", __func__, 424 dprintk("%s: Invalid address, length %d\n", __func__,
258 rlen); 425 rlen);
259 goto out_err; 426 goto out_free_netid;
260 } 427 }
261 buf = kmalloc(rlen + 1, gfp_flags); 428 buf = kmalloc(rlen + 1, gfp_flags);
262 if (!buf) { 429 if (!buf) {
263 dprintk("%s: Not enough memory\n", __func__); 430 dprintk("%s: Not enough memory\n", __func__);
264 goto out_err; 431 goto out_free_netid;
265 } 432 }
266 buf[rlen] = '\0'; 433 buf[rlen] = '\0';
267 memcpy(buf, p, rlen); 434 memcpy(buf, p, rlen);
268 435
269 /* replace the port dots with dashes for the in4_pton() delimiter*/ 436 /* replace port '.' with '-' */
270 for (i = 0; i < 2; i++) { 437 portstr = strrchr(buf, '.');
271 char *res = strrchr(buf, '.'); 438 if (!portstr) {
272 if (!res) { 439 dprintk("%s: Failed finding expected dot in port\n",
273 dprintk("%s: Failed finding expected dots in port\n", 440 __func__);
274 __func__); 441 goto out_free_buf;
275 goto out_free; 442 }
276 } 443 *portstr = '-';
277 *res = '-'; 444
445 /* find '.' between address and port */
446 portstr = strrchr(buf, '.');
447 if (!portstr) {
448 dprintk("%s: Failed finding expected dot between address and "
449 "port\n", __func__);
450 goto out_free_buf;
278 } 451 }
452 *portstr = '\0';
279 453
280 /* Currently only support ipv4 address */ 454 da = kzalloc(sizeof(*da), gfp_flags);
281 if (in4_pton(buf, rlen, (u8 *)&ip_addr, '-', &ipend) == 0) { 455 if (unlikely(!da))
282 dprintk("%s: Only ipv4 addresses supported\n", __func__); 456 goto out_free_buf;
283 goto out_free; 457
458 INIT_LIST_HEAD(&da->da_node);
459
460 if (!rpc_pton(buf, portstr-buf, (struct sockaddr *)&da->da_addr,
461 sizeof(da->da_addr))) {
462 dprintk("%s: error parsing address %s\n", __func__, buf);
463 goto out_free_da;
284 } 464 }
285 465
286 /* port */ 466 portstr++;
287 pstr = ipend; 467 sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
288 sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
289 port = htons((tmp[0] << 8) | (tmp[1])); 468 port = htons((tmp[0] << 8) | (tmp[1]));
290 469
291 ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags); 470 switch (da->da_addr.ss_family) {
292 dprintk("%s: Decoded address and port %s\n", __func__, buf); 471 case AF_INET:
293out_free: 472 ((struct sockaddr_in *)&da->da_addr)->sin_port = port;
473 da->da_addrlen = sizeof(struct sockaddr_in);
474 match_netid = "tcp";
475 match_netid_len = 3;
476 break;
477
478 case AF_INET6:
479 ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
480 da->da_addrlen = sizeof(struct sockaddr_in6);
481 match_netid = "tcp6";
482 match_netid_len = 4;
483 startsep = "[";
484 endsep = "]";
485 break;
486
487 default:
488 dprintk("%s: unsupported address family: %u\n",
489 __func__, da->da_addr.ss_family);
490 goto out_free_da;
491 }
492
493 if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
494 dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
495 __func__, netid, match_netid);
496 goto out_free_da;
497 }
498
499 /* save human readable address */
500 len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
501 da->da_remotestr = kzalloc(len, gfp_flags);
502
503 /* NULL is ok, only used for dprintk */
504 if (da->da_remotestr)
505 snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
506 buf, endsep, ntohs(port));
507
508 dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
294 kfree(buf); 509 kfree(buf);
510 kfree(netid);
511 return da;
512
513out_free_da:
514 kfree(da);
515out_free_buf:
516 dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
517 kfree(buf);
518out_free_netid:
519 kfree(netid);
295out_err: 520out_err:
296 return ds; 521 return NULL;
297} 522}
298 523
299/* Decode opaque device data and return the result */ 524/* Decode opaque device data and return the result */
@@ -310,6 +535,8 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
310 struct xdr_stream stream; 535 struct xdr_stream stream;
311 struct xdr_buf buf; 536 struct xdr_buf buf;
312 struct page *scratch; 537 struct page *scratch;
538 struct list_head dsaddrs;
539 struct nfs4_pnfs_ds_addr *da;
313 540
314 /* set up xdr stream */ 541 /* set up xdr stream */
315 scratch = alloc_page(gfp_flags); 542 scratch = alloc_page(gfp_flags);
@@ -386,6 +613,8 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
386 NFS_SERVER(ino)->nfs_client, 613 NFS_SERVER(ino)->nfs_client,
387 &pdev->dev_id); 614 &pdev->dev_id);
388 615
616 INIT_LIST_HEAD(&dsaddrs);
617
389 for (i = 0; i < dsaddr->ds_num; i++) { 618 for (i = 0; i < dsaddr->ds_num; i++) {
390 int j; 619 int j;
391 u32 mp_count; 620 u32 mp_count;
@@ -395,48 +624,43 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
395 goto out_err_free_deviceid; 624 goto out_err_free_deviceid;
396 625
397 mp_count = be32_to_cpup(p); /* multipath count */ 626 mp_count = be32_to_cpup(p); /* multipath count */
398 if (mp_count > 1) {
399 printk(KERN_WARNING
400 "%s: Multipath count %d not supported, "
401 "skipping all greater than 1\n", __func__,
402 mp_count);
403 }
404 for (j = 0; j < mp_count; j++) { 627 for (j = 0; j < mp_count; j++) {
405 if (j == 0) { 628 da = decode_ds_addr(&stream, gfp_flags);
406 dsaddr->ds_list[i] = decode_and_add_ds(&stream, 629 if (da)
407 ino, gfp_flags); 630 list_add_tail(&da->da_node, &dsaddrs);
408 if (dsaddr->ds_list[i] == NULL) 631 }
409 goto out_err_free_deviceid; 632 if (list_empty(&dsaddrs)) {
410 } else { 633 dprintk("%s: no suitable DS addresses found\n",
411 u32 len; 634 __func__);
412 /* skip extra multipath */ 635 goto out_err_free_deviceid;
413 636 }
414 /* read len, skip */ 637
415 p = xdr_inline_decode(&stream, 4); 638 dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
416 if (unlikely(!p)) 639 if (!dsaddr->ds_list[i])
417 goto out_err_free_deviceid; 640 goto out_err_drain_dsaddrs;
418 len = be32_to_cpup(p); 641
419 642 /* If DS was already in cache, free ds addrs */
420 p = xdr_inline_decode(&stream, len); 643 while (!list_empty(&dsaddrs)) {
421 if (unlikely(!p)) 644 da = list_first_entry(&dsaddrs,
422 goto out_err_free_deviceid; 645 struct nfs4_pnfs_ds_addr,
423 646 da_node);
424 /* read len, skip */ 647 list_del_init(&da->da_node);
425 p = xdr_inline_decode(&stream, 4); 648 kfree(da->da_remotestr);
426 if (unlikely(!p)) 649 kfree(da);
427 goto out_err_free_deviceid;
428 len = be32_to_cpup(p);
429
430 p = xdr_inline_decode(&stream, len);
431 if (unlikely(!p))
432 goto out_err_free_deviceid;
433 }
434 } 650 }
435 } 651 }
436 652
437 __free_page(scratch); 653 __free_page(scratch);
438 return dsaddr; 654 return dsaddr;
439 655
656out_err_drain_dsaddrs:
657 while (!list_empty(&dsaddrs)) {
658 da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
659 da_node);
660 list_del_init(&da->da_node);
661 kfree(da->da_remotestr);
662 kfree(da);
663 }
440out_err_free_deviceid: 664out_err_free_deviceid:
441 nfs4_fl_free_deviceid(dsaddr); 665 nfs4_fl_free_deviceid(dsaddr);
442 /* stripe_indicies was part of dsaddr */ 666 /* stripe_indicies was part of dsaddr */
@@ -591,13 +815,13 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
591 815
592static void 816static void
593filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr, 817filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
594 int err, u32 ds_addr) 818 int err, const char *ds_remotestr)
595{ 819{
596 u32 *p = (u32 *)&dsaddr->id_node.deviceid; 820 u32 *p = (u32 *)&dsaddr->id_node.deviceid;
597 821
598 printk(KERN_ERR "NFS: data server %x connection error %d." 822 printk(KERN_ERR "NFS: data server %s connection error %d."
599 " Deviceid [%x%x%x%x] marked out of use.\n", 823 " Deviceid [%x%x%x%x] marked out of use.\n",
600 ds_addr, err, p[0], p[1], p[2], p[3]); 824 ds_remotestr, err, p[0], p[1], p[2], p[3]);
601 825
602 spin_lock(&nfs4_ds_cache_lock); 826 spin_lock(&nfs4_ds_cache_lock);
603 dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY; 827 dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
@@ -628,7 +852,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
628 err = nfs4_ds_connect(s, ds); 852 err = nfs4_ds_connect(s, ds);
629 if (err) { 853 if (err) {
630 filelayout_mark_devid_negative(dsaddr, err, 854 filelayout_mark_devid_negative(dsaddr, err,
631 ntohl(ds->ds_ip_addr)); 855 ds->ds_remotestr);
632 return NULL; 856 return NULL;
633 } 857 }
634 } 858 }
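
The decode_ds_addr() hunk above splits an RFC 5665 universal address of the form h1.h2.h3.h4.p1.p2 (or the ip6 equivalent): the last two dot-separated decimal fields carry the port octets and everything before them is the literal address. Below is a minimal userspace sketch of that split; split_uaddr() and its buffer handling are illustrative stand-ins, not the kernel code, which goes through rpc_pton() and sockaddr structures.

#include <stdio.h>
#include <string.h>

static int split_uaddr(char *uaddr, char **addr, unsigned short *port)
{
        char *p1, *p2;
        int hi, lo;

        p1 = strrchr(uaddr, '.');       /* low port octet */
        if (!p1)
                return -1;
        *p1 = '\0';
        p2 = strrchr(uaddr, '.');       /* high port octet */
        if (!p2) {
                *p1 = '.';
                return -1;
        }
        *p2 = '\0';
        if (sscanf(p2 + 1, "%d", &hi) != 1 || sscanf(p1 + 1, "%d", &lo) != 1)
                return -1;
        *addr = uaddr;                  /* what remains is the IP address */
        *port = (unsigned short)((hi << 8) | lo);
        return 0;
}

int main(void)
{
        char buf[] = "192.0.2.53.8.1"; /* example DS address, port 8*256+1 = 2049 */
        char *addr;
        unsigned short port;

        if (!split_uaddr(buf, &addr, &port))
                printf("addr=%s port=%hu\n", addr, port);
        return 0;
}
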
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 26bece8f3083..079614deca3f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -80,7 +80,10 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
80static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 80static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
81 struct nfs_fattr *fattr, struct iattr *sattr, 81 struct nfs_fattr *fattr, struct iattr *sattr,
82 struct nfs4_state *state); 82 struct nfs4_state *state);
83 83#ifdef CONFIG_NFS_V4_1
84static int nfs41_test_stateid(struct nfs_server *, struct nfs4_state *);
85static int nfs41_free_stateid(struct nfs_server *, struct nfs4_state *);
86#endif
84/* Prevent leaks of NFSv4 errors into userland */ 87/* Prevent leaks of NFSv4 errors into userland */
85static int nfs4_map_errors(int err) 88static int nfs4_map_errors(int err)
86{ 89{
@@ -1689,6 +1692,20 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
1689 return ret; 1692 return ret;
1690} 1693}
1691 1694
1695#if defined(CONFIG_NFS_V4_1)
1696static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1697{
1698 int status;
1699 struct nfs_server *server = NFS_SERVER(state->inode);
1700
1701 status = nfs41_test_stateid(server, state);
1702 if (status == NFS_OK)
1703 return 0;
1704 nfs41_free_stateid(server, state);
1705 return nfs4_open_expired(sp, state);
1706}
1707#endif
1708
1692/* 1709/*
1693 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 1710 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1694 * fields corresponding to attributes that were used to store the verifier. 1711 * fields corresponding to attributes that were used to store the verifier.
@@ -2252,13 +2269,14 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2252static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, 2269static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2253 struct nfs_fsinfo *info) 2270 struct nfs_fsinfo *info)
2254{ 2271{
2272 int minor_version = server->nfs_client->cl_minorversion;
2255 int status = nfs4_lookup_root(server, fhandle, info); 2273 int status = nfs4_lookup_root(server, fhandle, info);
2256 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) 2274 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2257 /* 2275 /*
2258 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM 2276 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2259 * by nfs4_map_errors() as this function exits. 2277 * by nfs4_map_errors() as this function exits.
2260 */ 2278 */
2261 status = nfs4_find_root_sec(server, fhandle, info); 2279 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2262 if (status == 0) 2280 if (status == 0)
2263 status = nfs4_server_capabilities(server, fhandle); 2281 status = nfs4_server_capabilities(server, fhandle);
2264 if (status == 0) 2282 if (status == 0)
@@ -4441,6 +4459,20 @@ out:
4441 return err; 4459 return err;
4442} 4460}
4443 4461
4462#if defined(CONFIG_NFS_V4_1)
4463static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4464{
4465 int status;
4466 struct nfs_server *server = NFS_SERVER(state->inode);
4467
4468 status = nfs41_test_stateid(server, state);
4469 if (status == NFS_OK)
4470 return 0;
4471 nfs41_free_stateid(server, state);
4472 return nfs4_lock_expired(state, request);
4473}
4474#endif
4475
4444static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4476static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4445{ 4477{
4446 struct nfs_inode *nfsi = NFS_I(state->inode); 4478 struct nfs_inode *nfsi = NFS_I(state->inode);
@@ -4779,6 +4811,16 @@ out_inval:
4779 return -NFS4ERR_INVAL; 4811 return -NFS4ERR_INVAL;
4780} 4812}
4781 4813
4814static bool
4815nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
4816{
4817 if (a->server_scope_sz == b->server_scope_sz &&
4818 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
4819 return true;
4820
4821 return false;
4822}
4823
4782/* 4824/*
4783 * nfs4_proc_exchange_id() 4825 * nfs4_proc_exchange_id()
4784 * 4826 *
@@ -4821,9 +4863,31 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4821 init_utsname()->domainname, 4863 init_utsname()->domainname,
4822 clp->cl_rpcclient->cl_auth->au_flavor); 4864 clp->cl_rpcclient->cl_auth->au_flavor);
4823 4865
4866 res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
4867 if (unlikely(!res.server_scope))
4868 return -ENOMEM;
4869
4824 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4870 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4825 if (!status) 4871 if (!status)
4826 status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); 4872 status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
4873
4874 if (!status) {
4875 if (clp->server_scope &&
4876 !nfs41_same_server_scope(clp->server_scope,
4877 res.server_scope)) {
4878 dprintk("%s: server_scope mismatch detected\n",
4879 __func__);
4880 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
4881 kfree(clp->server_scope);
4882 clp->server_scope = NULL;
4883 }
4884
4885 if (!clp->server_scope)
4886 clp->server_scope = res.server_scope;
4887 else
4888 kfree(res.server_scope);
4889 }
4890
4827 dprintk("<-- %s status= %d\n", __func__, status); 4891 dprintk("<-- %s status= %d\n", __func__, status);
4828 return status; 4892 return status;
4829} 4893}
@@ -5704,7 +5768,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
5704{ 5768{
5705 struct nfs4_layoutreturn *lrp = calldata; 5769 struct nfs4_layoutreturn *lrp = calldata;
5706 struct nfs_server *server; 5770 struct nfs_server *server;
5707 struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout; 5771 struct pnfs_layout_hdr *lo = lrp->args.layout;
5708 5772
5709 dprintk("--> %s\n", __func__); 5773 dprintk("--> %s\n", __func__);
5710 5774
@@ -5733,7 +5797,7 @@ static void nfs4_layoutreturn_release(void *calldata)
5733 struct nfs4_layoutreturn *lrp = calldata; 5797 struct nfs4_layoutreturn *lrp = calldata;
5734 5798
5735 dprintk("--> %s\n", __func__); 5799 dprintk("--> %s\n", __func__);
5736 put_layout_hdr(NFS_I(lrp->args.inode)->layout); 5800 put_layout_hdr(lrp->args.layout);
5737 kfree(calldata); 5801 kfree(calldata);
5738 dprintk("<-- %s\n", __func__); 5802 dprintk("<-- %s\n", __func__);
5739} 5803}
@@ -5901,6 +5965,143 @@ out:
5901 rpc_put_task(task); 5965 rpc_put_task(task);
5902 return status; 5966 return status;
5903} 5967}
5968
5969static int
5970_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
5971 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
5972{
5973 struct nfs41_secinfo_no_name_args args = {
5974 .style = SECINFO_STYLE_CURRENT_FH,
5975 };
5976 struct nfs4_secinfo_res res = {
5977 .flavors = flavors,
5978 };
5979 struct rpc_message msg = {
5980 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
5981 .rpc_argp = &args,
5982 .rpc_resp = &res,
5983 };
5984 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5985}
5986
5987static int
5988nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
5989 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
5990{
5991 struct nfs4_exception exception = { };
5992 int err;
5993 do {
5994 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
5995 switch (err) {
5996 case 0:
5997 case -NFS4ERR_WRONGSEC:
5998 case -NFS4ERR_NOTSUPP:
5999 break;
6000 default:
6001 err = nfs4_handle_exception(server, err, &exception);
6002 }
6003 } while (exception.retry);
6004 return err;
6005}
6006
6007static int
6008nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6009 struct nfs_fsinfo *info)
6010{
6011 int err;
6012 struct page *page;
6013 rpc_authflavor_t flavor;
6014 struct nfs4_secinfo_flavors *flavors;
6015
6016 page = alloc_page(GFP_KERNEL);
6017 if (!page) {
6018 err = -ENOMEM;
6019 goto out;
6020 }
6021
6022 flavors = page_address(page);
6023 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6024
6025 /*
6026 * Fall back on "guess and check" method if
6027 * the server doesn't support SECINFO_NO_NAME
6028 */
6029 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6030 err = nfs4_find_root_sec(server, fhandle, info);
6031 goto out_freepage;
6032 }
6033 if (err)
6034 goto out_freepage;
6035
6036 flavor = nfs_find_best_sec(flavors);
6037 if (err == 0)
6038 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6039
6040out_freepage:
6041 put_page(page);
6042 if (err == -EACCES)
6043 return -EPERM;
6044out:
6045 return err;
6046}
6047static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
6048{
6049 int status;
6050 struct nfs41_test_stateid_args args = {
6051 .stateid = &state->stateid,
6052 };
6053 struct nfs41_test_stateid_res res;
6054 struct rpc_message msg = {
6055 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6056 .rpc_argp = &args,
6057 .rpc_resp = &res,
6058 };
6059 args.seq_args.sa_session = res.seq_res.sr_session = NULL;
6060 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
6061 return status;
6062}
6063
6064static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
6065{
6066 struct nfs4_exception exception = { };
6067 int err;
6068 do {
6069 err = nfs4_handle_exception(server,
6070 _nfs41_test_stateid(server, state),
6071 &exception);
6072 } while (exception.retry);
6073 return err;
6074}
6075
6076static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *state)
6077{
6078 int status;
6079 struct nfs41_free_stateid_args args = {
6080 .stateid = &state->stateid,
6081 };
6082 struct nfs41_free_stateid_res res;
6083 struct rpc_message msg = {
6084 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6085 .rpc_argp = &args,
6086 .rpc_resp = &res,
6087 };
6088
6089 args.seq_args.sa_session = res.seq_res.sr_session = NULL;
6090 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
6091 return status;
6092}
6093
6094static int nfs41_free_stateid(struct nfs_server *server, struct nfs4_state *state)
6095{
6096 struct nfs4_exception exception = { };
6097 int err;
6098 do {
6099 err = nfs4_handle_exception(server,
6100 _nfs4_free_stateid(server, state),
6101 &exception);
6102 } while (exception.retry);
6103 return err;
6104}
5904#endif /* CONFIG_NFS_V4_1 */ 6105#endif /* CONFIG_NFS_V4_1 */
5905 6106
5906struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 6107struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
@@ -5937,8 +6138,8 @@ struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
5937struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 6138struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
5938 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6139 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
5939 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6140 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
5940 .recover_open = nfs4_open_expired, 6141 .recover_open = nfs41_open_expired,
5941 .recover_lock = nfs4_lock_expired, 6142 .recover_lock = nfs41_lock_expired,
5942 .establish_clid = nfs41_init_clientid, 6143 .establish_clid = nfs41_init_clientid,
5943 .get_clid_cred = nfs4_get_exchange_id_cred, 6144 .get_clid_cred = nfs4_get_exchange_id_cred,
5944}; 6145};
@@ -5962,6 +6163,7 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
5962 .minor_version = 0, 6163 .minor_version = 0,
5963 .call_sync = _nfs4_call_sync, 6164 .call_sync = _nfs4_call_sync,
5964 .validate_stateid = nfs4_validate_delegation_stateid, 6165 .validate_stateid = nfs4_validate_delegation_stateid,
6166 .find_root_sec = nfs4_find_root_sec,
5965 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 6167 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
5966 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 6168 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
5967 .state_renewal_ops = &nfs40_state_renewal_ops, 6169 .state_renewal_ops = &nfs40_state_renewal_ops,
@@ -5972,6 +6174,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
5972 .minor_version = 1, 6174 .minor_version = 1,
5973 .call_sync = _nfs4_call_sync_session, 6175 .call_sync = _nfs4_call_sync_session,
5974 .validate_stateid = nfs41_validate_delegation_stateid, 6176 .validate_stateid = nfs41_validate_delegation_stateid,
6177 .find_root_sec = nfs41_find_root_sec,
5975 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 6178 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
5976 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 6179 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
5977 .state_renewal_ops = &nfs41_state_renewal_ops, 6180 .state_renewal_ops = &nfs41_state_renewal_ops,
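
The nfs41_open_expired() and nfs41_lock_expired() additions above share one recovery shape: probe the stateid with TEST_STATEID, and only when the server no longer recognizes it, FREE_STATEID and fall back to the full expired-state path. A toy sketch of that control flow follows; test_stateid(), free_stateid() and recover_open() are stand-in stubs, not real RPC calls.

#include <stdio.h>

#define NFS_OK 0

static int test_stateid(int id)   { return id == 42 ? NFS_OK : -1; }
static void free_stateid(int id)  { printf("FREE_STATEID %d\n", id); }
static int recover_open(int id)   { printf("re-opening for %d\n", id); return 0; }

static int open_expired(int id)
{
        /* Cheap check first: if the server still knows the stateid, keep it. */
        if (test_stateid(id) == NFS_OK)
                return 0;
        /* Otherwise drop the stale stateid and fall back to full recovery. */
        free_stateid(id);
        return recover_open(id);
}

int main(void)
{
        open_expired(42);       /* stateid still valid, nothing to do */
        open_expired(7);        /* stale: freed, then re-opened */
        return 0;
}
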
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7acfe8843626..72ab97ef3d61 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1643,7 +1643,14 @@ static void nfs4_state_manager(struct nfs_client *clp)
1643 goto out_error; 1643 goto out_error;
1644 } 1644 }
1645 clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); 1645 clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1646 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); 1646
1647 if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
1648 &clp->cl_state))
1649 nfs4_state_start_reclaim_nograce(clp);
1650 else
1651 set_bit(NFS4CLNT_RECLAIM_REBOOT,
1652 &clp->cl_state);
1653
1647 pnfs_destroy_all_layouts(clp); 1654 pnfs_destroy_all_layouts(clp);
1648 } 1655 }
1649 1656
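
The state-manager branch above tests and clears the server-scope-mismatch bit atomically and uses the result to choose between nograce reclaim and the usual reboot-style reclaim. A toy illustration of that flag-driven branch; the bit helpers and reclaim actions here are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define CLNT_SCOPE_MISMATCH (1u << 0)
#define CLNT_RECLAIM_REBOOT (1u << 1)

static bool test_and_clear(unsigned int bit, unsigned int *word)
{
        bool was_set = (*word & bit) != 0;

        *word &= ~bit;
        return was_set;
}

static void check_lease_done(unsigned int *state)
{
        if (test_and_clear(CLNT_SCOPE_MISMATCH, state))
                printf("server scope changed: reclaim nograce\n");
        else
                *state |= CLNT_RECLAIM_REBOOT;  /* normal lease recovery */
}

int main(void)
{
        unsigned int state = CLNT_SCOPE_MISMATCH;

        check_lease_done(&state);       /* mismatch path */
        check_lease_done(&state);       /* flag cleared: reboot reclaim scheduled */
        printf("state=0x%x\n", state);
        return 0;
}
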
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e6e8f3b9a1de..c191a9baa422 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -343,6 +343,14 @@ static int nfs4_stat_to_errno(int);
343 1 /* FIXME: opaque lrf_body always empty at the moment */) 343 1 /* FIXME: opaque lrf_body always empty at the moment */)
344#define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \ 344#define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \
345 1 + decode_stateid_maxsz) 345 1 + decode_stateid_maxsz)
346#define encode_secinfo_no_name_maxsz (op_encode_hdr_maxsz + 1)
347#define decode_secinfo_no_name_maxsz decode_secinfo_maxsz
348#define encode_test_stateid_maxsz (op_encode_hdr_maxsz + 2 + \
349 XDR_QUADLEN(NFS4_STATEID_SIZE))
350#define decode_test_stateid_maxsz (op_decode_hdr_maxsz + 2 + 1)
351#define encode_free_stateid_maxsz (op_encode_hdr_maxsz + 1 + \
352 XDR_QUADLEN(NFS4_STATEID_SIZE))
353#define decode_free_stateid_maxsz (op_decode_hdr_maxsz + 1)
346#else /* CONFIG_NFS_V4_1 */ 354#else /* CONFIG_NFS_V4_1 */
347#define encode_sequence_maxsz 0 355#define encode_sequence_maxsz 0
348#define decode_sequence_maxsz 0 356#define decode_sequence_maxsz 0
@@ -772,6 +780,26 @@ static int nfs4_stat_to_errno(int);
772 decode_sequence_maxsz + \ 780 decode_sequence_maxsz + \
773 decode_putfh_maxsz + \ 781 decode_putfh_maxsz + \
774 decode_layoutreturn_maxsz) 782 decode_layoutreturn_maxsz)
783#define NFS4_enc_secinfo_no_name_sz (compound_encode_hdr_maxsz + \
784 encode_sequence_maxsz + \
785 encode_putrootfh_maxsz +\
786 encode_secinfo_no_name_maxsz)
787#define NFS4_dec_secinfo_no_name_sz (compound_decode_hdr_maxsz + \
788 decode_sequence_maxsz + \
789 decode_putrootfh_maxsz + \
790 decode_secinfo_no_name_maxsz)
791#define NFS4_enc_test_stateid_sz (compound_encode_hdr_maxsz + \
792 encode_sequence_maxsz + \
793 encode_test_stateid_maxsz)
794#define NFS4_dec_test_stateid_sz (compound_decode_hdr_maxsz + \
795 decode_sequence_maxsz + \
796 decode_test_stateid_maxsz)
797#define NFS4_enc_free_stateid_sz (compound_encode_hdr_maxsz + \
798 encode_sequence_maxsz + \
799 encode_free_stateid_maxsz)
800#define NFS4_dec_free_stateid_sz (compound_decode_hdr_maxsz + \
801 decode_sequence_maxsz + \
802 decode_free_stateid_maxsz)
775 803
776const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH + 804const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
777 compound_encode_hdr_maxsz + 805 compound_encode_hdr_maxsz +
@@ -1938,6 +1966,46 @@ encode_layoutreturn(struct xdr_stream *xdr,
1938 hdr->nops++; 1966 hdr->nops++;
1939 hdr->replen += decode_layoutreturn_maxsz; 1967 hdr->replen += decode_layoutreturn_maxsz;
1940} 1968}
1969
1970static int
1971encode_secinfo_no_name(struct xdr_stream *xdr,
1972 const struct nfs41_secinfo_no_name_args *args,
1973 struct compound_hdr *hdr)
1974{
1975 __be32 *p;
1976 p = reserve_space(xdr, 8);
1977 *p++ = cpu_to_be32(OP_SECINFO_NO_NAME);
1978 *p++ = cpu_to_be32(args->style);
1979 hdr->nops++;
1980 hdr->replen += decode_secinfo_no_name_maxsz;
1981 return 0;
1982}
1983
1984static void encode_test_stateid(struct xdr_stream *xdr,
1985 struct nfs41_test_stateid_args *args,
1986 struct compound_hdr *hdr)
1987{
1988 __be32 *p;
1989
1990 p = reserve_space(xdr, 8 + NFS4_STATEID_SIZE);
1991 *p++ = cpu_to_be32(OP_TEST_STATEID);
1992 *p++ = cpu_to_be32(1);
1993 xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
1994 hdr->nops++;
1995 hdr->replen += decode_test_stateid_maxsz;
1996}
1997
1998static void encode_free_stateid(struct xdr_stream *xdr,
1999 struct nfs41_free_stateid_args *args,
2000 struct compound_hdr *hdr)
2001{
2002 __be32 *p;
2003 p = reserve_space(xdr, 4 + NFS4_STATEID_SIZE);
2004 *p++ = cpu_to_be32(OP_FREE_STATEID);
2005 xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
2006 hdr->nops++;
2007 hdr->replen += decode_free_stateid_maxsz;
2008}
1941#endif /* CONFIG_NFS_V4_1 */ 2009#endif /* CONFIG_NFS_V4_1 */
1942 2010
1943/* 2011/*
@@ -2790,6 +2858,59 @@ static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
2790 encode_layoutreturn(xdr, args, &hdr); 2858 encode_layoutreturn(xdr, args, &hdr);
2791 encode_nops(&hdr); 2859 encode_nops(&hdr);
2792} 2860}
2861
2862/*
2863 * Encode SECINFO_NO_NAME request
2864 */
2865static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
2866 struct xdr_stream *xdr,
2867 struct nfs41_secinfo_no_name_args *args)
2868{
2869 struct compound_hdr hdr = {
2870 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2871 };
2872
2873 encode_compound_hdr(xdr, req, &hdr);
2874 encode_sequence(xdr, &args->seq_args, &hdr);
2875 encode_putrootfh(xdr, &hdr);
2876 encode_secinfo_no_name(xdr, args, &hdr);
2877 encode_nops(&hdr);
2878 return 0;
2879}
2880
2881/*
2882 * Encode TEST_STATEID request
2883 */
2884static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
2885 struct xdr_stream *xdr,
2886 struct nfs41_test_stateid_args *args)
2887{
2888 struct compound_hdr hdr = {
2889 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2890 };
2891
2892 encode_compound_hdr(xdr, req, &hdr);
2893 encode_sequence(xdr, &args->seq_args, &hdr);
2894 encode_test_stateid(xdr, args, &hdr);
2895 encode_nops(&hdr);
2896}
2897
2898/*
2899 * Encode FREE_STATEID request
2900 */
2901static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
2902 struct xdr_stream *xdr,
2903 struct nfs41_free_stateid_args *args)
2904{
2905 struct compound_hdr hdr = {
2906 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2907 };
2908
2909 encode_compound_hdr(xdr, req, &hdr);
2910 encode_sequence(xdr, &args->seq_args, &hdr);
2911 encode_free_stateid(xdr, args, &hdr);
2912 encode_nops(&hdr);
2913}
2793#endif /* CONFIG_NFS_V4_1 */ 2914#endif /* CONFIG_NFS_V4_1 */
2794 2915
2795static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) 2916static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -4977,11 +5098,17 @@ static int decode_exchange_id(struct xdr_stream *xdr,
4977 if (unlikely(status)) 5098 if (unlikely(status))
4978 return status; 5099 return status;
4979 5100
4980 /* Throw away server_scope */ 5101 /* Save server_scope */
4981 status = decode_opaque_inline(xdr, &dummy, &dummy_str); 5102 status = decode_opaque_inline(xdr, &dummy, &dummy_str);
4982 if (unlikely(status)) 5103 if (unlikely(status))
4983 return status; 5104 return status;
4984 5105
5106 if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
5107 return -EIO;
5108
5109 memcpy(res->server_scope->server_scope, dummy_str, dummy);
5110 res->server_scope->server_scope_sz = dummy;
5111
4985 /* Throw away Implementation id array */ 5112 /* Throw away Implementation id array */
4986 status = decode_opaque_inline(xdr, &dummy, &dummy_str); 5113 status = decode_opaque_inline(xdr, &dummy, &dummy_str);
4987 if (unlikely(status)) 5114 if (unlikely(status))
@@ -5322,6 +5449,55 @@ out_overflow:
5322 print_overflow_msg(__func__, xdr); 5449 print_overflow_msg(__func__, xdr);
5323 return -EIO; 5450 return -EIO;
5324} 5451}
5452
5453static int decode_test_stateid(struct xdr_stream *xdr,
5454 struct nfs41_test_stateid_res *res)
5455{
5456 __be32 *p;
5457 int status;
5458 int num_res;
5459
5460 status = decode_op_hdr(xdr, OP_TEST_STATEID);
5461 if (status)
5462 return status;
5463
5464 p = xdr_inline_decode(xdr, 4);
5465 if (unlikely(!p))
5466 goto out_overflow;
5467 num_res = be32_to_cpup(p++);
5468 if (num_res != 1)
5469 goto out;
5470
5471 p = xdr_inline_decode(xdr, 4);
5472 if (unlikely(!p))
5473 goto out_overflow;
5474 res->status = be32_to_cpup(p++);
5475 return res->status;
5476out_overflow:
5477 print_overflow_msg(__func__, xdr);
5478out:
5479 return -EIO;
5480}
5481
5482static int decode_free_stateid(struct xdr_stream *xdr,
5483 struct nfs41_free_stateid_res *res)
5484{
5485 __be32 *p;
5486 int status;
5487
5488 status = decode_op_hdr(xdr, OP_FREE_STATEID);
5489 if (status)
5490 return status;
5491
5492 p = xdr_inline_decode(xdr, 4);
5493 if (unlikely(!p))
5494 goto out_overflow;
5495 res->status = be32_to_cpup(p++);
5496 return res->status;
5497out_overflow:
5498 print_overflow_msg(__func__, xdr);
5499 return -EIO;
5500}
5325#endif /* CONFIG_NFS_V4_1 */ 5501#endif /* CONFIG_NFS_V4_1 */
5326 5502
5327/* 5503/*
@@ -6461,6 +6637,72 @@ static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
6461out: 6637out:
6462 return status; 6638 return status;
6463} 6639}
6640
6641/*
6642 * Decode SECINFO_NO_NAME response
6643 */
6644static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp,
6645 struct xdr_stream *xdr,
6646 struct nfs4_secinfo_res *res)
6647{
6648 struct compound_hdr hdr;
6649 int status;
6650
6651 status = decode_compound_hdr(xdr, &hdr);
6652 if (status)
6653 goto out;
6654 status = decode_sequence(xdr, &res->seq_res, rqstp);
6655 if (status)
6656 goto out;
6657 status = decode_putrootfh(xdr);
6658 if (status)
6659 goto out;
6660 status = decode_secinfo(xdr, res);
6661out:
6662 return status;
6663}
6664
6665/*
6666 * Decode TEST_STATEID response
6667 */
6668static int nfs4_xdr_dec_test_stateid(struct rpc_rqst *rqstp,
6669 struct xdr_stream *xdr,
6670 struct nfs41_test_stateid_res *res)
6671{
6672 struct compound_hdr hdr;
6673 int status;
6674
6675 status = decode_compound_hdr(xdr, &hdr);
6676 if (status)
6677 goto out;
6678 status = decode_sequence(xdr, &res->seq_res, rqstp);
6679 if (status)
6680 goto out;
6681 status = decode_test_stateid(xdr, res);
6682out:
6683 return status;
6684}
6685
6686/*
6687 * Decode FREE_STATEID response
6688 */
6689static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp,
6690 struct xdr_stream *xdr,
6691 struct nfs41_free_stateid_res *res)
6692{
6693 struct compound_hdr hdr;
6694 int status;
6695
6696 status = decode_compound_hdr(xdr, &hdr);
6697 if (status)
6698 goto out;
6699 status = decode_sequence(xdr, &res->seq_res, rqstp);
6700 if (status)
6701 goto out;
6702 status = decode_free_stateid(xdr, res);
6703out:
6704 return status;
6705}
6464#endif /* CONFIG_NFS_V4_1 */ 6706#endif /* CONFIG_NFS_V4_1 */
6465 6707
6466/** 6708/**
@@ -6663,6 +6905,9 @@ struct rpc_procinfo nfs4_procedures[] = {
6663 PROC(LAYOUTGET, enc_layoutget, dec_layoutget), 6905 PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
6664 PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit), 6906 PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
6665 PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn), 6907 PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
6908 PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
6909 PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
6910 PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
6666#endif /* CONFIG_NFS_V4_1 */ 6911#endif /* CONFIG_NFS_V4_1 */
6667}; 6912};
6668 6913
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 8ff2ea3f10ef..9383ca7245bc 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -1000,13 +1000,22 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
1000 if (!pnfs_generic_pg_test(pgio, prev, req)) 1000 if (!pnfs_generic_pg_test(pgio, prev, req))
1001 return false; 1001 return false;
1002 1002
1003 if (pgio->pg_lseg == NULL)
1004 return true;
1005
1006 return pgio->pg_count + req->wb_bytes <= 1003 return pgio->pg_count + req->wb_bytes <=
1007 OBJIO_LSEG(pgio->pg_lseg)->max_io_size; 1004 OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
1008} 1005}
1009 1006
1007static const struct nfs_pageio_ops objio_pg_read_ops = {
1008 .pg_init = pnfs_generic_pg_init_read,
1009 .pg_test = objio_pg_test,
1010 .pg_doio = pnfs_generic_pg_readpages,
1011};
1012
1013static const struct nfs_pageio_ops objio_pg_write_ops = {
1014 .pg_init = pnfs_generic_pg_init_write,
1015 .pg_test = objio_pg_test,
1016 .pg_doio = pnfs_generic_pg_writepages,
1017};
1018
1010static struct pnfs_layoutdriver_type objlayout_type = { 1019static struct pnfs_layoutdriver_type objlayout_type = {
1011 .id = LAYOUT_OSD2_OBJECTS, 1020 .id = LAYOUT_OSD2_OBJECTS,
1012 .name = "LAYOUT_OSD2_OBJECTS", 1021 .name = "LAYOUT_OSD2_OBJECTS",
@@ -1020,7 +1029,8 @@ static struct pnfs_layoutdriver_type objlayout_type = {
1020 1029
1021 .read_pagelist = objlayout_read_pagelist, 1030 .read_pagelist = objlayout_read_pagelist,
1022 .write_pagelist = objlayout_write_pagelist, 1031 .write_pagelist = objlayout_write_pagelist,
1023 .pg_test = objio_pg_test, 1032 .pg_read_ops = &objio_pg_read_ops,
1033 .pg_write_ops = &objio_pg_write_ops,
1024 1034
1025 .free_deviceid_node = objio_free_deviceid_node, 1035 .free_deviceid_node = objio_free_deviceid_node,
1026 1036
@@ -1055,5 +1065,7 @@ objlayout_exit(void)
1055 __func__); 1065 __func__);
1056} 1066}
1057 1067
1068MODULE_ALIAS("nfs-layouttype4-2");
1069
1058module_init(objlayout_init); 1070module_init(objlayout_init);
1059module_exit(objlayout_exit); 1071module_exit(objlayout_exit);
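
The objio hunk above replaces the single pg_test hook with const read and write nfs_pageio_ops tables bundling pg_init, pg_test and pg_doio. The following is a simplified sketch of that ops-table pattern; the descriptor and request types are toy stand-ins for the real structures.

#include <stdio.h>
#include <stdbool.h>

struct pg_descriptor;

struct pageio_ops {
        void (*pg_init)(struct pg_descriptor *d);
        bool (*pg_test)(struct pg_descriptor *d, int next_bytes);
        int  (*pg_doio)(struct pg_descriptor *d);
};

struct pg_descriptor {
        const struct pageio_ops *pg_ops;
        int pg_count;
        int max_io;
};

static void obj_init(struct pg_descriptor *d)             { d->pg_count = 0; }
static bool obj_test(struct pg_descriptor *d, int bytes)  { return d->pg_count + bytes <= d->max_io; }
static int  obj_doio(struct pg_descriptor *d)             { printf("io of %d bytes\n", d->pg_count); return 0; }

static const struct pageio_ops obj_read_ops = {
        .pg_init = obj_init,
        .pg_test = obj_test,
        .pg_doio = obj_doio,
};

int main(void)
{
        struct pg_descriptor d = { .pg_ops = &obj_read_ops, .max_io = 8192 };

        d.pg_ops->pg_init(&d);
        if (d.pg_ops->pg_test(&d, 4096))
                d.pg_count += 4096;
        d.pg_ops->pg_doio(&d);
        return 0;
}
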
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 18449f43c568..b60970cc7f1f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -230,7 +230,7 @@ EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
230 */ 230 */
231void nfs_pageio_init(struct nfs_pageio_descriptor *desc, 231void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
232 struct inode *inode, 232 struct inode *inode,
233 int (*doio)(struct nfs_pageio_descriptor *), 233 const struct nfs_pageio_ops *pg_ops,
234 size_t bsize, 234 size_t bsize,
235 int io_flags) 235 int io_flags)
236{ 236{
@@ -240,13 +240,12 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
240 desc->pg_bsize = bsize; 240 desc->pg_bsize = bsize;
241 desc->pg_base = 0; 241 desc->pg_base = 0;
242 desc->pg_moreio = 0; 242 desc->pg_moreio = 0;
243 desc->pg_recoalesce = 0;
243 desc->pg_inode = inode; 244 desc->pg_inode = inode;
244 desc->pg_doio = doio; 245 desc->pg_ops = pg_ops;
245 desc->pg_ioflags = io_flags; 246 desc->pg_ioflags = io_flags;
246 desc->pg_error = 0; 247 desc->pg_error = 0;
247 desc->pg_lseg = NULL; 248 desc->pg_lseg = NULL;
248 desc->pg_test = nfs_generic_pg_test;
249 pnfs_pageio_init(desc, inode);
250} 249}
251 250
252/** 251/**
@@ -276,7 +275,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
276 return false; 275 return false;
277 if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) 276 if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
278 return false; 277 return false;
279 return pgio->pg_test(pgio, prev, req); 278 return pgio->pg_ops->pg_test(pgio, prev, req);
280} 279}
281 280
282/** 281/**
@@ -297,6 +296,8 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
297 if (!nfs_can_coalesce_requests(prev, req, desc)) 296 if (!nfs_can_coalesce_requests(prev, req, desc))
298 return 0; 297 return 0;
299 } else { 298 } else {
299 if (desc->pg_ops->pg_init)
300 desc->pg_ops->pg_init(desc, req);
300 desc->pg_base = req->wb_pgbase; 301 desc->pg_base = req->wb_pgbase;
301 } 302 }
302 nfs_list_remove_request(req); 303 nfs_list_remove_request(req);
@@ -311,7 +312,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
311static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) 312static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
312{ 313{
313 if (!list_empty(&desc->pg_list)) { 314 if (!list_empty(&desc->pg_list)) {
314 int error = desc->pg_doio(desc); 315 int error = desc->pg_ops->pg_doio(desc);
315 if (error < 0) 316 if (error < 0)
316 desc->pg_error = error; 317 desc->pg_error = error;
317 else 318 else
@@ -331,7 +332,7 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
331 * Returns true if the request 'req' was successfully coalesced into the 332 * Returns true if the request 'req' was successfully coalesced into the
332 * existing list of pages 'desc'. 333 * existing list of pages 'desc'.
333 */ 334 */
334int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, 335static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
335 struct nfs_page *req) 336 struct nfs_page *req)
336{ 337{
337 while (!nfs_pageio_do_add_request(desc, req)) { 338 while (!nfs_pageio_do_add_request(desc, req)) {
@@ -340,17 +341,67 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
340 if (desc->pg_error < 0) 341 if (desc->pg_error < 0)
341 return 0; 342 return 0;
342 desc->pg_moreio = 0; 343 desc->pg_moreio = 0;
344 if (desc->pg_recoalesce)
345 return 0;
343 } 346 }
344 return 1; 347 return 1;
345} 348}
346 349
350static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
351{
352 LIST_HEAD(head);
353
354 do {
355 list_splice_init(&desc->pg_list, &head);
356 desc->pg_bytes_written -= desc->pg_count;
357 desc->pg_count = 0;
358 desc->pg_base = 0;
359 desc->pg_recoalesce = 0;
360
361 while (!list_empty(&head)) {
362 struct nfs_page *req;
363
364 req = list_first_entry(&head, struct nfs_page, wb_list);
365 nfs_list_remove_request(req);
366 if (__nfs_pageio_add_request(desc, req))
367 continue;
368 if (desc->pg_error < 0)
369 return 0;
370 break;
371 }
372 } while (desc->pg_recoalesce);
373 return 1;
374}
375
376int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
377 struct nfs_page *req)
378{
379 int ret;
380
381 do {
382 ret = __nfs_pageio_add_request(desc, req);
383 if (ret)
384 break;
385 if (desc->pg_error < 0)
386 break;
387 ret = nfs_do_recoalesce(desc);
388 } while (ret);
389 return ret;
390}
391
347/** 392/**
348 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor 393 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
349 * @desc: pointer to io descriptor 394 * @desc: pointer to io descriptor
350 */ 395 */
351void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) 396void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
352{ 397{
353 nfs_pageio_doio(desc); 398 for (;;) {
399 nfs_pageio_doio(desc);
400 if (!desc->pg_recoalesce)
401 break;
402 if (!nfs_do_recoalesce(desc))
403 break;
404 }
354} 405}
355 406
356/** 407/**
@@ -369,7 +420,7 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
369 if (!list_empty(&desc->pg_list)) { 420 if (!list_empty(&desc->pg_list)) {
370 struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev); 421 struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
371 if (index != prev->wb_index + 1) 422 if (index != prev->wb_index + 1)
372 nfs_pageio_doio(desc); 423 nfs_pageio_complete(desc);
373 } 424 }
374} 425}
375 426
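
nfs_pageio_complete() now loops: flush, and if the descriptor was flagged for recoalescing (for example after a pNFS-to-MDS fallback pushed requests back onto the list), replay the leftovers and flush again. A toy model of that loop follows; the fixed arrays and flags stand in for the real nfs_page lists and descriptor state.

#include <stdio.h>
#include <stdbool.h>

struct toy_desc {
        int pending[8];
        int npending;
        int leftovers[8];       /* requests pushed back by a driver fallback */
        int nleft;
        bool recoalesce;
};

static void toy_doio(struct toy_desc *d)
{
        if (d->npending)
                printf("submitting %d request(s)\n", d->npending);
        d->npending = 0;
}

static bool toy_recoalesce(struct toy_desc *d)
{
        int i;

        if (!d->nleft)
                return false;
        for (i = 0; i < d->nleft; i++)          /* re-queue what was pushed back */
                d->pending[d->npending++] = d->leftovers[i];
        d->nleft = 0;
        d->recoalesce = false;
        return true;
}

static void toy_complete(struct toy_desc *d)
{
        for (;;) {
                toy_doio(d);
                if (!d->recoalesce)
                        break;
                if (!toy_recoalesce(d))
                        break;
        }
}

int main(void)
{
        struct toy_desc d = {
                .pending = { 1, 2 }, .npending = 2,
                .leftovers = { 3 }, .nleft = 1,
                .recoalesce = true,
        };

        toy_complete(&d);       /* submits {1,2}, then re-queues and submits {3} */
        return 0;
}
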
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 29c0ca7fc347..38e5508555c6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -28,6 +28,7 @@
28 */ 28 */
29 29
30#include <linux/nfs_fs.h> 30#include <linux/nfs_fs.h>
31#include <linux/nfs_page.h>
31#include "internal.h" 32#include "internal.h"
32#include "pnfs.h" 33#include "pnfs.h"
33#include "iostat.h" 34#include "iostat.h"
@@ -448,11 +449,20 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
448void 449void
449pnfs_destroy_all_layouts(struct nfs_client *clp) 450pnfs_destroy_all_layouts(struct nfs_client *clp)
450{ 451{
452 struct nfs_server *server;
451 struct pnfs_layout_hdr *lo; 453 struct pnfs_layout_hdr *lo;
452 LIST_HEAD(tmp_list); 454 LIST_HEAD(tmp_list);
453 455
456 nfs4_deviceid_mark_client_invalid(clp);
457 nfs4_deviceid_purge_client(clp);
458
454 spin_lock(&clp->cl_lock); 459 spin_lock(&clp->cl_lock);
455 list_splice_init(&clp->cl_layouts, &tmp_list); 460 rcu_read_lock();
461 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
462 if (!list_empty(&server->layouts))
463 list_splice_init(&server->layouts, &tmp_list);
464 }
465 rcu_read_unlock();
456 spin_unlock(&clp->cl_lock); 466 spin_unlock(&clp->cl_lock);
457 467
458 while (!list_empty(&tmp_list)) { 468 while (!list_empty(&tmp_list)) {
@@ -661,6 +671,7 @@ _pnfs_return_layout(struct inode *ino)
661 lrp->args.stateid = stateid; 671 lrp->args.stateid = stateid;
662 lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id; 672 lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
663 lrp->args.inode = ino; 673 lrp->args.inode = ino;
674 lrp->args.layout = lo;
664 lrp->clp = NFS_SERVER(ino)->nfs_client; 675 lrp->clp = NFS_SERVER(ino)->nfs_client;
665 676
666 status = nfs4_proc_layoutreturn(lrp); 677 status = nfs4_proc_layoutreturn(lrp);
@@ -920,7 +931,8 @@ pnfs_update_layout(struct inode *ino,
920 }; 931 };
921 unsigned pg_offset; 932 unsigned pg_offset;
922 struct nfs_inode *nfsi = NFS_I(ino); 933 struct nfs_inode *nfsi = NFS_I(ino);
923 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; 934 struct nfs_server *server = NFS_SERVER(ino);
935 struct nfs_client *clp = server->nfs_client;
924 struct pnfs_layout_hdr *lo; 936 struct pnfs_layout_hdr *lo;
925 struct pnfs_layout_segment *lseg = NULL; 937 struct pnfs_layout_segment *lseg = NULL;
926 bool first = false; 938 bool first = false;
@@ -964,7 +976,7 @@ pnfs_update_layout(struct inode *ino,
964 */ 976 */
965 spin_lock(&clp->cl_lock); 977 spin_lock(&clp->cl_lock);
966 BUG_ON(!list_empty(&lo->plh_layouts)); 978 BUG_ON(!list_empty(&lo->plh_layouts));
967 list_add_tail(&lo->plh_layouts, &clp->cl_layouts); 979 list_add_tail(&lo->plh_layouts, &server->layouts);
968 spin_unlock(&clp->cl_lock); 980 spin_unlock(&clp->cl_lock);
969 } 981 }
970 982
@@ -973,7 +985,8 @@ pnfs_update_layout(struct inode *ino,
973 arg.offset -= pg_offset; 985 arg.offset -= pg_offset;
974 arg.length += pg_offset; 986 arg.length += pg_offset;
975 } 987 }
976 arg.length = PAGE_CACHE_ALIGN(arg.length); 988 if (arg.length != NFS4_MAX_UINT64)
989 arg.length = PAGE_CACHE_ALIGN(arg.length);
977 990
978 lseg = send_layoutget(lo, ctx, &arg, gfp_flags); 991 lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
979 if (!lseg && first) { 992 if (!lseg && first) {
@@ -991,6 +1004,7 @@ out_unlock:
991 spin_unlock(&ino->i_lock); 1004 spin_unlock(&ino->i_lock);
992 goto out; 1005 goto out;
993} 1006}
1007EXPORT_SYMBOL_GPL(pnfs_update_layout);
994 1008
995int 1009int
996pnfs_layout_process(struct nfs4_layoutget *lgp) 1010pnfs_layout_process(struct nfs4_layoutget *lgp)
@@ -1048,35 +1062,71 @@ out_forget_reply:
1048 goto out; 1062 goto out;
1049} 1063}
1050 1064
1065void
1066pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1067{
1068 BUG_ON(pgio->pg_lseg != NULL);
1069
1070 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1071 req->wb_context,
1072 req_offset(req),
1073 req->wb_bytes,
1074 IOMODE_READ,
1075 GFP_KERNEL);
1076 /* If no lseg, fall back to read through mds */
1077 if (pgio->pg_lseg == NULL)
1078 nfs_pageio_reset_read_mds(pgio);
1079
1080}
1081EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1082
1083void
1084pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1085{
1086 BUG_ON(pgio->pg_lseg != NULL);
1087
1088 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1089 req->wb_context,
1090 req_offset(req),
1091 req->wb_bytes,
1092 IOMODE_RW,
1093 GFP_NOFS);
1094 /* If no lseg, fall back to write through mds */
1095 if (pgio->pg_lseg == NULL)
1096 nfs_pageio_reset_write_mds(pgio);
1097}
1098EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1099
1051bool 1100bool
1052pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, 1101pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
1053 struct nfs_page *req)
1054{ 1102{
1055 enum pnfs_iomode access_type; 1103 struct nfs_server *server = NFS_SERVER(inode);
1056 gfp_t gfp_flags; 1104 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1057 1105
1058 /* We assume that pg_ioflags == 0 iff we're reading a page */ 1106 if (ld == NULL)
1059 if (pgio->pg_ioflags == 0) { 1107 return false;
1060 access_type = IOMODE_READ; 1108 nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
1061 gfp_flags = GFP_KERNEL; 1109 return true;
1062 } else { 1110}
1063 access_type = IOMODE_RW;
1064 gfp_flags = GFP_NOFS;
1065 }
1066 1111
1067 if (pgio->pg_lseg == NULL) { 1112bool
1068 if (pgio->pg_count != prev->wb_bytes) 1113pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
1069 return true; 1114{
1070 /* This is first coelesce call for a series of nfs_pages */ 1115 struct nfs_server *server = NFS_SERVER(inode);
1071 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 1116 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1072 prev->wb_context, 1117
1073 req_offset(prev), 1118 if (ld == NULL)
1074 pgio->pg_count, 1119 return false;
1075 access_type, 1120 nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
1076 gfp_flags); 1121 return true;
1077 if (pgio->pg_lseg == NULL) 1122}
1078 return true; 1123
1079 } 1124bool
1125pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1126 struct nfs_page *req)
1127{
1128 if (pgio->pg_lseg == NULL)
1129 return nfs_generic_pg_test(pgio, prev, req);
1080 1130
1081 /* 1131 /*
1082 * Test if a nfs_page is fully contained in the pnfs_layout_range. 1132 * Test if a nfs_page is fully contained in the pnfs_layout_range.
@@ -1120,15 +1170,30 @@ pnfs_ld_write_done(struct nfs_write_data *data)
1120} 1170}
1121EXPORT_SYMBOL_GPL(pnfs_ld_write_done); 1171EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1122 1172
1123enum pnfs_try_status 1173static void
1174pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1175 struct nfs_write_data *data)
1176{
1177 list_splice_tail_init(&data->pages, &desc->pg_list);
1178 if (data->req && list_empty(&data->req->wb_list))
1179 nfs_list_add_request(data->req, &desc->pg_list);
1180 nfs_pageio_reset_write_mds(desc);
1181 desc->pg_recoalesce = 1;
1182 nfs_writedata_release(data);
1183}
1184
1185static enum pnfs_try_status
1124pnfs_try_to_write_data(struct nfs_write_data *wdata, 1186pnfs_try_to_write_data(struct nfs_write_data *wdata,
1125 const struct rpc_call_ops *call_ops, int how) 1187 const struct rpc_call_ops *call_ops,
1188 struct pnfs_layout_segment *lseg,
1189 int how)
1126{ 1190{
1127 struct inode *inode = wdata->inode; 1191 struct inode *inode = wdata->inode;
1128 enum pnfs_try_status trypnfs; 1192 enum pnfs_try_status trypnfs;
1129 struct nfs_server *nfss = NFS_SERVER(inode); 1193 struct nfs_server *nfss = NFS_SERVER(inode);
1130 1194
1131 wdata->mds_ops = call_ops; 1195 wdata->mds_ops = call_ops;
1196 wdata->lseg = get_lseg(lseg);
1132 1197
1133 dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__, 1198 dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1134 inode->i_ino, wdata->args.count, wdata->args.offset, how); 1199 inode->i_ino, wdata->args.count, wdata->args.offset, how);
@@ -1144,6 +1209,44 @@ pnfs_try_to_write_data(struct nfs_write_data *wdata,
1144 return trypnfs; 1209 return trypnfs;
1145} 1210}
1146 1211
1212static void
1213pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
1214{
1215 struct nfs_write_data *data;
1216 const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1217 struct pnfs_layout_segment *lseg = desc->pg_lseg;
1218
1219 desc->pg_lseg = NULL;
1220 while (!list_empty(head)) {
1221 enum pnfs_try_status trypnfs;
1222
1223 data = list_entry(head->next, struct nfs_write_data, list);
1224 list_del_init(&data->list);
1225
1226 trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
1227 if (trypnfs == PNFS_NOT_ATTEMPTED)
1228 pnfs_write_through_mds(desc, data);
1229 }
1230 put_lseg(lseg);
1231}
1232
1233int
1234pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1235{
1236 LIST_HEAD(head);
1237 int ret;
1238
1239 ret = nfs_generic_flush(desc, &head);
1240 if (ret != 0) {
1241 put_lseg(desc->pg_lseg);
1242 desc->pg_lseg = NULL;
1243 return ret;
1244 }
1245 pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
1246 return 0;
1247}
1248EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1249
1147/* 1250/*
1148 * Called by non rpc-based layout drivers 1251 * Called by non rpc-based layout drivers
1149 */ 1252 */
@@ -1167,18 +1270,32 @@ pnfs_ld_read_done(struct nfs_read_data *data)
1167} 1270}
1168EXPORT_SYMBOL_GPL(pnfs_ld_read_done); 1271EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1169 1272
1273static void
1274pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1275 struct nfs_read_data *data)
1276{
1277 list_splice_tail_init(&data->pages, &desc->pg_list);
1278 if (data->req && list_empty(&data->req->wb_list))
1279 nfs_list_add_request(data->req, &desc->pg_list);
1280 nfs_pageio_reset_read_mds(desc);
1281 desc->pg_recoalesce = 1;
1282 nfs_readdata_release(data);
1283}
1284
1170/* 1285/*
1171 * Call the appropriate parallel I/O subsystem read function. 1286 * Call the appropriate parallel I/O subsystem read function.
1172 */ 1287 */
1173enum pnfs_try_status 1288static enum pnfs_try_status
1174pnfs_try_to_read_data(struct nfs_read_data *rdata, 1289pnfs_try_to_read_data(struct nfs_read_data *rdata,
1175 const struct rpc_call_ops *call_ops) 1290 const struct rpc_call_ops *call_ops,
1291 struct pnfs_layout_segment *lseg)
1176{ 1292{
1177 struct inode *inode = rdata->inode; 1293 struct inode *inode = rdata->inode;
1178 struct nfs_server *nfss = NFS_SERVER(inode); 1294 struct nfs_server *nfss = NFS_SERVER(inode);
1179 enum pnfs_try_status trypnfs; 1295 enum pnfs_try_status trypnfs;
1180 1296
1181 rdata->mds_ops = call_ops; 1297 rdata->mds_ops = call_ops;
1298 rdata->lseg = get_lseg(lseg);
1182 1299
1183 dprintk("%s: Reading ino:%lu %u@%llu\n", 1300 dprintk("%s: Reading ino:%lu %u@%llu\n",
1184 __func__, inode->i_ino, rdata->args.count, rdata->args.offset); 1301 __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
@@ -1194,6 +1311,44 @@ pnfs_try_to_read_data(struct nfs_read_data *rdata,
1194 return trypnfs; 1311 return trypnfs;
1195} 1312}
1196 1313
1314static void
1315pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
1316{
1317 struct nfs_read_data *data;
1318 const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1319 struct pnfs_layout_segment *lseg = desc->pg_lseg;
1320
1321 desc->pg_lseg = NULL;
1322 while (!list_empty(head)) {
1323 enum pnfs_try_status trypnfs;
1324
1325 data = list_entry(head->next, struct nfs_read_data, list);
1326 list_del_init(&data->list);
1327
1328 trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
1329 if (trypnfs == PNFS_NOT_ATTEMPTED)
1330 pnfs_read_through_mds(desc, data);
1331 }
1332 put_lseg(lseg);
1333}
1334
1335int
1336pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
1337{
1338 LIST_HEAD(head);
1339 int ret;
1340
1341 ret = nfs_generic_pagein(desc, &head);
1342 if (ret != 0) {
1343 put_lseg(desc->pg_lseg);
1344 desc->pg_lseg = NULL;
1345 return ret;
1346 }
1347 pnfs_do_multiple_reads(desc, &head);
1348 return 0;
1349}
1350EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
1351
1197/* 1352/*
1198 * Currently there is only one (whole file) write lseg. 1353 * Currently there is only one (whole file) write lseg.
1199 */ 1354 */
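
pnfs_do_multiple_writes() and pnfs_do_multiple_reads() above walk the list of prepared I/O chunks, offer each to the layout driver, and reroute any chunk the driver declines (PNFS_NOT_ATTEMPTED) through the MDS path instead. A hedged userspace sketch of that dispatch loop, with stub transport functions in place of the real RPC paths:

#include <stdio.h>
#include <stdbool.h>

enum try_status { PNFS_ATTEMPTED, PNFS_NOT_ATTEMPTED };

static enum try_status try_pnfs(int chunk, bool have_lseg)
{
        if (!have_lseg)
                return PNFS_NOT_ATTEMPTED;      /* no layout: let the MDS do it */
        printf("chunk %d -> data server\n", chunk);
        return PNFS_ATTEMPTED;
}

static void send_through_mds(int chunk)
{
        printf("chunk %d -> metadata server\n", chunk);
}

static void do_multiple(const int *chunks, int n, bool have_lseg)
{
        int i;

        for (i = 0; i < n; i++)
                if (try_pnfs(chunks[i], have_lseg) == PNFS_NOT_ATTEMPTED)
                        send_through_mds(chunks[i]);
}

int main(void)
{
        int chunks[] = { 0, 1, 2 };

        do_multiple(chunks, 3, true);   /* all three go to the data server */
        do_multiple(chunks, 3, false);  /* no layout: fall back to the MDS */
        return 0;
}
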
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 96bf4e6f45be..078670dfbe04 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -87,7 +87,8 @@ struct pnfs_layoutdriver_type {
87 void (*free_lseg) (struct pnfs_layout_segment *lseg); 87 void (*free_lseg) (struct pnfs_layout_segment *lseg);
88 88
89 /* test for nfs page cache coalescing */ 89 /* test for nfs page cache coalescing */
90 bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *); 90 const struct nfs_pageio_ops *pg_read_ops;
91 const struct nfs_pageio_ops *pg_write_ops;
91 92
92 /* Returns true if layoutdriver wants to divert this request to 93 /* Returns true if layoutdriver wants to divert this request to
93 * driver's commit routine. 94 * driver's commit routine.
@@ -148,16 +149,16 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
148/* pnfs.c */ 149/* pnfs.c */
149void get_layout_hdr(struct pnfs_layout_hdr *lo); 150void get_layout_hdr(struct pnfs_layout_hdr *lo);
150void put_lseg(struct pnfs_layout_segment *lseg); 151void put_lseg(struct pnfs_layout_segment *lseg);
151struct pnfs_layout_segment * 152
152pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 153bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
153 loff_t pos, u64 count, enum pnfs_iomode access_type, 154bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
154 gfp_t gfp_flags); 155
155void set_pnfs_layoutdriver(struct nfs_server *, u32 id); 156void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
156void unset_pnfs_layoutdriver(struct nfs_server *); 157void unset_pnfs_layoutdriver(struct nfs_server *);
157enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, 158void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *);
158 const struct rpc_call_ops *, int); 159int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
159enum pnfs_try_status pnfs_try_to_read_data(struct nfs_read_data *, 160void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *, struct nfs_page *);
160 const struct rpc_call_ops *); 161int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
161bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req); 162bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req);
162int pnfs_layout_process(struct nfs4_layoutget *lgp); 163int pnfs_layout_process(struct nfs4_layoutget *lgp);
163void pnfs_free_lseg_list(struct list_head *tmp_list); 164void pnfs_free_lseg_list(struct list_head *tmp_list);
@@ -182,6 +183,19 @@ int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
182int _pnfs_return_layout(struct inode *); 183int _pnfs_return_layout(struct inode *);
183int pnfs_ld_write_done(struct nfs_write_data *); 184int pnfs_ld_write_done(struct nfs_write_data *);
184int pnfs_ld_read_done(struct nfs_read_data *); 185int pnfs_ld_read_done(struct nfs_read_data *);
186struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
187 struct nfs_open_context *ctx,
188 loff_t pos,
189 u64 count,
190 enum pnfs_iomode iomode,
191 gfp_t gfp_flags);
192
193void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
194
195/* nfs4_deviceid_flags */
196enum {
197 NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */
198};
185 199
186/* pnfs_dev.c */ 200/* pnfs_dev.c */
187struct nfs4_deviceid_node { 201struct nfs4_deviceid_node {
@@ -189,13 +203,13 @@ struct nfs4_deviceid_node {
189 struct hlist_node tmpnode; 203 struct hlist_node tmpnode;
190 const struct pnfs_layoutdriver_type *ld; 204 const struct pnfs_layoutdriver_type *ld;
191 const struct nfs_client *nfs_client; 205 const struct nfs_client *nfs_client;
206 unsigned long flags;
192 struct nfs4_deviceid deviceid; 207 struct nfs4_deviceid deviceid;
193 atomic_t ref; 208 atomic_t ref;
194}; 209};
195 210
196void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id); 211void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
197struct nfs4_deviceid_node *nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *); 212struct nfs4_deviceid_node *nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
198struct nfs4_deviceid_node *nfs4_unhash_put_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
199void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *); 213void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
200void nfs4_init_deviceid_node(struct nfs4_deviceid_node *, 214void nfs4_init_deviceid_node(struct nfs4_deviceid_node *,
201 const struct pnfs_layoutdriver_type *, 215 const struct pnfs_layoutdriver_type *,
@@ -293,15 +307,6 @@ static inline int pnfs_return_layout(struct inode *ino)
293 return 0; 307 return 0;
294} 308}
295 309
296static inline void pnfs_pageio_init(struct nfs_pageio_descriptor *pgio,
297 struct inode *inode)
298{
299 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
300
301 if (ld)
302 pgio->pg_test = ld->pg_test;
303}
304
305#else /* CONFIG_NFS_V4_1 */ 310#else /* CONFIG_NFS_V4_1 */
306 311
307static inline void pnfs_destroy_all_layouts(struct nfs_client *clp) 312static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
@@ -322,28 +327,6 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg)
322{ 327{
323} 328}
324 329
325static inline struct pnfs_layout_segment *
326pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
327 loff_t pos, u64 count, enum pnfs_iomode access_type,
328 gfp_t gfp_flags)
329{
330 return NULL;
331}
332
333static inline enum pnfs_try_status
334pnfs_try_to_read_data(struct nfs_read_data *data,
335 const struct rpc_call_ops *call_ops)
336{
337 return PNFS_NOT_ATTEMPTED;
338}
339
340static inline enum pnfs_try_status
341pnfs_try_to_write_data(struct nfs_write_data *data,
342 const struct rpc_call_ops *call_ops, int how)
343{
344 return PNFS_NOT_ATTEMPTED;
345}
346
347static inline int pnfs_return_layout(struct inode *ino) 330static inline int pnfs_return_layout(struct inode *ino)
348{ 331{
349 return 0; 332 return 0;
@@ -385,9 +368,14 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
385{ 368{
386} 369}
387 370
388static inline void pnfs_pageio_init(struct nfs_pageio_descriptor *pgio, 371static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
389 struct inode *inode)
390{ 372{
373 return false;
374}
375
376static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
377{
378 return false;
391} 379}
392 380
393static inline void 381static inline void
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index f0f8e1e22f6c..6fda5228ef56 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -100,8 +100,8 @@ _find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
100 100
101 rcu_read_lock(); 101 rcu_read_lock();
102 d = _lookup_deviceid(ld, clp, id, hash); 102 d = _lookup_deviceid(ld, clp, id, hash);
103 if (d && !atomic_inc_not_zero(&d->ref)) 103 if (d != NULL)
104 d = NULL; 104 atomic_inc(&d->ref);
105 rcu_read_unlock(); 105 rcu_read_unlock();
106 return d; 106 return d;
107} 107}
@@ -115,15 +115,15 @@ nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
115EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid); 115EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
116 116
117/* 117/*
118 * Unhash and put deviceid 118 * Remove a deviceid from cache
119 * 119 *
120 * @clp nfs_client associated with deviceid 120 * @clp nfs_client associated with deviceid
121 * @id the deviceid to unhash 121 * @id the deviceid to unhash
122 * 122 *
123 * @ret the unhashed node, if found and dereferenced to zero, NULL otherwise. 123 * @ret the unhashed node, if found and dereferenced to zero, NULL otherwise.
124 */ 124 */
125struct nfs4_deviceid_node * 125void
126nfs4_unhash_put_deviceid(const struct pnfs_layoutdriver_type *ld, 126nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
127 const struct nfs_client *clp, const struct nfs4_deviceid *id) 127 const struct nfs_client *clp, const struct nfs4_deviceid *id)
128{ 128{
129 struct nfs4_deviceid_node *d; 129 struct nfs4_deviceid_node *d;
@@ -134,7 +134,7 @@ nfs4_unhash_put_deviceid(const struct pnfs_layoutdriver_type *ld,
134 rcu_read_unlock(); 134 rcu_read_unlock();
135 if (!d) { 135 if (!d) {
136 spin_unlock(&nfs4_deviceid_lock); 136 spin_unlock(&nfs4_deviceid_lock);
137 return NULL; 137 return;
138 } 138 }
139 hlist_del_init_rcu(&d->node); 139 hlist_del_init_rcu(&d->node);
140 spin_unlock(&nfs4_deviceid_lock); 140 spin_unlock(&nfs4_deviceid_lock);
@@ -142,28 +142,7 @@ nfs4_unhash_put_deviceid(const struct pnfs_layoutdriver_type *ld,
142 142
143 /* balance the initial ref set in pnfs_insert_deviceid */ 143 /* balance the initial ref set in pnfs_insert_deviceid */
144 if (atomic_dec_and_test(&d->ref)) 144 if (atomic_dec_and_test(&d->ref))
145 return d; 145 d->ld->free_deviceid_node(d);
146
147 return NULL;
148}
149EXPORT_SYMBOL_GPL(nfs4_unhash_put_deviceid);
150
151/*
152 * Delete a deviceid from cache
153 *
154 * @clp struct nfs_client qualifying the deviceid
155 * @id deviceid to delete
156 */
157void
158nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
159 const struct nfs_client *clp, const struct nfs4_deviceid *id)
160{
161 struct nfs4_deviceid_node *d;
162
163 d = nfs4_unhash_put_deviceid(ld, clp, id);
164 if (!d)
165 return;
166 d->ld->free_deviceid_node(d);
167} 146}
168EXPORT_SYMBOL_GPL(nfs4_delete_deviceid); 147EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
169 148
@@ -177,6 +156,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
177 INIT_HLIST_NODE(&d->tmpnode); 156 INIT_HLIST_NODE(&d->tmpnode);
178 d->ld = ld; 157 d->ld = ld;
179 d->nfs_client = nfs_client; 158 d->nfs_client = nfs_client;
159 d->flags = 0;
180 d->deviceid = *id; 160 d->deviceid = *id;
181 atomic_set(&d->ref, 1); 161 atomic_set(&d->ref, 1);
182} 162}
@@ -221,16 +201,15 @@ EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
221 * 201 *
222 * @d deviceid node to put 202 * @d deviceid node to put
223 * 203 *
224 * @ret true iff the node was deleted 204 * return true iff the node was deleted
205 * Note that the test for d->ref == 0 is sufficient to establish
206 * that the node is no longer hashed in the global device id cache.
225 */ 207 */
226bool 208bool
227nfs4_put_deviceid_node(struct nfs4_deviceid_node *d) 209nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
228{ 210{
229 if (!atomic_dec_and_lock(&d->ref, &nfs4_deviceid_lock)) 211 if (!atomic_dec_and_test(&d->ref))
230 return false; 212 return false;
231 hlist_del_init_rcu(&d->node);
232 spin_unlock(&nfs4_deviceid_lock);
233 synchronize_rcu();
234 d->ld->free_deviceid_node(d); 213 d->ld->free_deviceid_node(d);
235 return true; 214 return true;
236} 215}
@@ -275,3 +254,22 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp)
275 for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++) 254 for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
276 _deviceid_purge_client(clp, h); 255 _deviceid_purge_client(clp, h);
277} 256}
257
258/*
259 * Stop use of all deviceids associated with an nfs_client
260 */
261void
262nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
263{
264 struct nfs4_deviceid_node *d;
265 struct hlist_node *n;
266 int i;
267
268 rcu_read_lock();
269 for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
270 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
271 if (d->nfs_client == clp)
272 set_bit(NFS_DEVICEID_INVALID, &d->flags);
273 }
274 rcu_read_unlock();
275}
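The helper above walks every bucket of the global deviceid cache under rcu_read_lock() and sets the new NFS_DEVICEID_INVALID bit on each node owned by the client whose MDS clientid was recalled. A minimal sketch of how a consumer could honour that bit before reusing a cached device; the helper name is hypothetical, and the snippet assumes the nfs4_deviceid_node/NFS_DEVICEID_INVALID declarations added to fs/nfs/pnfs.h earlier in this patch:

    /* Hypothetical consumer, not part of this patch (kernel context assumed). */
    static bool deviceid_node_usable(const struct nfs4_deviceid_node *node)
    {
            /* skip devices whose owning MDS clientid has been recalled */
            return node && !test_bit(NFS_DEVICEID_INVALID, &node->flags);
    }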
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a68679f538fc..2171c043ab08 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -30,8 +30,7 @@
30 30
31#define NFSDBG_FACILITY NFSDBG_PAGECACHE 31#define NFSDBG_FACILITY NFSDBG_PAGECACHE
32 32
33static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc); 33static const struct nfs_pageio_ops nfs_pageio_read_ops;
34static int nfs_pagein_one(struct nfs_pageio_descriptor *desc);
35static const struct rpc_call_ops nfs_read_partial_ops; 34static const struct rpc_call_ops nfs_read_partial_ops;
36static const struct rpc_call_ops nfs_read_full_ops; 35static const struct rpc_call_ops nfs_read_full_ops;
37 36
@@ -68,7 +67,7 @@ void nfs_readdata_free(struct nfs_read_data *p)
68 mempool_free(p, nfs_rdata_mempool); 67 mempool_free(p, nfs_rdata_mempool);
69} 68}
70 69
71static void nfs_readdata_release(struct nfs_read_data *rdata) 70void nfs_readdata_release(struct nfs_read_data *rdata)
72{ 71{
73 put_lseg(rdata->lseg); 72 put_lseg(rdata->lseg);
74 put_nfs_open_context(rdata->args.context); 73 put_nfs_open_context(rdata->args.context);
@@ -113,6 +112,27 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
113 } 112 }
114} 113}
115 114
115static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
116 struct inode *inode)
117{
118 nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
119 NFS_SERVER(inode)->rsize, 0);
120}
121
122void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
123{
124 pgio->pg_ops = &nfs_pageio_read_ops;
125 pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
126}
127EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
128
129static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
130 struct inode *inode)
131{
132 if (!pnfs_pageio_init_read(pgio, inode))
133 nfs_pageio_init_read_mds(pgio, inode);
134}
135
116int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, 136int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
117 struct page *page) 137 struct page *page)
118{ 138{
@@ -131,14 +151,9 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
131 if (len < PAGE_CACHE_SIZE) 151 if (len < PAGE_CACHE_SIZE)
132 zero_user_segment(page, len, PAGE_CACHE_SIZE); 152 zero_user_segment(page, len, PAGE_CACHE_SIZE);
133 153
134 nfs_pageio_init(&pgio, inode, NULL, 0, 0); 154 nfs_pageio_init_read(&pgio, inode);
135 nfs_list_add_request(new, &pgio.pg_list); 155 nfs_pageio_add_request(&pgio, new);
136 pgio.pg_count = len; 156 nfs_pageio_complete(&pgio);
137
138 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
139 nfs_pagein_multi(&pgio);
140 else
141 nfs_pagein_one(&pgio);
142 return 0; 157 return 0;
143} 158}
144 159
@@ -202,17 +217,14 @@ EXPORT_SYMBOL_GPL(nfs_initiate_read);
202/* 217/*
203 * Set up the NFS read request struct 218 * Set up the NFS read request struct
204 */ 219 */
205static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data, 220static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
206 const struct rpc_call_ops *call_ops, 221 unsigned int count, unsigned int offset)
207 unsigned int count, unsigned int offset,
208 struct pnfs_layout_segment *lseg)
209{ 222{
210 struct inode *inode = req->wb_context->dentry->d_inode; 223 struct inode *inode = req->wb_context->dentry->d_inode;
211 224
212 data->req = req; 225 data->req = req;
213 data->inode = inode; 226 data->inode = inode;
214 data->cred = req->wb_context->cred; 227 data->cred = req->wb_context->cred;
215 data->lseg = get_lseg(lseg);
216 228
217 data->args.fh = NFS_FH(inode); 229 data->args.fh = NFS_FH(inode);
218 data->args.offset = req_offset(req) + offset; 230 data->args.offset = req_offset(req) + offset;
@@ -226,14 +238,36 @@ static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
226 data->res.count = count; 238 data->res.count = count;
227 data->res.eof = 0; 239 data->res.eof = 0;
228 nfs_fattr_init(&data->fattr); 240 nfs_fattr_init(&data->fattr);
241}
229 242
230 if (data->lseg && 243static int nfs_do_read(struct nfs_read_data *data,
231 (pnfs_try_to_read_data(data, call_ops) == PNFS_ATTEMPTED)) 244 const struct rpc_call_ops *call_ops)
232 return 0; 245{
246 struct inode *inode = data->args.context->dentry->d_inode;
233 247
234 return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops); 248 return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
235} 249}
236 250
251static int
252nfs_do_multiple_reads(struct list_head *head,
253 const struct rpc_call_ops *call_ops)
254{
255 struct nfs_read_data *data;
256 int ret = 0;
257
258 while (!list_empty(head)) {
259 int ret2;
260
261 data = list_entry(head->next, struct nfs_read_data, list);
262 list_del_init(&data->list);
263
264 ret2 = nfs_do_read(data, call_ops);
265 if (ret == 0)
266 ret = ret2;
267 }
268 return ret;
269}
270
237static void 271static void
238nfs_async_read_error(struct list_head *head) 272nfs_async_read_error(struct list_head *head)
239{ 273{
@@ -260,20 +294,19 @@ nfs_async_read_error(struct list_head *head)
260 * won't see the new data until our attribute cache is updated. This is more 294 * won't see the new data until our attribute cache is updated. This is more
261 * or less conventional NFS client behavior. 295 * or less conventional NFS client behavior.
262 */ 296 */
263static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc) 297static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
264{ 298{
265 struct nfs_page *req = nfs_list_entry(desc->pg_list.next); 299 struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
266 struct page *page = req->wb_page; 300 struct page *page = req->wb_page;
267 struct nfs_read_data *data; 301 struct nfs_read_data *data;
268 size_t rsize = NFS_SERVER(desc->pg_inode)->rsize, nbytes; 302 size_t rsize = desc->pg_bsize, nbytes;
269 unsigned int offset; 303 unsigned int offset;
270 int requests = 0; 304 int requests = 0;
271 int ret = 0; 305 int ret = 0;
272 struct pnfs_layout_segment *lseg;
273 LIST_HEAD(list);
274 306
275 nfs_list_remove_request(req); 307 nfs_list_remove_request(req);
276 308
309 offset = 0;
277 nbytes = desc->pg_count; 310 nbytes = desc->pg_count;
278 do { 311 do {
279 size_t len = min(nbytes,rsize); 312 size_t len = min(nbytes,rsize);
@@ -281,45 +314,21 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc)
281 data = nfs_readdata_alloc(1); 314 data = nfs_readdata_alloc(1);
282 if (!data) 315 if (!data)
283 goto out_bad; 316 goto out_bad;
284 list_add(&data->pages, &list); 317 data->pagevec[0] = page;
318 nfs_read_rpcsetup(req, data, len, offset);
319 list_add(&data->list, res);
285 requests++; 320 requests++;
286 nbytes -= len; 321 nbytes -= len;
322 offset += len;
287 } while(nbytes != 0); 323 } while(nbytes != 0);
288 atomic_set(&req->wb_complete, requests); 324 atomic_set(&req->wb_complete, requests);
289
290 BUG_ON(desc->pg_lseg != NULL);
291 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
292 req_offset(req), desc->pg_count,
293 IOMODE_READ, GFP_KERNEL);
294 ClearPageError(page); 325 ClearPageError(page);
295 offset = 0; 326 desc->pg_rpc_callops = &nfs_read_partial_ops;
296 nbytes = desc->pg_count;
297 do {
298 int ret2;
299
300 data = list_entry(list.next, struct nfs_read_data, pages);
301 list_del_init(&data->pages);
302
303 data->pagevec[0] = page;
304
305 if (nbytes < rsize)
306 rsize = nbytes;
307 ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
308 rsize, offset, lseg);
309 if (ret == 0)
310 ret = ret2;
311 offset += rsize;
312 nbytes -= rsize;
313 } while (nbytes != 0);
314 put_lseg(lseg);
315 desc->pg_lseg = NULL;
316
317 return ret; 327 return ret;
318
319out_bad: 328out_bad:
320 while (!list_empty(&list)) { 329 while (!list_empty(res)) {
321 data = list_entry(list.next, struct nfs_read_data, pages); 330 data = list_entry(res->next, struct nfs_read_data, list);
322 list_del(&data->pages); 331 list_del(&data->list);
323 nfs_readdata_free(data); 332 nfs_readdata_free(data);
324 } 333 }
325 SetPageError(page); 334 SetPageError(page);
@@ -327,19 +336,19 @@ out_bad:
327 return -ENOMEM; 336 return -ENOMEM;
328} 337}
329 338
330static int nfs_pagein_one(struct nfs_pageio_descriptor *desc) 339static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
331{ 340{
332 struct nfs_page *req; 341 struct nfs_page *req;
333 struct page **pages; 342 struct page **pages;
334 struct nfs_read_data *data; 343 struct nfs_read_data *data;
335 struct list_head *head = &desc->pg_list; 344 struct list_head *head = &desc->pg_list;
336 struct pnfs_layout_segment *lseg = desc->pg_lseg; 345 int ret = 0;
337 int ret = -ENOMEM;
338 346
339 data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base, 347 data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
340 desc->pg_count)); 348 desc->pg_count));
341 if (!data) { 349 if (!data) {
342 nfs_async_read_error(head); 350 nfs_async_read_error(head);
351 ret = -ENOMEM;
343 goto out; 352 goto out;
344 } 353 }
345 354
@@ -352,19 +361,37 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
352 *pages++ = req->wb_page; 361 *pages++ = req->wb_page;
353 } 362 }
354 req = nfs_list_entry(data->pages.next); 363 req = nfs_list_entry(data->pages.next);
355 if ((!lseg) && list_is_singular(&data->pages))
356 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
357 req_offset(req), desc->pg_count,
358 IOMODE_READ, GFP_KERNEL);
359 364
360 ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, 365 nfs_read_rpcsetup(req, data, desc->pg_count, 0);
361 0, lseg); 366 list_add(&data->list, res);
367 desc->pg_rpc_callops = &nfs_read_full_ops;
362out: 368out:
363 put_lseg(lseg);
364 desc->pg_lseg = NULL;
365 return ret; 369 return ret;
366} 370}
367 371
372int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
373{
374 if (desc->pg_bsize < PAGE_CACHE_SIZE)
375 return nfs_pagein_multi(desc, head);
376 return nfs_pagein_one(desc, head);
377}
378
379static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
380{
381 LIST_HEAD(head);
382 int ret;
383
384 ret = nfs_generic_pagein(desc, &head);
385 if (ret == 0)
386 ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
387 return ret;
388}
389
390static const struct nfs_pageio_ops nfs_pageio_read_ops = {
391 .pg_test = nfs_generic_pg_test,
392 .pg_doio = nfs_generic_pg_readpages,
393};
394
368/* 395/*
369 * This is the callback from RPC telling us whether a reply was 396 * This is the callback from RPC telling us whether a reply was
370 * received or some error occurred (timeout or socket shutdown). 397 * received or some error occurred (timeout or socket shutdown).
@@ -635,8 +662,6 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
635 .pgio = &pgio, 662 .pgio = &pgio,
636 }; 663 };
637 struct inode *inode = mapping->host; 664 struct inode *inode = mapping->host;
638 struct nfs_server *server = NFS_SERVER(inode);
639 size_t rsize = server->rsize;
640 unsigned long npages; 665 unsigned long npages;
641 int ret = -ESTALE; 666 int ret = -ESTALE;
642 667
@@ -664,10 +689,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
664 if (ret == 0) 689 if (ret == 0)
665 goto read_complete; /* all pages were read */ 690 goto read_complete; /* all pages were read */
666 691
667 if (rsize < PAGE_CACHE_SIZE) 692 nfs_pageio_init_read(&pgio, inode);
668 nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
669 else
670 nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);
671 693
672 ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); 694 ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
673 695
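With the rsize-based choice of nfs_pagein_multi()/nfs_pagein_one() removed from the callers, reads are now driven through a descriptor that carries an ops table: nfs_pageio_init_read() installs the pNFS ops when pnfs_pageio_init_read() succeeds, otherwise the generic nfs_pageio_read_ops, whose pg_doio hook (nfs_generic_pg_readpages) still picks between the multi and one variants. A condensed sketch of the resulting calling pattern, mirroring nfs_readpage_async() in the hunk above; the wrapper name is illustrative only:

    /* Illustrative wrapper, not from the patch (kernel context assumed). */
    static int readpage_via_descriptor(struct inode *inode, struct nfs_page *req)
    {
            struct nfs_pageio_descriptor pgio;

            nfs_pageio_init_read(&pgio, inode);  /* pNFS ops if a layout driver bites, else MDS ops */
            nfs_pageio_add_request(&pgio, req);  /* coalescing consults pg_ops->pg_test */
            nfs_pageio_complete(&pgio);          /* I/O is issued through pg_ops->pg_doio */
            return 0;
    }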
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 8d6864c2a5fa..b2fbbde58e44 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -147,7 +147,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
147 147
148 alias = d_lookup(parent, &data->args.name); 148 alias = d_lookup(parent, &data->args.name);
149 if (alias != NULL) { 149 if (alias != NULL) {
150 int ret = 0; 150 int ret;
151 void *devname_garbage = NULL; 151 void *devname_garbage = NULL;
152 152
153 /* 153 /*
@@ -155,14 +155,16 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
155 * the sillyrename information to the aliased dentry. 155 * the sillyrename information to the aliased dentry.
156 */ 156 */
157 nfs_free_dname(data); 157 nfs_free_dname(data);
158 ret = nfs_copy_dname(alias, data);
158 spin_lock(&alias->d_lock); 159 spin_lock(&alias->d_lock);
159 if (alias->d_inode != NULL && 160 if (ret == 0 && alias->d_inode != NULL &&
160 !(alias->d_flags & DCACHE_NFSFS_RENAMED)) { 161 !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
161 devname_garbage = alias->d_fsdata; 162 devname_garbage = alias->d_fsdata;
162 alias->d_fsdata = data; 163 alias->d_fsdata = data;
163 alias->d_flags |= DCACHE_NFSFS_RENAMED; 164 alias->d_flags |= DCACHE_NFSFS_RENAMED;
164 ret = 1; 165 ret = 1;
165 } 166 } else
167 ret = 0;
166 spin_unlock(&alias->d_lock); 168 spin_unlock(&alias->d_lock);
167 nfs_dec_sillycount(dir); 169 nfs_dec_sillycount(dir);
168 dput(alias); 170 dput(alias);
@@ -171,8 +173,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
171 * point dentry is definitely not a root, so we won't need 173 * point dentry is definitely not a root, so we won't need
172 * that anymore. 174 * that anymore.
173 */ 175 */
174 if (devname_garbage) 176 kfree(devname_garbage);
175 kfree(devname_garbage);
176 return ret; 177 return ret;
177 } 178 }
178 data->dir = igrab(dir); 179 data->dir = igrab(dir);
@@ -204,8 +205,6 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
204 if (parent == NULL) 205 if (parent == NULL)
205 goto out_free; 206 goto out_free;
206 dir = parent->d_inode; 207 dir = parent->d_inode;
207 if (nfs_copy_dname(dentry, data) != 0)
208 goto out_dput;
209 /* Non-exclusive lock protects against concurrent lookup() calls */ 208 /* Non-exclusive lock protects against concurrent lookup() calls */
210 spin_lock(&dir->i_lock); 209 spin_lock(&dir->i_lock);
211 if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) { 210 if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
@@ -366,6 +365,8 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
366 struct nfs_renamedata *data = calldata; 365 struct nfs_renamedata *data = calldata;
367 struct inode *old_dir = data->old_dir; 366 struct inode *old_dir = data->old_dir;
368 struct inode *new_dir = data->new_dir; 367 struct inode *new_dir = data->new_dir;
368 struct dentry *old_dentry = data->old_dentry;
369 struct dentry *new_dentry = data->new_dentry;
369 370
370 if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) { 371 if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
371 nfs_restart_rpc(task, NFS_SERVER(old_dir)->nfs_client); 372 nfs_restart_rpc(task, NFS_SERVER(old_dir)->nfs_client);
@@ -373,12 +374,12 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
373 } 374 }
374 375
375 if (task->tk_status != 0) { 376 if (task->tk_status != 0) {
376 nfs_cancel_async_unlink(data->old_dentry); 377 nfs_cancel_async_unlink(old_dentry);
377 return; 378 return;
378 } 379 }
379 380
380 nfs_set_verifier(data->old_dentry, nfs_save_change_attribute(old_dir)); 381 d_drop(old_dentry);
381 d_move(data->old_dentry, data->new_dentry); 382 d_drop(new_dentry);
382} 383}
383 384
384/** 385/**
@@ -501,6 +502,14 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
501 * and only performs the unlink once the last reference to it is put. 502 * and only performs the unlink once the last reference to it is put.
502 * 503 *
503 * The final cleanup is done during dentry_iput. 504 * The final cleanup is done during dentry_iput.
505 *
506 * (Note: NFSv4 is stateful, and has opens, so in theory an NFSv4 server
507 * could take responsibility for keeping open files referenced. The server
508 * would also need to ensure that opened-but-deleted files were kept over
509 * reboots. However, we may not assume a server does so. (RFC 5661
510 * does provide an OPEN4_RESULT_PRESERVE_UNLINKED flag that a server can
511 * use to advertise that it does this; some day we may take advantage of
512 * it.))
504 */ 513 */
505int 514int
506nfs_sillyrename(struct inode *dir, struct dentry *dentry) 515nfs_sillyrename(struct inode *dir, struct dentry *dentry)
@@ -560,6 +569,14 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
560 if (error) 569 if (error)
561 goto out_dput; 570 goto out_dput;
562 571
572 /* populate unlinkdata with the right dname */
573 error = nfs_copy_dname(sdentry,
574 (struct nfs_unlinkdata *)dentry->d_fsdata);
575 if (error) {
576 nfs_cancel_async_unlink(dentry);
577 goto out_dput;
578 }
579
563 /* run the rename task, undo unlink if it fails */ 580 /* run the rename task, undo unlink if it fails */
564 task = nfs_async_rename(dir, dir, dentry, sdentry); 581 task = nfs_async_rename(dir, dir, dentry, sdentry);
565 if (IS_ERR(task)) { 582 if (IS_ERR(task)) {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 00e37501fa3b..b39b37f80913 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -97,7 +97,7 @@ void nfs_writedata_free(struct nfs_write_data *p)
97 mempool_free(p, nfs_wdata_mempool); 97 mempool_free(p, nfs_wdata_mempool);
98} 98}
99 99
100static void nfs_writedata_release(struct nfs_write_data *wdata) 100void nfs_writedata_release(struct nfs_write_data *wdata)
101{ 101{
102 put_lseg(wdata->lseg); 102 put_lseg(wdata->lseg);
103 put_nfs_open_context(wdata->args.context); 103 put_nfs_open_context(wdata->args.context);
@@ -845,11 +845,9 @@ EXPORT_SYMBOL_GPL(nfs_initiate_write);
845/* 845/*
846 * Set up the argument/result storage required for the RPC call. 846 * Set up the argument/result storage required for the RPC call.
847 */ 847 */
848static int nfs_write_rpcsetup(struct nfs_page *req, 848static void nfs_write_rpcsetup(struct nfs_page *req,
849 struct nfs_write_data *data, 849 struct nfs_write_data *data,
850 const struct rpc_call_ops *call_ops,
851 unsigned int count, unsigned int offset, 850 unsigned int count, unsigned int offset,
852 struct pnfs_layout_segment *lseg,
853 int how) 851 int how)
854{ 852{
855 struct inode *inode = req->wb_context->dentry->d_inode; 853 struct inode *inode = req->wb_context->dentry->d_inode;
@@ -860,7 +858,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
860 data->req = req; 858 data->req = req;
861 data->inode = inode = req->wb_context->dentry->d_inode; 859 data->inode = inode = req->wb_context->dentry->d_inode;
862 data->cred = req->wb_context->cred; 860 data->cred = req->wb_context->cred;
863 data->lseg = get_lseg(lseg);
864 861
865 data->args.fh = NFS_FH(inode); 862 data->args.fh = NFS_FH(inode);
866 data->args.offset = req_offset(req) + offset; 863 data->args.offset = req_offset(req) + offset;
@@ -872,24 +869,51 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
872 data->args.context = get_nfs_open_context(req->wb_context); 869 data->args.context = get_nfs_open_context(req->wb_context);
873 data->args.lock_context = req->wb_lock_context; 870 data->args.lock_context = req->wb_lock_context;
874 data->args.stable = NFS_UNSTABLE; 871 data->args.stable = NFS_UNSTABLE;
875 if (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) { 872 switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
876 data->args.stable = NFS_DATA_SYNC; 873 case 0:
877 if (!nfs_need_commit(NFS_I(inode))) 874 break;
878 data->args.stable = NFS_FILE_SYNC; 875 case FLUSH_COND_STABLE:
876 if (nfs_need_commit(NFS_I(inode)))
877 break;
878 default:
879 data->args.stable = NFS_FILE_SYNC;
879 } 880 }
880 881
881 data->res.fattr = &data->fattr; 882 data->res.fattr = &data->fattr;
882 data->res.count = count; 883 data->res.count = count;
883 data->res.verf = &data->verf; 884 data->res.verf = &data->verf;
884 nfs_fattr_init(&data->fattr); 885 nfs_fattr_init(&data->fattr);
886}
885 887
886 if (data->lseg && 888static int nfs_do_write(struct nfs_write_data *data,
887 (pnfs_try_to_write_data(data, call_ops, how) == PNFS_ATTEMPTED)) 889 const struct rpc_call_ops *call_ops,
888 return 0; 890 int how)
891{
892 struct inode *inode = data->args.context->dentry->d_inode;
889 893
890 return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how); 894 return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
891} 895}
892 896
897static int nfs_do_multiple_writes(struct list_head *head,
898 const struct rpc_call_ops *call_ops,
899 int how)
900{
901 struct nfs_write_data *data;
902 int ret = 0;
903
904 while (!list_empty(head)) {
905 int ret2;
906
907 data = list_entry(head->next, struct nfs_write_data, list);
908 list_del_init(&data->list);
909
910 ret2 = nfs_do_write(data, call_ops, how);
911 if (ret == 0)
912 ret = ret2;
913 }
914 return ret;
915}
916
893/* If a nfs_flush_* function fails, it should remove reqs from @head and 917/* If a nfs_flush_* function fails, it should remove reqs from @head and
894 * call this on each, which will prepare them to be retried on next 918 * call this on each, which will prepare them to be retried on next
895 * writeback using standard nfs. 919 * writeback using standard nfs.
@@ -907,17 +931,15 @@ static void nfs_redirty_request(struct nfs_page *req)
907 * Generate multiple small requests to write out a single 931 * Generate multiple small requests to write out a single
908 * contiguous dirty area on one page. 932 * contiguous dirty area on one page.
909 */ 933 */
910static int nfs_flush_multi(struct nfs_pageio_descriptor *desc) 934static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
911{ 935{
912 struct nfs_page *req = nfs_list_entry(desc->pg_list.next); 936 struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
913 struct page *page = req->wb_page; 937 struct page *page = req->wb_page;
914 struct nfs_write_data *data; 938 struct nfs_write_data *data;
915 size_t wsize = NFS_SERVER(desc->pg_inode)->wsize, nbytes; 939 size_t wsize = desc->pg_bsize, nbytes;
916 unsigned int offset; 940 unsigned int offset;
917 int requests = 0; 941 int requests = 0;
918 int ret = 0; 942 int ret = 0;
919 struct pnfs_layout_segment *lseg;
920 LIST_HEAD(list);
921 943
922 nfs_list_remove_request(req); 944 nfs_list_remove_request(req);
923 945
@@ -927,6 +949,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
927 desc->pg_ioflags &= ~FLUSH_COND_STABLE; 949 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
928 950
929 951
952 offset = 0;
930 nbytes = desc->pg_count; 953 nbytes = desc->pg_count;
931 do { 954 do {
932 size_t len = min(nbytes, wsize); 955 size_t len = min(nbytes, wsize);
@@ -934,45 +957,21 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
934 data = nfs_writedata_alloc(1); 957 data = nfs_writedata_alloc(1);
935 if (!data) 958 if (!data)
936 goto out_bad; 959 goto out_bad;
937 list_add(&data->pages, &list); 960 data->pagevec[0] = page;
961 nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags);
962 list_add(&data->list, res);
938 requests++; 963 requests++;
939 nbytes -= len; 964 nbytes -= len;
965 offset += len;
940 } while (nbytes != 0); 966 } while (nbytes != 0);
941 atomic_set(&req->wb_complete, requests); 967 atomic_set(&req->wb_complete, requests);
942 968 desc->pg_rpc_callops = &nfs_write_partial_ops;
943 BUG_ON(desc->pg_lseg);
944 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
945 req_offset(req), desc->pg_count,
946 IOMODE_RW, GFP_NOFS);
947 ClearPageError(page);
948 offset = 0;
949 nbytes = desc->pg_count;
950 do {
951 int ret2;
952
953 data = list_entry(list.next, struct nfs_write_data, pages);
954 list_del_init(&data->pages);
955
956 data->pagevec[0] = page;
957
958 if (nbytes < wsize)
959 wsize = nbytes;
960 ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
961 wsize, offset, lseg, desc->pg_ioflags);
962 if (ret == 0)
963 ret = ret2;
964 offset += wsize;
965 nbytes -= wsize;
966 } while (nbytes != 0);
967
968 put_lseg(lseg);
969 desc->pg_lseg = NULL;
970 return ret; 969 return ret;
971 970
972out_bad: 971out_bad:
973 while (!list_empty(&list)) { 972 while (!list_empty(res)) {
974 data = list_entry(list.next, struct nfs_write_data, pages); 973 data = list_entry(res->next, struct nfs_write_data, list);
975 list_del(&data->pages); 974 list_del(&data->list);
976 nfs_writedata_free(data); 975 nfs_writedata_free(data);
977 } 976 }
978 nfs_redirty_request(req); 977 nfs_redirty_request(req);
@@ -987,14 +986,13 @@ out_bad:
987 * This is the case if nfs_updatepage detects a conflicting request 986 * This is the case if nfs_updatepage detects a conflicting request
988 * that has been written but not committed. 987 * that has been written but not committed.
989 */ 988 */
990static int nfs_flush_one(struct nfs_pageio_descriptor *desc) 989static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
991{ 990{
992 struct nfs_page *req; 991 struct nfs_page *req;
993 struct page **pages; 992 struct page **pages;
994 struct nfs_write_data *data; 993 struct nfs_write_data *data;
995 struct list_head *head = &desc->pg_list; 994 struct list_head *head = &desc->pg_list;
996 struct pnfs_layout_segment *lseg = desc->pg_lseg; 995 int ret = 0;
997 int ret;
998 996
999 data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base, 997 data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
1000 desc->pg_count)); 998 desc->pg_count));
@@ -1016,32 +1014,62 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
1016 *pages++ = req->wb_page; 1014 *pages++ = req->wb_page;
1017 } 1015 }
1018 req = nfs_list_entry(data->pages.next); 1016 req = nfs_list_entry(data->pages.next);
1019 if ((!lseg) && list_is_singular(&data->pages))
1020 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context,
1021 req_offset(req), desc->pg_count,
1022 IOMODE_RW, GFP_NOFS);
1023 1017
1024 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && 1018 if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1025 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) 1019 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
1026 desc->pg_ioflags &= ~FLUSH_COND_STABLE; 1020 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1027 1021
1028 /* Set up the argument struct */ 1022 /* Set up the argument struct */
1029 ret = nfs_write_rpcsetup(req, data, &nfs_write_full_ops, desc->pg_count, 0, lseg, desc->pg_ioflags); 1023 nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
1024 list_add(&data->list, res);
1025 desc->pg_rpc_callops = &nfs_write_full_ops;
1030out: 1026out:
1031 put_lseg(lseg); /* Cleans any gotten in ->pg_test */
1032 desc->pg_lseg = NULL;
1033 return ret; 1027 return ret;
1034} 1028}
1035 1029
1036static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, 1030int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
1031{
1032 if (desc->pg_bsize < PAGE_CACHE_SIZE)
1033 return nfs_flush_multi(desc, head);
1034 return nfs_flush_one(desc, head);
1035}
1036
1037static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1038{
1039 LIST_HEAD(head);
1040 int ret;
1041
1042 ret = nfs_generic_flush(desc, &head);
1043 if (ret == 0)
1044 ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
1045 desc->pg_ioflags);
1046 return ret;
1047}
1048
1049static const struct nfs_pageio_ops nfs_pageio_write_ops = {
1050 .pg_test = nfs_generic_pg_test,
1051 .pg_doio = nfs_generic_pg_writepages,
1052};
1053
1054static void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
1037 struct inode *inode, int ioflags) 1055 struct inode *inode, int ioflags)
1038{ 1056{
1039 size_t wsize = NFS_SERVER(inode)->wsize; 1057 nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
1058 NFS_SERVER(inode)->wsize, ioflags);
1059}
1060
1061void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1062{
1063 pgio->pg_ops = &nfs_pageio_write_ops;
1064 pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1065}
1066EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1040 1067
1041 if (wsize < PAGE_CACHE_SIZE) 1068static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1042 nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags); 1069 struct inode *inode, int ioflags)
1043 else 1070{
1044 nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags); 1071 if (!pnfs_pageio_init_write(pgio, inode, ioflags))
1072 nfs_pageio_init_write_mds(pgio, inode, ioflags);
1045} 1073}
1046 1074
1047/* 1075/*
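nfs_pagein_multi() and nfs_flush_multi() now size and set up each sub-request in a single pass: offset starts at zero, every iteration takes min(nbytes, pg_bsize), calls the rpcsetup helper, and advances offset while shrinking nbytes. A stand-alone user-space rendition of that loop; the concrete numbers (4096-byte request, 1024-byte block size) are an example, not values from the patch:

    #include <stdio.h>

    /* Mirrors the splitting-loop shape used by nfs_pagein_multi()/nfs_flush_multi(). */
    int main(void)
    {
            size_t bsize = 1024;       /* pg_bsize, i.e. the server rsize/wsize */
            size_t nbytes = 4096;      /* desc->pg_count */
            size_t offset = 0;
            int requests = 0;

            do {
                    size_t len = nbytes < bsize ? nbytes : bsize;

                    printf("request %d: offset=%zu len=%zu\n", requests, offset, len);
                    requests++;
                    nbytes -= len;
                    offset += len;
            } while (nbytes != 0);

            return 0;                  /* prints offsets 0, 1024, 2048, 3072 */
    }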
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 3b8d3979e03b..98e544274390 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -93,7 +93,7 @@ int omfs_make_empty(struct inode *inode, struct super_block *sb)
93 93
94 memset(bh->b_data, 0, sizeof(struct omfs_inode)); 94 memset(bh->b_data, 0, sizeof(struct omfs_inode));
95 95
96 if (inode->i_mode & S_IFDIR) { 96 if (S_ISDIR(inode->i_mode)) {
97 memset(&bh->b_data[OMFS_DIR_START], 0xff, 97 memset(&bh->b_data[OMFS_DIR_START], 0xff,
98 sbi->s_sys_blocksize - OMFS_DIR_START); 98 sbi->s_sys_blocksize - OMFS_DIR_START);
99 } else 99 } else
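This omfs hunk, and the XFS hunks that follow, replace open-coded mode tests with the S_ISxxx() helpers, which compare the whole S_IFMT field. That distinction matters because the S_IFxxx constants overlap bit-wise, so testing one of them (or a mask of two, as in the xfs_filestream.c hunk further down) with '&' also matches unrelated file types. A small user-space check of the octal arithmetic:

    #include <assert.h>
    #include <sys/stat.h>

    int main(void)
    {
            mode_t blk = S_IFBLK | 0644;            /* block device, rw-r--r-- */

            /* S_IFBLK (0060000) contains the S_IFDIR bit (0040000)... */
            assert(blk & S_IFDIR);                  /* the '&' style test misfires */
            assert(!S_ISDIR(blk));                  /* ((blk) & S_IFMT) == S_IFDIR is false */

            /* ...and symlinks and sockets share bits with S_IFREG | S_IFDIR */
            assert(S_IFLNK & (S_IFREG | S_IFDIR));
            assert(S_IFSOCK & (S_IFREG | S_IFDIR));
            assert(!S_ISREG(S_IFLNK) && !S_ISDIR(S_IFLNK));
            return 0;
    }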
diff --git a/fs/open.c b/fs/open.c
index 739b751aa73e..f71192109457 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -446,74 +446,52 @@ out:
446 return error; 446 return error;
447} 447}
448 448
449SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode) 449static int chmod_common(struct path *path, umode_t mode)
450{ 450{
451 struct inode * inode; 451 struct inode *inode = path->dentry->d_inode;
452 struct dentry * dentry;
453 struct file * file;
454 int err = -EBADF;
455 struct iattr newattrs; 452 struct iattr newattrs;
453 int error;
456 454
457 file = fget(fd); 455 error = mnt_want_write(path->mnt);
458 if (!file) 456 if (error)
459 goto out; 457 return error;
460
461 dentry = file->f_path.dentry;
462 inode = dentry->d_inode;
463
464 audit_inode(NULL, dentry);
465
466 err = mnt_want_write_file(file);
467 if (err)
468 goto out_putf;
469 mutex_lock(&inode->i_mutex); 458 mutex_lock(&inode->i_mutex);
470 err = security_path_chmod(dentry, file->f_vfsmnt, mode); 459 error = security_path_chmod(path->dentry, path->mnt, mode);
471 if (err) 460 if (error)
472 goto out_unlock; 461 goto out_unlock;
473 if (mode == (mode_t) -1)
474 mode = inode->i_mode;
475 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); 462 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
476 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; 463 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
477 err = notify_change(dentry, &newattrs); 464 error = notify_change(path->dentry, &newattrs);
478out_unlock: 465out_unlock:
479 mutex_unlock(&inode->i_mutex); 466 mutex_unlock(&inode->i_mutex);
480 mnt_drop_write(file->f_path.mnt); 467 mnt_drop_write(path->mnt);
481out_putf: 468 return error;
482 fput(file); 469}
483out: 470
471SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
472{
473 struct file * file;
474 int err = -EBADF;
475
476 file = fget(fd);
477 if (file) {
478 audit_inode(NULL, file->f_path.dentry);
479 err = chmod_common(&file->f_path, mode);
480 fput(file);
481 }
484 return err; 482 return err;
485} 483}
486 484
487SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode) 485SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
488{ 486{
489 struct path path; 487 struct path path;
490 struct inode *inode;
491 int error; 488 int error;
492 struct iattr newattrs;
493 489
494 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path); 490 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
495 if (error) 491 if (!error) {
496 goto out; 492 error = chmod_common(&path, mode);
497 inode = path.dentry->d_inode; 493 path_put(&path);
498 494 }
499 error = mnt_want_write(path.mnt);
500 if (error)
501 goto dput_and_out;
502 mutex_lock(&inode->i_mutex);
503 error = security_path_chmod(path.dentry, path.mnt, mode);
504 if (error)
505 goto out_unlock;
506 if (mode == (mode_t) -1)
507 mode = inode->i_mode;
508 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
509 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
510 error = notify_change(path.dentry, &newattrs);
511out_unlock:
512 mutex_unlock(&inode->i_mutex);
513 mnt_drop_write(path.mnt);
514dput_and_out:
515 path_put(&path);
516out:
517 return error; 495 return error;
518} 496}
519 497
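Both fchmod(2) and fchmodat(2) now resolve to a struct path and share chmod_common(), which takes only the 07777 permission bits (S_IALLUGO) from the caller and keeps the inode's file-type bits. The same bit arithmetic replayed in user space; S_IALLUGO is kernel-internal, so it is spelled out locally:

    #include <assert.h>
    #include <sys/stat.h>

    #define S_IALLUGO (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO)

    int main(void)
    {
            mode_t i_mode = S_IFREG | 0755;   /* current inode->i_mode */
            mode_t mode = 02644;              /* mode requested by the caller */
            mode_t ia_mode = (mode & S_IALLUGO) | (i_mode & ~S_IALLUGO);

            /* the S_IFMT type bits survive, only the permission bits change */
            assert(ia_mode == (mode_t)(S_IFREG | 02644));
            return 0;
    }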
diff --git a/fs/pipe.c b/fs/pipe.c
index 1b7f9af67ccf..0e0be1dc0f8e 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -948,7 +948,7 @@ static const struct dentry_operations pipefs_dentry_operations = {
948 948
949static struct inode * get_pipe_inode(void) 949static struct inode * get_pipe_inode(void)
950{ 950{
951 struct inode *inode = new_inode(pipe_mnt->mnt_sb); 951 struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
952 struct pipe_inode_info *pipe; 952 struct pipe_inode_info *pipe;
953 953
954 if (!inode) 954 if (!inode)
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index f1637f17c37c..9d99131d0d65 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -620,8 +620,7 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
620 if (!ent) goto out; 620 if (!ent) goto out;
621 621
622 memset(ent, 0, sizeof(struct proc_dir_entry)); 622 memset(ent, 0, sizeof(struct proc_dir_entry));
623 memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1); 623 memcpy(ent->name, fn, len + 1);
624 ent->name = ((char *) ent) + sizeof(*ent);
625 ent->namelen = len; 624 ent->namelen = len;
626 ent->mode = mode; 625 ent->mode = mode;
627 ent->nlink = nlink; 626 ent->nlink = nlink;
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 9020ac15baaa..f738024ccc8e 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -197,15 +197,15 @@ static __net_init int proc_net_ns_init(struct net *net)
197 int err; 197 int err;
198 198
199 err = -ENOMEM; 199 err = -ENOMEM;
200 netd = kzalloc(sizeof(*netd), GFP_KERNEL); 200 netd = kzalloc(sizeof(*netd) + 4, GFP_KERNEL);
201 if (!netd) 201 if (!netd)
202 goto out; 202 goto out;
203 203
204 netd->data = net; 204 netd->data = net;
205 netd->nlink = 2; 205 netd->nlink = 2;
206 netd->name = "net";
207 netd->namelen = 3; 206 netd->namelen = 3;
208 netd->parent = &proc_root; 207 netd->parent = &proc_root;
208 memcpy(netd->name, "net", 4);
209 209
210 err = -EEXIST; 210 err = -EEXIST;
211 net_statd = proc_net_mkdir(net, "stat", netd); 211 net_statd = proc_net_mkdir(net, "stat", netd);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index d6c3b416529b..9a8a2b77b874 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -186,13 +186,13 @@ static const struct inode_operations proc_root_inode_operations = {
186struct proc_dir_entry proc_root = { 186struct proc_dir_entry proc_root = {
187 .low_ino = PROC_ROOT_INO, 187 .low_ino = PROC_ROOT_INO,
188 .namelen = 5, 188 .namelen = 5,
189 .name = "/proc",
190 .mode = S_IFDIR | S_IRUGO | S_IXUGO, 189 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
191 .nlink = 2, 190 .nlink = 2,
192 .count = ATOMIC_INIT(1), 191 .count = ATOMIC_INIT(1),
193 .proc_iops = &proc_root_inode_operations, 192 .proc_iops = &proc_root_inode_operations,
194 .proc_fops = &proc_root_operations, 193 .proc_fops = &proc_root_operations,
195 .parent = &proc_root, 194 .parent = &proc_root,
195 .name = "/proc",
196}; 196};
197 197
198int pid_ns_prepare_proc(struct pid_namespace *ns) 198int pid_ns_prepare_proc(struct pid_namespace *ns)
diff --git a/fs/read_write.c b/fs/read_write.c
index 5907b49e4d7e..179f1c33ea57 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -166,8 +166,10 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
166 * long as offset isn't at the end of the file then the 166 * long as offset isn't at the end of the file then the
167 * offset is data. 167 * offset is data.
168 */ 168 */
169 if (offset >= inode->i_size) 169 if (offset >= inode->i_size) {
170 return -ENXIO; 170 retval = -ENXIO;
171 goto out;
172 }
171 break; 173 break;
172 case SEEK_HOLE: 174 case SEEK_HOLE:
173 /* 175 /*
@@ -175,8 +177,10 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
175 * as long as offset isn't i_size or larger, return 177 * as long as offset isn't i_size or larger, return
176 * i_size. 178 * i_size.
177 */ 179 */
178 if (offset >= inode->i_size) 180 if (offset >= inode->i_size) {
179 return -ENXIO; 181 retval = -ENXIO;
182 goto out;
183 }
180 offset = inode->i_size; 184 offset = inode->i_size;
181 break; 185 break;
182 } 186 }
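default_llseek() takes the inode mutex before this switch and drops it at the function's out: label, so the SEEK_DATA/SEEK_HOLE cases that returned -ENXIO directly would have bypassed that unlock; the fix routes the error through retval and goto out instead. The same single-exit shape in a stand-alone sketch, with a pthread mutex standing in for i_mutex and illustrative names throughout:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static long seek_check(long offset, long size)
    {
            long retval;

            pthread_mutex_lock(&lock);
            if (offset >= size) {
                    retval = -ENXIO;       /* a bare 'return -ENXIO;' would leak the lock */
                    goto out;
            }
            retval = offset;
    out:
            pthread_mutex_unlock(&lock);
            return retval;
    }

    int main(void)
    {
            return seek_check(10, 5) == -ENXIO ? 0 : 1;
    }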
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index acca2c5ca3fa..f7ce7debe14c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -265,7 +265,7 @@ xfs_open_by_handle(
265 return PTR_ERR(filp); 265 return PTR_ERR(filp);
266 } 266 }
267 267
268 if (inode->i_mode & S_IFREG) { 268 if (S_ISREG(inode->i_mode)) {
269 filp->f_flags |= O_NOATIME; 269 filp->f_flags |= O_NOATIME;
270 filp->f_mode |= FMODE_NOCMTIME; 270 filp->f_mode |= FMODE_NOCMTIME;
271 } 271 }
@@ -850,14 +850,14 @@ xfs_set_diflags(
850 di_flags |= XFS_DIFLAG_NODEFRAG; 850 di_flags |= XFS_DIFLAG_NODEFRAG;
851 if (xflags & XFS_XFLAG_FILESTREAM) 851 if (xflags & XFS_XFLAG_FILESTREAM)
852 di_flags |= XFS_DIFLAG_FILESTREAM; 852 di_flags |= XFS_DIFLAG_FILESTREAM;
853 if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 853 if (S_ISDIR(ip->i_d.di_mode)) {
854 if (xflags & XFS_XFLAG_RTINHERIT) 854 if (xflags & XFS_XFLAG_RTINHERIT)
855 di_flags |= XFS_DIFLAG_RTINHERIT; 855 di_flags |= XFS_DIFLAG_RTINHERIT;
856 if (xflags & XFS_XFLAG_NOSYMLINKS) 856 if (xflags & XFS_XFLAG_NOSYMLINKS)
857 di_flags |= XFS_DIFLAG_NOSYMLINKS; 857 di_flags |= XFS_DIFLAG_NOSYMLINKS;
858 if (xflags & XFS_XFLAG_EXTSZINHERIT) 858 if (xflags & XFS_XFLAG_EXTSZINHERIT)
859 di_flags |= XFS_DIFLAG_EXTSZINHERIT; 859 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
860 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 860 } else if (S_ISREG(ip->i_d.di_mode)) {
861 if (xflags & XFS_XFLAG_REALTIME) 861 if (xflags & XFS_XFLAG_REALTIME)
862 di_flags |= XFS_DIFLAG_REALTIME; 862 di_flags |= XFS_DIFLAG_REALTIME;
863 if (xflags & XFS_XFLAG_EXTSIZE) 863 if (xflags & XFS_XFLAG_EXTSIZE)
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index c51a3f903633..ab3e5c6c4642 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -414,7 +414,7 @@ xfs_bmap_add_attrfork_local(
414 414
415 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) 415 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
416 return 0; 416 return 0;
417 if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 417 if (S_ISDIR(ip->i_d.di_mode)) {
418 mp = ip->i_mount; 418 mp = ip->i_mount;
419 memset(&dargs, 0, sizeof(dargs)); 419 memset(&dargs, 0, sizeof(dargs));
420 dargs.dp = ip; 420 dargs.dp = ip;
@@ -3344,8 +3344,7 @@ xfs_bmap_local_to_extents(
3344 * We don't want to deal with the case of keeping inode data inline yet. 3344 * We don't want to deal with the case of keeping inode data inline yet.
3345 * So sending the data fork of a regular inode is invalid. 3345 * So sending the data fork of a regular inode is invalid.
3346 */ 3346 */
3347 ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG && 3347 ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
3348 whichfork == XFS_DATA_FORK));
3349 ifp = XFS_IFORK_PTR(ip, whichfork); 3348 ifp = XFS_IFORK_PTR(ip, whichfork);
3350 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 3349 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3351 flags = 0; 3350 flags = 0;
@@ -4052,7 +4051,7 @@ xfs_bmap_one_block(
4052 4051
4053#ifndef DEBUG 4052#ifndef DEBUG
4054 if (whichfork == XFS_DATA_FORK) { 4053 if (whichfork == XFS_DATA_FORK) {
4055 return ((ip->i_d.di_mode & S_IFMT) == S_IFREG) ? 4054 return S_ISREG(ip->i_d.di_mode) ?
4056 (ip->i_size == ip->i_mount->m_sb.sb_blocksize) : 4055 (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
4057 (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize); 4056 (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4058 } 4057 }
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 4580ce00aeb4..a2e27010c7fb 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -121,7 +121,7 @@ xfs_dir_isempty(
121{ 121{
122 xfs_dir2_sf_hdr_t *sfp; 122 xfs_dir2_sf_hdr_t *sfp;
123 123
124 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 124 ASSERT(S_ISDIR(dp->i_d.di_mode));
125 if (dp->i_d.di_size == 0) /* might happen during shutdown. */ 125 if (dp->i_d.di_size == 0) /* might happen during shutdown. */
126 return 1; 126 return 1;
127 if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp)) 127 if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
@@ -179,7 +179,7 @@ xfs_dir_init(
179 memset((char *)&args, 0, sizeof(args)); 179 memset((char *)&args, 0, sizeof(args));
180 args.dp = dp; 180 args.dp = dp;
181 args.trans = tp; 181 args.trans = tp;
182 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 182 ASSERT(S_ISDIR(dp->i_d.di_mode));
183 if ((error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino))) 183 if ((error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino)))
184 return error; 184 return error;
185 return xfs_dir2_sf_create(&args, pdp->i_ino); 185 return xfs_dir2_sf_create(&args, pdp->i_ino);
@@ -202,7 +202,7 @@ xfs_dir_createname(
202 int rval; 202 int rval;
203 int v; /* type-checking value */ 203 int v; /* type-checking value */
204 204
205 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 205 ASSERT(S_ISDIR(dp->i_d.di_mode));
206 if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) 206 if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
207 return rval; 207 return rval;
208 XFS_STATS_INC(xs_dir_create); 208 XFS_STATS_INC(xs_dir_create);
@@ -278,7 +278,7 @@ xfs_dir_lookup(
278 int rval; 278 int rval;
279 int v; /* type-checking value */ 279 int v; /* type-checking value */
280 280
281 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 281 ASSERT(S_ISDIR(dp->i_d.di_mode));
282 XFS_STATS_INC(xs_dir_lookup); 282 XFS_STATS_INC(xs_dir_lookup);
283 283
284 memset(&args, 0, sizeof(xfs_da_args_t)); 284 memset(&args, 0, sizeof(xfs_da_args_t));
@@ -333,7 +333,7 @@ xfs_dir_removename(
333 int rval; 333 int rval;
334 int v; /* type-checking value */ 334 int v; /* type-checking value */
335 335
336 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 336 ASSERT(S_ISDIR(dp->i_d.di_mode));
337 XFS_STATS_INC(xs_dir_remove); 337 XFS_STATS_INC(xs_dir_remove);
338 338
339 memset(&args, 0, sizeof(xfs_da_args_t)); 339 memset(&args, 0, sizeof(xfs_da_args_t));
@@ -382,7 +382,7 @@ xfs_readdir(
382 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 382 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
383 return XFS_ERROR(EIO); 383 return XFS_ERROR(EIO);
384 384
385 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 385 ASSERT(S_ISDIR(dp->i_d.di_mode));
386 XFS_STATS_INC(xs_dir_getdents); 386 XFS_STATS_INC(xs_dir_getdents);
387 387
388 if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) 388 if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
@@ -414,7 +414,7 @@ xfs_dir_replace(
414 int rval; 414 int rval;
415 int v; /* type-checking value */ 415 int v; /* type-checking value */
416 416
417 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 417 ASSERT(S_ISDIR(dp->i_d.di_mode));
418 418
419 if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) 419 if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
420 return rval; 420 return rval;
@@ -464,7 +464,7 @@ xfs_dir_canenter(
464 if (resblks) 464 if (resblks)
465 return 0; 465 return 0;
466 466
467 ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); 467 ASSERT(S_ISDIR(dp->i_d.di_mode));
468 468
469 memset(&args, 0, sizeof(xfs_da_args_t)); 469 memset(&args, 0, sizeof(xfs_da_args_t));
470 args.name = name->name; 470 args.name = name->name;
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 9124425b7f2f..3ff3d9e23ded 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -344,9 +344,9 @@ _xfs_filestream_update_ag(
344 * Either ip is a regular file and pip is a directory, or ip is a 344 * Either ip is a regular file and pip is a directory, or ip is a
345 * directory and pip is NULL. 345 * directory and pip is NULL.
346 */ 346 */
347 ASSERT(ip && (((ip->i_d.di_mode & S_IFREG) && pip && 347 ASSERT(ip && ((S_ISREG(ip->i_d.di_mode) && pip &&
348 (pip->i_d.di_mode & S_IFDIR)) || 348 S_ISDIR(pip->i_d.di_mode)) ||
349 ((ip->i_d.di_mode & S_IFDIR) && !pip))); 349 (S_ISDIR(ip->i_d.di_mode) && !pip)));
350 350
351 mp = ip->i_mount; 351 mp = ip->i_mount;
352 cache = mp->m_filestream; 352 cache = mp->m_filestream;
@@ -537,7 +537,7 @@ xfs_filestream_lookup_ag(
537 xfs_agnumber_t ag; 537 xfs_agnumber_t ag;
538 int ref; 538 int ref;
539 539
540 if (!(ip->i_d.di_mode & (S_IFREG | S_IFDIR))) { 540 if (!S_ISREG(ip->i_d.di_mode) && !S_ISDIR(ip->i_d.di_mode)) {
541 ASSERT(0); 541 ASSERT(0);
542 return NULLAGNUMBER; 542 return NULLAGNUMBER;
543 } 543 }
@@ -579,9 +579,9 @@ xfs_filestream_associate(
579 xfs_agnumber_t ag, rotorstep, startag; 579 xfs_agnumber_t ag, rotorstep, startag;
580 int err = 0; 580 int err = 0;
581 581
582 ASSERT(pip->i_d.di_mode & S_IFDIR); 582 ASSERT(S_ISDIR(pip->i_d.di_mode));
583 ASSERT(ip->i_d.di_mode & S_IFREG); 583 ASSERT(S_ISREG(ip->i_d.di_mode));
584 if (!(pip->i_d.di_mode & S_IFDIR) || !(ip->i_d.di_mode & S_IFREG)) 584 if (!S_ISDIR(pip->i_d.di_mode) || !S_ISREG(ip->i_d.di_mode))
585 return -EINVAL; 585 return -EINVAL;
586 586
587 mp = pip->i_mount; 587 mp = pip->i_mount;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 3cc21ddf9f7e..2fcca4b03ed3 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -368,7 +368,7 @@ xfs_iformat(
368 /* 368 /*
369 * no local regular files yet 369 * no local regular files yet
370 */ 370 */
371 if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) { 371 if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
372 xfs_warn(ip->i_mount, 372 xfs_warn(ip->i_mount,
373 "corrupt inode %Lu (local format for regular file).", 373 "corrupt inode %Lu (local format for regular file).",
374 (unsigned long long) ip->i_ino); 374 (unsigned long long) ip->i_ino);
@@ -1040,7 +1040,7 @@ xfs_ialloc(
1040 1040
1041 if (pip && XFS_INHERIT_GID(pip)) { 1041 if (pip && XFS_INHERIT_GID(pip)) {
1042 ip->i_d.di_gid = pip->i_d.di_gid; 1042 ip->i_d.di_gid = pip->i_d.di_gid;
1043 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) { 1043 if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
1044 ip->i_d.di_mode |= S_ISGID; 1044 ip->i_d.di_mode |= S_ISGID;
1045 } 1045 }
1046 } 1046 }
@@ -1097,14 +1097,14 @@ xfs_ialloc(
1097 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 1097 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1098 uint di_flags = 0; 1098 uint di_flags = 0;
1099 1099
1100 if ((mode & S_IFMT) == S_IFDIR) { 1100 if (S_ISDIR(mode)) {
1101 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1101 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1102 di_flags |= XFS_DIFLAG_RTINHERIT; 1102 di_flags |= XFS_DIFLAG_RTINHERIT;
1103 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1103 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1104 di_flags |= XFS_DIFLAG_EXTSZINHERIT; 1104 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1105 ip->i_d.di_extsize = pip->i_d.di_extsize; 1105 ip->i_d.di_extsize = pip->i_d.di_extsize;
1106 } 1106 }
1107 } else if ((mode & S_IFMT) == S_IFREG) { 1107 } else if (S_ISREG(mode)) {
1108 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1108 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1109 di_flags |= XFS_DIFLAG_REALTIME; 1109 di_flags |= XFS_DIFLAG_REALTIME;
1110 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1110 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
@@ -1188,7 +1188,7 @@ xfs_isize_check(
1188 int nimaps; 1188 int nimaps;
1189 xfs_bmbt_irec_t imaps[2]; 1189 xfs_bmbt_irec_t imaps[2];
1190 1190
1191 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) 1191 if (!S_ISREG(ip->i_d.di_mode))
1192 return; 1192 return;
1193 1193
1194 if (XFS_IS_REALTIME_INODE(ip)) 1194 if (XFS_IS_REALTIME_INODE(ip))
@@ -1828,7 +1828,7 @@ xfs_ifree(
1828 ASSERT(ip->i_d.di_nextents == 0); 1828 ASSERT(ip->i_d.di_nextents == 0);
1829 ASSERT(ip->i_d.di_anextents == 0); 1829 ASSERT(ip->i_d.di_anextents == 0);
1830 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) || 1830 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
1831 ((ip->i_d.di_mode & S_IFMT) != S_IFREG)); 1831 (!S_ISREG(ip->i_d.di_mode)));
1832 ASSERT(ip->i_d.di_nblocks == 0); 1832 ASSERT(ip->i_d.di_nblocks == 0);
1833 1833
1834 /* 1834 /*
@@ -2671,7 +2671,7 @@ xfs_iflush_int(
2671 __func__, ip->i_ino, ip, ip->i_d.di_magic); 2671 __func__, ip->i_ino, ip, ip->i_d.di_magic);
2672 goto corrupt_out; 2672 goto corrupt_out;
2673 } 2673 }
2674 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 2674 if (S_ISREG(ip->i_d.di_mode)) {
2675 if (XFS_TEST_ERROR( 2675 if (XFS_TEST_ERROR(
2676 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2676 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2677 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 2677 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
@@ -2681,7 +2681,7 @@ xfs_iflush_int(
2681 __func__, ip->i_ino, ip); 2681 __func__, ip->i_ino, ip);
2682 goto corrupt_out; 2682 goto corrupt_out;
2683 } 2683 }
2684 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 2684 } else if (S_ISDIR(ip->i_d.di_mode)) {
2685 if (XFS_TEST_ERROR( 2685 if (XFS_TEST_ERROR(
2686 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2686 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
2687 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 2687 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index a97644ab945a..2380a4bcbece 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -263,7 +263,7 @@ typedef struct xfs_inode {
263 struct inode i_vnode; /* embedded VFS inode */ 263 struct inode i_vnode; /* embedded VFS inode */
264} xfs_inode_t; 264} xfs_inode_t;
265 265
266#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ 266#define XFS_ISIZE(ip) S_ISREG((ip)->i_d.di_mode) ? \
267 (ip)->i_size : (ip)->i_d.di_size; 267 (ip)->i_size : (ip)->i_d.di_size;
268 268
269/* Convert from vfs inode to xfs inode */ 269/* Convert from vfs inode to xfs inode */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 8fe4206de057..052a2c0ec5fb 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2283,7 +2283,7 @@ xlog_recover_inode_pass2(
2283 /* Take the opportunity to reset the flush iteration count */ 2283 /* Take the opportunity to reset the flush iteration count */
2284 dicp->di_flushiter = 0; 2284 dicp->di_flushiter = 0;
2285 2285
2286 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) { 2286 if (unlikely(S_ISREG(dicp->di_mode))) {
2287 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2287 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2288 (dicp->di_format != XFS_DINODE_FMT_BTREE)) { 2288 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2289 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", 2289 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
@@ -2296,7 +2296,7 @@ xlog_recover_inode_pass2(
2296 error = EFSCORRUPTED; 2296 error = EFSCORRUPTED;
2297 goto error; 2297 goto error;
2298 } 2298 }
2299 } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) { 2299 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
2300 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2300 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2301 (dicp->di_format != XFS_DINODE_FMT_BTREE) && 2301 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2302 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { 2302 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 7f25245da289..092e16ae4d9d 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1331,7 +1331,7 @@ xfs_mountfs(
1331 1331
1332 ASSERT(rip != NULL); 1332 ASSERT(rip != NULL);
1333 1333
1334 if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) { 1334 if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
1335 xfs_warn(mp, "corrupted root inode %llu: not a directory", 1335 xfs_warn(mp, "corrupted root inode %llu: not a directory",
1336 (unsigned long long)rip->i_ino); 1336 (unsigned long long)rip->i_ino);
1337 xfs_iunlock(rip, XFS_ILOCK_EXCL); 1337 xfs_iunlock(rip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 77a59891734e..df78c297d1a1 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -116,7 +116,7 @@ xfs_rename(
116 trace_xfs_rename(src_dp, target_dp, src_name, target_name); 116 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
117 117
118 new_parent = (src_dp != target_dp); 118 new_parent = (src_dp != target_dp);
119 src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); 119 src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
120 120
121 if (src_is_directory) { 121 if (src_is_directory) {
122 /* 122 /*
@@ -226,7 +226,7 @@ xfs_rename(
226 * target and source are directories and that target can be 226 * target and source are directories and that target can be
227 * destroyed, or that neither is a directory. 227 * destroyed, or that neither is a directory.
228 */ 228 */
229 if ((target_ip->i_d.di_mode & S_IFMT) == S_IFDIR) { 229 if (S_ISDIR(target_ip->i_d.di_mode)) {
230 /* 230 /*
231 * Make sure target dir is empty. 231 * Make sure target dir is empty.
232 */ 232 */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 88d121486c52..9322e13f0c63 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -121,7 +121,7 @@ xfs_readlink(
121 121
122 xfs_ilock(ip, XFS_ILOCK_SHARED); 122 xfs_ilock(ip, XFS_ILOCK_SHARED);
123 123
124 ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK); 124 ASSERT(S_ISLNK(ip->i_d.di_mode));
125 ASSERT(ip->i_d.di_size <= MAXPATHLEN); 125 ASSERT(ip->i_d.di_size <= MAXPATHLEN);
126 126
127 pathlen = ip->i_d.di_size; 127 pathlen = ip->i_d.di_size;
@@ -529,7 +529,7 @@ xfs_release(
529 if (ip->i_d.di_nlink == 0) 529 if (ip->i_d.di_nlink == 0)
530 return 0; 530 return 0;
531 531
532 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 532 if ((S_ISREG(ip->i_d.di_mode) &&
533 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || 533 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
534 ip->i_delayed_blks > 0)) && 534 ip->i_delayed_blks > 0)) &&
535 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 535 (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
@@ -610,7 +610,7 @@ xfs_inactive(
610 truncate = ((ip->i_d.di_nlink == 0) && 610 truncate = ((ip->i_d.di_nlink == 0) &&
611 ((ip->i_d.di_size != 0) || (ip->i_size != 0) || 611 ((ip->i_d.di_size != 0) || (ip->i_size != 0) ||
612 (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) && 612 (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
613 ((ip->i_d.di_mode & S_IFMT) == S_IFREG)); 613 S_ISREG(ip->i_d.di_mode));
614 614
615 mp = ip->i_mount; 615 mp = ip->i_mount;
616 616
@@ -621,7 +621,7 @@ xfs_inactive(
621 goto out; 621 goto out;
622 622
623 if (ip->i_d.di_nlink != 0) { 623 if (ip->i_d.di_nlink != 0) {
624 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 624 if ((S_ISREG(ip->i_d.di_mode) &&
625 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || 625 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
626 ip->i_delayed_blks > 0)) && 626 ip->i_delayed_blks > 0)) &&
627 (ip->i_df.if_flags & XFS_IFEXTENTS) && 627 (ip->i_df.if_flags & XFS_IFEXTENTS) &&
@@ -669,7 +669,7 @@ xfs_inactive(
669 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 669 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
670 return VN_INACTIVE_CACHE; 670 return VN_INACTIVE_CACHE;
671 } 671 }
672 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) { 672 } else if (S_ISLNK(ip->i_d.di_mode)) {
673 673
674 /* 674 /*
675 * If we get an error while cleaning up a 675 * If we get an error while cleaning up a
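
The XFS hunks above are a mechanical conversion from open-coded (di_mode & S_IFMT) == S_IFxxx tests to the standard S_ISREG()/S_ISDIR()/S_ISLNK() helpers. The userspace macros in sys/stat.h are defined the same way, so a small standalone C sketch (mode values assumed for illustration, not taken from the patch) shows the two forms agree:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		mode_t modes[] = { S_IFREG | 0644, S_IFDIR | 0755, S_IFLNK | 0777 };

		for (int i = 0; i < 3; i++) {
			mode_t m = modes[i];

			/* The open-coded test and the helper give the same answer. */
			printf("REG %d/%d  DIR %d/%d  LNK %d/%d\n",
			       (m & S_IFMT) == S_IFREG, S_ISREG(m) != 0,
			       (m & S_IFMT) == S_IFDIR, S_ISDIR(m) != 0,
			       (m & S_IFMT) == S_IFLNK, S_ISLNK(m) != 0);
		}
		return 0;
	}
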
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5f523eb9bb8d..f23bcb77260c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2310,7 +2310,8 @@ extern void __iget(struct inode * inode);
2310extern void iget_failed(struct inode *); 2310extern void iget_failed(struct inode *);
2311extern void end_writeback(struct inode *); 2311extern void end_writeback(struct inode *);
2312extern void __destroy_inode(struct inode *); 2312extern void __destroy_inode(struct inode *);
2313extern struct inode *new_inode(struct super_block *); 2313extern struct inode *new_inode_pseudo(struct super_block *sb);
2314extern struct inode *new_inode(struct super_block *sb);
2314extern void free_inode_nonrcu(struct inode *inode); 2315extern void free_inode_nonrcu(struct inode *inode);
2315extern int should_remove_suid(struct dentry *); 2316extern int should_remove_suid(struct dentry *);
2316extern int file_remove_suid(struct file *); 2317extern int file_remove_suid(struct file *);
diff --git a/include/linux/input.h b/include/linux/input.h
index 771d6d85667d..068784e17972 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -119,9 +119,9 @@ struct input_keymap_entry {
119#define EVIOCGSND(len) _IOC(_IOC_READ, 'E', 0x1a, len) /* get all sounds status */ 119#define EVIOCGSND(len) _IOC(_IOC_READ, 'E', 0x1a, len) /* get all sounds status */
120#define EVIOCGSW(len) _IOC(_IOC_READ, 'E', 0x1b, len) /* get all switch states */ 120#define EVIOCGSW(len) _IOC(_IOC_READ, 'E', 0x1b, len) /* get all switch states */
121 121
122#define EVIOCGBIT(ev,len) _IOC(_IOC_READ, 'E', 0x20 + ev, len) /* get event bits */ 122#define EVIOCGBIT(ev,len) _IOC(_IOC_READ, 'E', 0x20 + (ev), len) /* get event bits */
123#define EVIOCGABS(abs) _IOR('E', 0x40 + abs, struct input_absinfo) /* get abs value/limits */ 123#define EVIOCGABS(abs) _IOR('E', 0x40 + (abs), struct input_absinfo) /* get abs value/limits */
124#define EVIOCSABS(abs) _IOW('E', 0xc0 + abs, struct input_absinfo) /* set abs value/limits */ 124#define EVIOCSABS(abs) _IOW('E', 0xc0 + (abs), struct input_absinfo) /* set abs value/limits */
125 125
126#define EVIOCSFF _IOC(_IOC_WRITE, 'E', 0x80, sizeof(struct ff_effect)) /* send a force effect to a force feedback device */ 126#define EVIOCSFF _IOC(_IOC_WRITE, 'E', 0x80, sizeof(struct ff_effect)) /* send a force effect to a force feedback device */
127#define EVIOCRMFF _IOW('E', 0x81, int) /* Erase a force effect */ 127#define EVIOCRMFF _IOW('E', 0x81, int) /* Erase a force effect */
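
The input.h change above only adds parentheses around the macro arguments, but that matters as soon as a caller passes a composite expression: without the parentheses, the addition binds before the rest of the argument. A standalone sketch (macro names invented for illustration) demonstrates the difference:

	#include <stdio.h>

	#define NR_OLD(ev)	(0x20 + ev)	/* argument not parenthesized */
	#define NR_NEW(ev)	(0x20 + (ev))	/* argument parenthesized, as in the patch */

	int main(void)
	{
		int use_alt = 0;

		/* 0x20 + use_alt ? 1 : 2 parses as (0x20 + use_alt) ? 1 : 2 */
		printf("old: %#x\n", NR_OLD(use_alt ? 1 : 2));	/* prints 0x1  */
		printf("new: %#x\n", NR_NEW(use_alt ? 1 : 2));	/* prints 0x22 */
		return 0;
	}
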
diff --git a/include/linux/input/kxtj9.h b/include/linux/input/kxtj9.h
new file mode 100644
index 000000000000..f6bac89537b8
--- /dev/null
+++ b/include/linux/input/kxtj9.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright (C) 2011 Kionix, Inc.
3 * Written by Chris Hudson <chudson@kionix.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
17 * 02111-1307, USA
18 */
19
20#ifndef __KXTJ9_H__
21#define __KXTJ9_H__
22
23#define KXTJ9_I2C_ADDR 0x0F
24
25struct kxtj9_platform_data {
26 unsigned int min_interval; /* minimum poll interval (in milli-seconds) */
27
28 /*
29 * By default, x is axis 0, y is axis 1, z is axis 2; these can be
30 * changed to account for sensor orientation within the host device.
31 */
32 u8 axis_map_x;
33 u8 axis_map_y;
34 u8 axis_map_z;
35
36 /*
37 * Each axis can be negated to account for sensor orientation within
38 * the host device.
39 */
40 bool negate_x;
41 bool negate_y;
42 bool negate_z;
43
44 /* CTRL_REG1: set resolution, g-range, data ready enable */
45 /* Output resolution: 8-bit valid or 12-bit valid */
46 #define RES_8BIT 0
47 #define RES_12BIT (1 << 6)
48 u8 res_12bit;
49 /* Output g-range: +/-2g, 4g, or 8g */
50 #define KXTJ9_G_2G 0
51 #define KXTJ9_G_4G (1 << 3)
52 #define KXTJ9_G_8G (1 << 4)
53 u8 g_range;
54
55 /* DATA_CTRL_REG: controls the output data rate of the part */
56 #define ODR12_5F 0
57 #define ODR25F 1
58 #define ODR50F 2
59 #define ODR100F 3
60 #define ODR200F 4
61 #define ODR400F 5
62 #define ODR800F 6
63 u8 data_odr_init;
64
65 int (*init)(void);
66 void (*exit)(void);
67 int (*power_on)(void);
68 int (*power_off)(void);
69};
70#endif /* __KXTJ9_H__ */
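
A board file would hand this platform data to the driver through its I2C board info. The following is only an illustrative sketch: the axis mapping, rates, device name string and example_* identifiers are assumptions, not code from the patch.

	#include <linux/init.h>
	#include <linux/i2c.h>
	#include <linux/input/kxtj9.h>

	static struct kxtj9_platform_data example_kxtj9_pdata = {
		.min_interval	= 5,		/* ms */
		.axis_map_x	= 0,
		.axis_map_y	= 1,
		.axis_map_z	= 2,
		.negate_z	= true,		/* e.g. sensor mounted upside down */
		.res_12bit	= RES_12BIT,
		.g_range	= KXTJ9_G_2G,
		.data_odr_init	= ODR50F,
	};

	static struct i2c_board_info example_board_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("kxtj9", KXTJ9_I2C_ADDR),
			.platform_data = &example_kxtj9_pdata,
		},
	};
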
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9a43ad792cfc..46ac9a50528d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -56,6 +56,14 @@
56 56
57#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 57#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
58#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 58#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
59#define DIV_ROUND_UP_ULL(ll,d) \
60 ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
61
62#if BITS_PER_LONG == 32
63# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
64#else
65# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
66#endif
59 67
60/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */ 68/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
61#define roundup(x, y) ( \ 69#define roundup(x, y) ( \
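
DIV_ROUND_UP_ULL exists because a plain 64-bit '/' inside DIV_ROUND_UP would pull in a libgcc division helper on 32-bit builds; routing the divide through do_div() avoids that, and DIV_ROUND_UP_SECTOR_T falls back to the do_div() form only on 32-bit, where sector_t can be wider than long. A userspace analogue of the rounding itself (values assumed for illustration):

	#include <stdio.h>
	#include <stdint.h>

	/* Userspace stand-in for DIV_ROUND_UP_ULL(); the in-kernel macro uses
	 * do_div() so 32-bit builds avoid a library 64-bit divide. */
	static uint64_t div_round_up_ull(uint64_t n, uint32_t d)
	{
		return (n + d - 1) / d;
	}

	int main(void)
	{
		/* Number of 4 KiB blocks needed to hold a byte count just over 5 GiB. */
		uint64_t bytes = 5ULL * 1024 * 1024 * 1024 + 1;

		printf("%llu\n", (unsigned long long)div_round_up_ull(bytes, 4096));
		return 0;
	}
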
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 504b289ba680..a3c4bc800dce 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -563,6 +563,9 @@ enum {
563 NFSPROC4_CLNT_GETDEVICEINFO, 563 NFSPROC4_CLNT_GETDEVICEINFO,
564 NFSPROC4_CLNT_LAYOUTCOMMIT, 564 NFSPROC4_CLNT_LAYOUTCOMMIT,
565 NFSPROC4_CLNT_LAYOUTRETURN, 565 NFSPROC4_CLNT_LAYOUTRETURN,
566 NFSPROC4_CLNT_SECINFO_NO_NAME,
567 NFSPROC4_CLNT_TEST_STATEID,
568 NFSPROC4_CLNT_FREE_STATEID,
566}; 569};
567 570
568/* nfs41 types */ 571/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 08c444aa0411..50a661f8b45a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -16,6 +16,7 @@ struct nfs4_sequence_args;
16struct nfs4_sequence_res; 16struct nfs4_sequence_res;
17struct nfs_server; 17struct nfs_server;
18struct nfs4_minor_version_ops; 18struct nfs4_minor_version_ops;
19struct server_scope;
19 20
20/* 21/*
21 * The nfs_client identifies our client state to the server. 22 * The nfs_client identifies our client state to the server.
@@ -77,12 +78,13 @@ struct nfs_client {
77 /* The flags used for obtaining the clientid during EXCHANGE_ID */ 78 /* The flags used for obtaining the clientid during EXCHANGE_ID */
78 u32 cl_exchange_flags; 79 u32 cl_exchange_flags;
79 struct nfs4_session *cl_session; /* sharred session */ 80 struct nfs4_session *cl_session; /* sharred session */
80 struct list_head cl_layouts;
81#endif /* CONFIG_NFS_V4 */ 81#endif /* CONFIG_NFS_V4 */
82 82
83#ifdef CONFIG_NFS_FSCACHE 83#ifdef CONFIG_NFS_FSCACHE
84 struct fscache_cookie *fscache; /* client index cache cookie */ 84 struct fscache_cookie *fscache; /* client index cache cookie */
85#endif 85#endif
86
87 struct server_scope *server_scope; /* from exchange_id */
86}; 88};
87 89
88/* 90/*
@@ -149,6 +151,7 @@ struct nfs_server {
149 struct rb_root openowner_id; 151 struct rb_root openowner_id;
150 struct rb_root lockowner_id; 152 struct rb_root lockowner_id;
151#endif 153#endif
154 struct list_head layouts;
152 struct list_head delegations; 155 struct list_head delegations;
153 void (*destroy)(struct nfs_server *); 156 void (*destroy)(struct nfs_server *);
154 157
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 25311b3bedf8..e2791a27a901 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -55,20 +55,28 @@ struct nfs_page {
55 struct nfs_writeverf wb_verf; /* Commit cookie */ 55 struct nfs_writeverf wb_verf; /* Commit cookie */
56}; 56};
57 57
58struct nfs_pageio_descriptor;
59struct nfs_pageio_ops {
60 void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
61 bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
62 int (*pg_doio)(struct nfs_pageio_descriptor *);
63};
64
58struct nfs_pageio_descriptor { 65struct nfs_pageio_descriptor {
59 struct list_head pg_list; 66 struct list_head pg_list;
60 unsigned long pg_bytes_written; 67 unsigned long pg_bytes_written;
61 size_t pg_count; 68 size_t pg_count;
62 size_t pg_bsize; 69 size_t pg_bsize;
63 unsigned int pg_base; 70 unsigned int pg_base;
64 char pg_moreio; 71 unsigned char pg_moreio : 1,
72 pg_recoalesce : 1;
65 73
66 struct inode *pg_inode; 74 struct inode *pg_inode;
67 int (*pg_doio)(struct nfs_pageio_descriptor *); 75 const struct nfs_pageio_ops *pg_ops;
68 int pg_ioflags; 76 int pg_ioflags;
69 int pg_error; 77 int pg_error;
78 const struct rpc_call_ops *pg_rpc_callops;
70 struct pnfs_layout_segment *pg_lseg; 79 struct pnfs_layout_segment *pg_lseg;
71 bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
72}; 80};
73 81
74#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) 82#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
@@ -85,7 +93,7 @@ extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *dst,
85 pgoff_t idx_start, unsigned int npages, int tag); 93 pgoff_t idx_start, unsigned int npages, int tag);
86extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, 94extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
87 struct inode *inode, 95 struct inode *inode,
88 int (*doio)(struct nfs_pageio_descriptor *desc), 96 const struct nfs_pageio_ops *pg_ops,
89 size_t bsize, 97 size_t bsize,
90 int how); 98 int how);
91extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, 99extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
@@ -100,7 +108,6 @@ extern void nfs_unlock_request(struct nfs_page *req);
100extern int nfs_set_page_tag_locked(struct nfs_page *req); 108extern int nfs_set_page_tag_locked(struct nfs_page *req);
101extern void nfs_clear_page_tag_locked(struct nfs_page *req); 109extern void nfs_clear_page_tag_locked(struct nfs_page *req);
102 110
103
104/* 111/*
105 * Lock the page of an asynchronous request without getting a new reference 112 * Lock the page of an asynchronous request without getting a new reference
106 */ 113 */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 00848d86ffb2..5b115956abac 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -269,9 +269,10 @@ struct nfs4_layoutcommit_data {
269}; 269};
270 270
271struct nfs4_layoutreturn_args { 271struct nfs4_layoutreturn_args {
272 __u32 layout_type; 272 struct pnfs_layout_hdr *layout;
273 struct inode *inode; 273 struct inode *inode;
274 nfs4_stateid stateid; 274 nfs4_stateid stateid;
275 __u32 layout_type;
275 struct nfs4_sequence_args seq_args; 276 struct nfs4_sequence_args seq_args;
276}; 277};
277 278
@@ -1060,6 +1061,7 @@ struct server_scope {
1060struct nfs41_exchange_id_res { 1061struct nfs41_exchange_id_res {
1061 struct nfs_client *client; 1062 struct nfs_client *client;
1062 u32 flags; 1063 u32 flags;
1064 struct server_scope *server_scope;
1063}; 1065};
1064 1066
1065struct nfs41_create_session_args { 1067struct nfs41_create_session_args {
@@ -1083,6 +1085,34 @@ struct nfs41_reclaim_complete_args {
1083struct nfs41_reclaim_complete_res { 1085struct nfs41_reclaim_complete_res {
1084 struct nfs4_sequence_res seq_res; 1086 struct nfs4_sequence_res seq_res;
1085}; 1087};
1088
1089#define SECINFO_STYLE_CURRENT_FH 0
1090#define SECINFO_STYLE_PARENT 1
1091struct nfs41_secinfo_no_name_args {
1092 int style;
1093 struct nfs4_sequence_args seq_args;
1094};
1095
1096struct nfs41_test_stateid_args {
1097 nfs4_stateid *stateid;
1098 struct nfs4_sequence_args seq_args;
1099};
1100
1101struct nfs41_test_stateid_res {
1102 unsigned int status;
1103 struct nfs4_sequence_res seq_res;
1104};
1105
1106struct nfs41_free_stateid_args {
1107 nfs4_stateid *stateid;
1108 struct nfs4_sequence_args seq_args;
1109};
1110
1111struct nfs41_free_stateid_res {
1112 unsigned int status;
1113 struct nfs4_sequence_res seq_res;
1114};
1115
1086#endif /* CONFIG_NFS_V4_1 */ 1116#endif /* CONFIG_NFS_V4_1 */
1087 1117
1088struct nfs_page; 1118struct nfs_page;
@@ -1096,6 +1126,7 @@ struct nfs_read_data {
1096 struct rpc_cred *cred; 1126 struct rpc_cred *cred;
1097 struct nfs_fattr fattr; /* fattr storage */ 1127 struct nfs_fattr fattr; /* fattr storage */
1098 struct list_head pages; /* Coalesced read requests */ 1128 struct list_head pages; /* Coalesced read requests */
1129 struct list_head list; /* lists of struct nfs_read_data */
1099 struct nfs_page *req; /* multi ops per nfs_page */ 1130 struct nfs_page *req; /* multi ops per nfs_page */
1100 struct page **pagevec; 1131 struct page **pagevec;
1101 unsigned int npages; /* Max length of pagevec */ 1132 unsigned int npages; /* Max length of pagevec */
@@ -1119,6 +1150,7 @@ struct nfs_write_data {
1119 struct nfs_fattr fattr; 1150 struct nfs_fattr fattr;
1120 struct nfs_writeverf verf; 1151 struct nfs_writeverf verf;
1121 struct list_head pages; /* Coalesced requests we wish to flush */ 1152 struct list_head pages; /* Coalesced requests we wish to flush */
1153 struct list_head list; /* lists of struct nfs_write_data */
1122 struct nfs_page *req; /* multi ops per nfs_page */ 1154 struct nfs_page *req; /* multi ops per nfs_page */
1123 struct page **pagevec; 1155 struct page **pagevec;
1124 unsigned int npages; /* Max length of pagevec */ 1156 unsigned int npages; /* Max length of pagevec */
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
index 76efbdd01622..435dd5fa7453 100644
--- a/include/linux/pnfs_osd_xdr.h
+++ b/include/linux/pnfs_osd_xdr.h
@@ -41,9 +41,6 @@
41 41
42#include <linux/nfs_fs.h> 42#include <linux/nfs_fs.h>
43#include <linux/nfs_page.h> 43#include <linux/nfs_page.h>
44#include <scsi/osd_protocol.h>
45
46#define PNFS_OSD_OSDNAME_MAXSIZE 256
47 44
48/* 45/*
49 * draft-ietf-nfsv4-minorversion-22 46 * draft-ietf-nfsv4-minorversion-22
@@ -99,12 +96,6 @@ struct pnfs_osd_objid {
99#define _DEVID_HI(oid_device_id) \ 96#define _DEVID_HI(oid_device_id) \
100 (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1) 97 (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
101 98
102static inline int
103pnfs_osd_objid_xdr_sz(void)
104{
105 return (NFS4_DEVICEID4_SIZE / 4) + 2 + 2;
106}
107
108enum pnfs_osd_version { 99enum pnfs_osd_version {
109 PNFS_OSD_MISSING = 0, 100 PNFS_OSD_MISSING = 0,
110 PNFS_OSD_VERSION_1 = 1, 101 PNFS_OSD_VERSION_1 = 1,
@@ -189,8 +180,6 @@ struct pnfs_osd_targetid {
189 struct nfs4_string oti_scsi_device_id; 180 struct nfs4_string oti_scsi_device_id;
190}; 181};
191 182
192enum { PNFS_OSD_TARGETID_MAX = 1 + PNFS_OSD_OSDNAME_MAXSIZE / 4 };
193
194/* struct netaddr4 { 183/* struct netaddr4 {
195 * // see struct rpcb in RFC1833 184 * // see struct rpcb in RFC1833
196 * string r_netid<>; // network id 185 * string r_netid<>; // network id
@@ -207,12 +196,6 @@ struct pnfs_osd_targetaddr {
207 struct pnfs_osd_net_addr ota_netaddr; 196 struct pnfs_osd_net_addr ota_netaddr;
208}; 197};
209 198
210enum {
211 NETWORK_ID_MAX = 16 / 4,
212 UNIVERSAL_ADDRESS_MAX = 64 / 4,
213 PNFS_OSD_TARGETADDR_MAX = 3 + NETWORK_ID_MAX + UNIVERSAL_ADDRESS_MAX,
214};
215
216struct pnfs_osd_deviceaddr { 199struct pnfs_osd_deviceaddr {
217 struct pnfs_osd_targetid oda_targetid; 200 struct pnfs_osd_targetid oda_targetid;
218 struct pnfs_osd_targetaddr oda_targetaddr; 201 struct pnfs_osd_targetaddr oda_targetaddr;
@@ -222,15 +205,6 @@ struct pnfs_osd_deviceaddr {
222 struct nfs4_string oda_osdname; 205 struct nfs4_string oda_osdname;
223}; 206};
224 207
225enum {
226 ODA_OSDNAME_MAX = PNFS_OSD_OSDNAME_MAXSIZE / 4,
227 PNFS_OSD_DEVICEADDR_MAX =
228 PNFS_OSD_TARGETID_MAX + PNFS_OSD_TARGETADDR_MAX +
229 2 /*oda_lun*/ +
230 1 + OSD_SYSTEMID_LEN +
231 1 + ODA_OSDNAME_MAX,
232};
233
234/* LAYOUTCOMMIT: layoutupdate */ 208/* LAYOUTCOMMIT: layoutupdate */
235 209
236/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) { 210/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
@@ -279,7 +253,7 @@ struct pnfs_osd_ioerr {
279 u32 oer_errno; 253 u32 oer_errno;
280}; 254};
281 255
282/* OSD XDR API */ 256/* OSD XDR Client API */
283/* Layout helpers */ 257/* Layout helpers */
284/* Layout decoding is done in two parts: 258/* Layout decoding is done in two parts:
285 * 1. First Call pnfs_osd_xdr_decode_layout_map to read in only the header part 259 * 1. First Call pnfs_osd_xdr_decode_layout_map to read in only the header part
@@ -337,8 +311,7 @@ extern int
337pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr, 311pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
338 struct pnfs_osd_layoutupdate *lou); 312 struct pnfs_osd_layoutupdate *lou);
339 313
340/* osd_ioerror encoding/decoding (layout_return) */ 314/* osd_ioerror encoding (layout_return) */
341/* Client */
342extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr); 315extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
343extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr); 316extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
344 317
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 650af6deaf8f..643b96c7a94f 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -50,8 +50,6 @@ typedef int (write_proc_t)(struct file *file, const char __user *buffer,
50 50
51struct proc_dir_entry { 51struct proc_dir_entry {
52 unsigned int low_ino; 52 unsigned int low_ino;
53 unsigned int namelen;
54 const char *name;
55 mode_t mode; 53 mode_t mode;
56 nlink_t nlink; 54 nlink_t nlink;
57 uid_t uid; 55 uid_t uid;
@@ -73,9 +71,11 @@ struct proc_dir_entry {
73 write_proc_t *write_proc; 71 write_proc_t *write_proc;
74 atomic_t count; /* use count */ 72 atomic_t count; /* use count */
75 int pde_users; /* number of callers into module in progress */ 73 int pde_users; /* number of callers into module in progress */
76 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
77 struct completion *pde_unload_completion; 74 struct completion *pde_unload_completion;
78 struct list_head pde_openers; /* who did ->open, but not ->release */ 75 struct list_head pde_openers; /* who did ->open, but not ->release */
76 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
77 u8 namelen;
78 char name[];
79}; 79};
80 80
81enum kcore_type { 81enum kcore_type {
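
Turning name into a terminal flexible array (with namelen shrunk to a u8) lets the entry and its name share a single allocation. A sketch of how such an entry could be allocated given these fields; this is illustrative only, the actual allocator is in fs/proc/generic.c and is not part of this diff.

	#include <linux/proc_fs.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static struct proc_dir_entry *example_alloc_pde(const char *name)
	{
		struct proc_dir_entry *de;
		size_t len = strlen(name);	/* must stay below 256: namelen is a u8 */

		/* one allocation covers the struct plus the NUL-terminated name */
		de = kzalloc(sizeof(*de) + len + 1, GFP_KERNEL);
		if (!de)
			return NULL;
		memcpy(de->name, name, len + 1);
		de->namelen = len;
		return de;
	}
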
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 082884295f80..f7f3ce340c08 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -31,7 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31#include <linux/sunrpc/xprt.h> 31#include <linux/sunrpc/xprt.h>
32#include <linux/sunrpc/sched.h> 32#include <linux/sunrpc/sched.h>
33 33
34#ifdef CONFIG_NFS_V4_1 34#ifdef CONFIG_SUNRPC_BACKCHANNEL
35struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt); 35struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
36void xprt_free_bc_request(struct rpc_rqst *req); 36void xprt_free_bc_request(struct rpc_rqst *req);
37int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); 37int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
@@ -47,7 +47,7 @@ static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
47 return 1; 47 return 1;
48 return 0; 48 return 0;
49} 49}
50#else /* CONFIG_NFS_V4_1 */ 50#else /* CONFIG_SUNRPC_BACKCHANNEL */
51static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, 51static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
52 unsigned int min_reqs) 52 unsigned int min_reqs)
53{ 53{
@@ -62,6 +62,6 @@ static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
62static inline void xprt_free_bc_request(struct rpc_rqst *req) 62static inline void xprt_free_bc_request(struct rpc_rqst *req)
63{ 63{
64} 64}
65#endif /* CONFIG_NFS_V4_1 */ 65#endif /* CONFIG_SUNRPC_BACKCHANNEL */
66#endif /* _LINUX_SUNRPC_BC_XPRT_H */ 66#endif /* _LINUX_SUNRPC_BC_XPRT_H */
67 67
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index fe2d8e6b923b..e7756896f3ca 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -227,6 +227,10 @@ void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
227void rpc_destroy_wait_queue(struct rpc_wait_queue *); 227void rpc_destroy_wait_queue(struct rpc_wait_queue *);
228void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, 228void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
229 rpc_action action); 229 rpc_action action);
230void rpc_sleep_on_priority(struct rpc_wait_queue *,
231 struct rpc_task *,
232 rpc_action action,
233 int priority);
230void rpc_wake_up_queued_task(struct rpc_wait_queue *, 234void rpc_wake_up_queued_task(struct rpc_wait_queue *,
231 struct rpc_task *); 235 struct rpc_task *);
232void rpc_wake_up(struct rpc_wait_queue *); 236void rpc_wake_up(struct rpc_wait_queue *);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 2f1e5186e049..223588a976a0 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -92,7 +92,7 @@ struct svc_serv {
92 struct module * sv_module; /* optional module to count when 92 struct module * sv_module; /* optional module to count when
93 * adding threads */ 93 * adding threads */
94 svc_thread_fn sv_function; /* main function for threads */ 94 svc_thread_fn sv_function; /* main function for threads */
95#if defined(CONFIG_NFS_V4_1) 95#if defined(CONFIG_SUNRPC_BACKCHANNEL)
96 struct list_head sv_cb_list; /* queue for callback requests 96 struct list_head sv_cb_list; /* queue for callback requests
97 * that arrive over the same 97 * that arrive over the same
98 * connection */ 98 * connection */
@@ -100,7 +100,7 @@ struct svc_serv {
100 wait_queue_head_t sv_cb_waitq; /* sleep here if there are no 100 wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
101 * entries in the svc_cb_list */ 101 * entries in the svc_cb_list */
102 struct svc_xprt *sv_bc_xprt; /* callback on fore channel */ 102 struct svc_xprt *sv_bc_xprt; /* callback on fore channel */
103#endif /* CONFIG_NFS_V4_1 */ 103#endif /* CONFIG_SUNRPC_BACKCHANNEL */
104}; 104};
105 105
106/* 106/*
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 81cce3b3ee66..15518a152ac3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -22,6 +22,7 @@
22#define RPC_MIN_SLOT_TABLE (2U) 22#define RPC_MIN_SLOT_TABLE (2U)
23#define RPC_DEF_SLOT_TABLE (16U) 23#define RPC_DEF_SLOT_TABLE (16U)
24#define RPC_MAX_SLOT_TABLE (128U) 24#define RPC_MAX_SLOT_TABLE (128U)
25#define RPC_MAX_SLOT_TABLE_LIMIT (65536U)
25 26
26/* 27/*
27 * This describes a timeout strategy 28 * This describes a timeout strategy
@@ -100,18 +101,18 @@ struct rpc_rqst {
100 ktime_t rq_xtime; /* transmit time stamp */ 101 ktime_t rq_xtime; /* transmit time stamp */
101 int rq_ntrans; 102 int rq_ntrans;
102 103
103#if defined(CONFIG_NFS_V4_1) 104#if defined(CONFIG_SUNRPC_BACKCHANNEL)
104 struct list_head rq_bc_list; /* Callback service list */ 105 struct list_head rq_bc_list; /* Callback service list */
105 unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ 106 unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
106 struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ 107 struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
107#endif /* CONFIG_NFS_V4_1 */ 108#endif /* CONFIG_SUNRPC_BACKCHANEL */
108}; 109};
109#define rq_svec rq_snd_buf.head 110#define rq_svec rq_snd_buf.head
110#define rq_slen rq_snd_buf.len 111#define rq_slen rq_snd_buf.len
111 112
112struct rpc_xprt_ops { 113struct rpc_xprt_ops {
113 void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); 114 void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
114 int (*reserve_xprt)(struct rpc_task *task); 115 int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
115 void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); 116 void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
116 void (*rpcbind)(struct rpc_task *task); 117 void (*rpcbind)(struct rpc_task *task);
117 void (*set_port)(struct rpc_xprt *xprt, unsigned short port); 118 void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
@@ -164,12 +165,12 @@ struct rpc_xprt {
164 165
165 struct rpc_wait_queue binding; /* requests waiting on rpcbind */ 166 struct rpc_wait_queue binding; /* requests waiting on rpcbind */
166 struct rpc_wait_queue sending; /* requests waiting to send */ 167 struct rpc_wait_queue sending; /* requests waiting to send */
167 struct rpc_wait_queue resend; /* requests waiting to resend */
168 struct rpc_wait_queue pending; /* requests in flight */ 168 struct rpc_wait_queue pending; /* requests in flight */
169 struct rpc_wait_queue backlog; /* waiting for slot */ 169 struct rpc_wait_queue backlog; /* waiting for slot */
170 struct list_head free; /* free slots */ 170 struct list_head free; /* free slots */
171 struct rpc_rqst * slot; /* slot table storage */ 171 unsigned int max_reqs; /* max number of slots */
172 unsigned int max_reqs; /* total slots */ 172 unsigned int min_reqs; /* min number of slots */
173 atomic_t num_reqs; /* total slots */
173 unsigned long state; /* transport state */ 174 unsigned long state; /* transport state */
174 unsigned char shutdown : 1, /* being shut down */ 175 unsigned char shutdown : 1, /* being shut down */
175 resvport : 1; /* use a reserved port */ 176 resvport : 1; /* use a reserved port */
@@ -200,7 +201,7 @@ struct rpc_xprt {
200 u32 xid; /* Next XID value to use */ 201 u32 xid; /* Next XID value to use */
201 struct rpc_task * snd_task; /* Task blocked in send */ 202 struct rpc_task * snd_task; /* Task blocked in send */
202 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ 203 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
203#if defined(CONFIG_NFS_V4_1) 204#if defined(CONFIG_SUNRPC_BACKCHANNEL)
204 struct svc_serv *bc_serv; /* The RPC service which will */ 205 struct svc_serv *bc_serv; /* The RPC service which will */
205 /* process the callback */ 206 /* process the callback */
206 unsigned int bc_alloc_count; /* Total number of preallocs */ 207 unsigned int bc_alloc_count; /* Total number of preallocs */
@@ -208,7 +209,7 @@ struct rpc_xprt {
208 * items */ 209 * items */
209 struct list_head bc_pa_list; /* List of preallocated 210 struct list_head bc_pa_list; /* List of preallocated
210 * backchannel rpc_rqst's */ 211 * backchannel rpc_rqst's */
211#endif /* CONFIG_NFS_V4_1 */ 212#endif /* CONFIG_SUNRPC_BACKCHANNEL */
212 struct list_head recv; 213 struct list_head recv;
213 214
214 struct { 215 struct {
@@ -228,15 +229,15 @@ struct rpc_xprt {
228 const char *address_strings[RPC_DISPLAY_MAX]; 229 const char *address_strings[RPC_DISPLAY_MAX];
229}; 230};
230 231
231#if defined(CONFIG_NFS_V4_1) 232#if defined(CONFIG_SUNRPC_BACKCHANNEL)
232/* 233/*
233 * Backchannel flags 234 * Backchannel flags
234 */ 235 */
235#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */ 236#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */
236 /* buffer in use */ 237 /* buffer in use */
237#endif /* CONFIG_NFS_V4_1 */ 238#endif /* CONFIG_SUNRPC_BACKCHANNEL */
238 239
239#if defined(CONFIG_NFS_V4_1) 240#if defined(CONFIG_SUNRPC_BACKCHANNEL)
240static inline int bc_prealloc(struct rpc_rqst *req) 241static inline int bc_prealloc(struct rpc_rqst *req)
241{ 242{
242 return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 243 return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
@@ -246,7 +247,7 @@ static inline int bc_prealloc(struct rpc_rqst *req)
246{ 247{
247 return 0; 248 return 0;
248} 249}
249#endif /* CONFIG_NFS_V4_1 */ 250#endif /* CONFIG_SUNRPC_BACKCHANNEL */
250 251
251struct xprt_create { 252struct xprt_create {
252 int ident; /* XPRT_TRANSPORT identifier */ 253 int ident; /* XPRT_TRANSPORT identifier */
@@ -271,8 +272,8 @@ struct xprt_class {
271struct rpc_xprt *xprt_create_transport(struct xprt_create *args); 272struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
272void xprt_connect(struct rpc_task *task); 273void xprt_connect(struct rpc_task *task);
273void xprt_reserve(struct rpc_task *task); 274void xprt_reserve(struct rpc_task *task);
274int xprt_reserve_xprt(struct rpc_task *task); 275int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
275int xprt_reserve_xprt_cong(struct rpc_task *task); 276int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
276int xprt_prepare_transmit(struct rpc_task *task); 277int xprt_prepare_transmit(struct rpc_task *task);
277void xprt_transmit(struct rpc_task *task); 278void xprt_transmit(struct rpc_task *task);
278void xprt_end_transmit(struct rpc_task *task); 279void xprt_end_transmit(struct rpc_task *task);
@@ -282,7 +283,9 @@ void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
282void xprt_release(struct rpc_task *task); 283void xprt_release(struct rpc_task *task);
283struct rpc_xprt * xprt_get(struct rpc_xprt *xprt); 284struct rpc_xprt * xprt_get(struct rpc_xprt *xprt);
284void xprt_put(struct rpc_xprt *xprt); 285void xprt_put(struct rpc_xprt *xprt);
285struct rpc_xprt * xprt_alloc(struct net *net, int size, int max_req); 286struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
287 unsigned int num_prealloc,
288 unsigned int max_req);
286void xprt_free(struct rpc_xprt *); 289void xprt_free(struct rpc_xprt *);
287 290
288static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) 291static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
@@ -321,7 +324,6 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
321#define XPRT_CLOSING (6) 324#define XPRT_CLOSING (6)
322#define XPRT_CONNECTION_ABORT (7) 325#define XPRT_CONNECTION_ABORT (7)
323#define XPRT_CONNECTION_CLOSE (8) 326#define XPRT_CONNECTION_CLOSE (8)
324#define XPRT_INITIALIZED (9)
325 327
326static inline void xprt_set_connected(struct rpc_xprt *xprt) 328static inline void xprt_set_connected(struct rpc_xprt *xprt)
327{ 329{
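
Besides the config-symbol rename, this header changes two APIs: xprt_reserve_xprt() and xprt_reserve_xprt_cong() now take the transport explicitly, and xprt_alloc() takes both the number of request slots to preallocate and the maximum the table may grow to, replacing the old fixed slot array. A hypothetical fragment of a transport setup routine using the new signature; the names and counts are invented, and struct example_xprt is assumed to embed struct rpc_xprt as its first member, as the socket transports do.

	struct rpc_xprt *xprt;

	xprt = xprt_alloc(net, sizeof(struct example_xprt),
			  16,			/* slots preallocated up front */
			  RPC_MAX_SLOT_TABLE);	/* ceiling for dynamic growth */
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
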
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
index 38e8c4d9289e..fd98bb968219 100644
--- a/include/linux/wm97xx.h
+++ b/include/linux/wm97xx.h
@@ -38,7 +38,11 @@
38#define WM97XX_ADCSEL_X 0x1000 /* x coord measurement */ 38#define WM97XX_ADCSEL_X 0x1000 /* x coord measurement */
39#define WM97XX_ADCSEL_Y 0x2000 /* y coord measurement */ 39#define WM97XX_ADCSEL_Y 0x2000 /* y coord measurement */
40#define WM97XX_ADCSEL_PRES 0x3000 /* pressure measurement */ 40#define WM97XX_ADCSEL_PRES 0x3000 /* pressure measurement */
41#define WM97XX_ADCSEL_MASK 0x7000 41#define WM97XX_AUX_ID1 0x4000
42#define WM97XX_AUX_ID2 0x5000
43#define WM97XX_AUX_ID3 0x6000
44#define WM97XX_AUX_ID4 0x7000
45#define WM97XX_ADCSEL_MASK 0x7000 /* ADC selection mask */
42#define WM97XX_COO 0x0800 /* enable coordinate mode */ 46#define WM97XX_COO 0x0800 /* enable coordinate mode */
43#define WM97XX_CTC 0x0400 /* enable continuous mode */ 47#define WM97XX_CTC 0x0400 /* enable continuous mode */
44#define WM97XX_CM_RATE_93 0x0000 /* 93.75Hz continuous rate */ 48#define WM97XX_CM_RATE_93 0x0000 /* 93.75Hz continuous rate */
@@ -61,13 +65,6 @@
61#define WM97XX_PRP_DET_DIG 0xc000 /* setect on, digitise on */ 65#define WM97XX_PRP_DET_DIG 0xc000 /* setect on, digitise on */
62#define WM97XX_RPR 0x2000 /* wake up on pen down */ 66#define WM97XX_RPR 0x2000 /* wake up on pen down */
63#define WM97XX_PEN_DOWN 0x8000 /* pen is down */ 67#define WM97XX_PEN_DOWN 0x8000 /* pen is down */
64#define WM97XX_ADCSRC_MASK 0x7000 /* ADC source mask */
65
66#define WM97XX_AUX_ID1 0x8001
67#define WM97XX_AUX_ID2 0x8002
68#define WM97XX_AUX_ID3 0x8003
69#define WM97XX_AUX_ID4 0x8004
70
71 68
72/* WM9712 Bits */ 69/* WM9712 Bits */
73#define WM9712_45W 0x1000 /* set for 5-wire touchscreen */ 70#define WM9712_45W 0x1000 /* set for 5-wire touchscreen */
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index ea68b3c56dbf..988ba06b3ad6 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -29,10 +29,40 @@
29/* default iSCSI listen port for incoming connections */ 29/* default iSCSI listen port for incoming connections */
30#define ISCSI_LISTEN_PORT 3260 30#define ISCSI_LISTEN_PORT 3260
31 31
32/* iSCSI header length */
33#define ISCSI_HDR_LEN 48
34
35/* iSCSI CRC32C length */
36#define ISCSI_CRC_LEN 4
37
32/* Padding word length */ 38/* Padding word length */
33#define ISCSI_PAD_LEN 4 39#define ISCSI_PAD_LEN 4
34 40
35/* 41/*
42 * Serial Number Arithmetic, 32 bits, RFC1982
43 */
44
45static inline int iscsi_sna_lt(u32 n1, u32 n2)
46{
47 return (s32)(n1 - n2) < 0;
48}
49
50static inline int iscsi_sna_lte(u32 n1, u32 n2)
51{
52 return (s32)(n1 - n2) <= 0;
53}
54
55static inline int iscsi_sna_gt(u32 n1, u32 n2)
56{
57 return (s32)(n1 - n2) > 0;
58}
59
60static inline int iscsi_sna_gte(u32 n1, u32 n2)
61{
62 return (s32)(n1 - n2) >= 0;
63}
64
65/*
36 * useful common(control and data pathes) macro 66 * useful common(control and data pathes) macro
37 */ 67 */
38#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) 68#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
@@ -116,7 +146,7 @@ struct iscsi_ahs_hdr {
116#define ISCSI_CDB_SIZE 16 146#define ISCSI_CDB_SIZE 16
117 147
118/* iSCSI PDU Header */ 148/* iSCSI PDU Header */
119struct iscsi_cmd { 149struct iscsi_scsi_req {
120 uint8_t opcode; 150 uint8_t opcode;
121 uint8_t flags; 151 uint8_t flags;
122 __be16 rsvd2; 152 __be16 rsvd2;
@@ -161,7 +191,7 @@ struct iscsi_ecdb_ahdr {
161}; 191};
162 192
163/* SCSI Response Header */ 193/* SCSI Response Header */
164struct iscsi_cmd_rsp { 194struct iscsi_scsi_rsp {
165 uint8_t opcode; 195 uint8_t opcode;
166 uint8_t flags; 196 uint8_t flags;
167 uint8_t response; 197 uint8_t response;
@@ -406,7 +436,7 @@ struct iscsi_text_rsp {
406}; 436};
407 437
408/* Login Header */ 438/* Login Header */
409struct iscsi_login { 439struct iscsi_login_req {
410 uint8_t opcode; 440 uint8_t opcode;
411 uint8_t flags; 441 uint8_t flags;
412 uint8_t max_version; /* Max. version supported */ 442 uint8_t max_version; /* Max. version supported */
@@ -427,7 +457,13 @@ struct iscsi_login {
427#define ISCSI_FLAG_LOGIN_TRANSIT 0x80 457#define ISCSI_FLAG_LOGIN_TRANSIT 0x80
428#define ISCSI_FLAG_LOGIN_CONTINUE 0x40 458#define ISCSI_FLAG_LOGIN_CONTINUE 0x40
429#define ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK 0x0C /* 2 bits */ 459#define ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK 0x0C /* 2 bits */
460#define ISCSI_FLAG_LOGIN_CURRENT_STAGE1 0x04
461#define ISCSI_FLAG_LOGIN_CURRENT_STAGE2 0x08
462#define ISCSI_FLAG_LOGIN_CURRENT_STAGE3 0x0C
430#define ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK 0x03 /* 2 bits */ 463#define ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK 0x03 /* 2 bits */
464#define ISCSI_FLAG_LOGIN_NEXT_STAGE1 0x01
465#define ISCSI_FLAG_LOGIN_NEXT_STAGE2 0x02
466#define ISCSI_FLAG_LOGIN_NEXT_STAGE3 0x03
431 467
432#define ISCSI_LOGIN_CURRENT_STAGE(flags) \ 468#define ISCSI_LOGIN_CURRENT_STAGE(flags) \
433 ((flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) 469 ((flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2)
@@ -550,17 +586,25 @@ struct iscsi_logout_rsp {
550struct iscsi_snack { 586struct iscsi_snack {
551 uint8_t opcode; 587 uint8_t opcode;
552 uint8_t flags; 588 uint8_t flags;
553 uint8_t rsvd2[14]; 589 uint8_t rsvd2[2];
590 uint8_t hlength;
591 uint8_t dlength[3];
592 uint8_t lun[8];
554 itt_t itt; 593 itt_t itt;
594 __be32 ttt;
595 uint8_t rsvd3[4];
596 __be32 exp_statsn;
597 uint8_t rsvd4[8];
555 __be32 begrun; 598 __be32 begrun;
556 __be32 runlength; 599 __be32 runlength;
557 __be32 exp_statsn;
558 __be32 rsvd3;
559 __be32 exp_datasn;
560 uint8_t rsvd6[8];
561}; 600};
562 601
563/* SNACK PDU flags */ 602/* SNACK PDU flags */
603#define ISCSI_FLAG_SNACK_TYPE_DATA 0
604#define ISCSI_FLAG_SNACK_TYPE_R2T 0
605#define ISCSI_FLAG_SNACK_TYPE_STATUS 1
606#define ISCSI_FLAG_SNACK_TYPE_DATA_ACK 2
607#define ISCSI_FLAG_SNACK_TYPE_RDATA 3
564#define ISCSI_FLAG_SNACK_TYPE_MASK 0x0F /* 4 bits */ 608#define ISCSI_FLAG_SNACK_TYPE_MASK 0x0F /* 4 bits */
565 609
566/* Reject Message Header */ 610/* Reject Message Header */
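
The iscsi_sna_* helpers added near the top of this header implement RFC 1982 serial number arithmetic: casting the 32-bit difference to a signed value makes ordering comparisons survive sequence-number wraparound, which a plain '<' does not. A standalone sketch with userspace types and invented values:

	#include <stdio.h>
	#include <stdint.h>

	/* Userspace copy of the comparison added above. */
	static int iscsi_sna_lt(uint32_t n1, uint32_t n2)
	{
		return (int32_t)(n1 - n2) < 0;
	}

	int main(void)
	{
		uint32_t just_before_wrap = 0xfffffff0u;
		uint32_t just_after_wrap  = 0x00000010u;

		/* A plain '<' gets the wrapped case wrong; the serial-number compare does not. */
		printf("plain:  %d\n", just_before_wrap < just_after_wrap);		/* 0 */
		printf("sna_lt: %d\n", iscsi_sna_lt(just_before_wrap, just_after_wrap)); /* 1 */
		return 0;
	}
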
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e1bad1130616..57e71fa33f7c 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -507,6 +507,18 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream);
507void snd_pcm_vma_notify_data(void *client, void *data); 507void snd_pcm_vma_notify_data(void *client, void *data);
508int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area); 508int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area);
509 509
510
511#ifdef CONFIG_SND_DEBUG
512void snd_pcm_debug_name(struct snd_pcm_substream *substream,
513 char *name, size_t len);
514#else
515static inline void
516snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
517{
518 *buf = 0;
519}
520#endif
521
510/* 522/*
511 * PCM library 523 * PCM library
512 */ 524 */
@@ -749,17 +761,18 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc
749 return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; 761 return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL];
750} 762}
751 763
752#define params_access(p) ((__force snd_pcm_access_t)snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_ACCESS))) 764#define params_channels(p) \
753#define params_format(p) ((__force snd_pcm_format_t)snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_FORMAT))) 765 (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_CHANNELS)->min)
754#define params_subformat(p) snd_mask_min(hw_param_mask((p), SNDRV_PCM_HW_PARAM_SUBFORMAT)) 766#define params_rate(p) \
755#define params_channels(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_CHANNELS)->min 767 (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_RATE)->min)
756#define params_rate(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_RATE)->min 768#define params_period_size(p) \
757#define params_period_size(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min 769 (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min)
758#define params_period_bytes(p) ((params_period_size(p)*snd_pcm_format_physical_width(params_format(p))*params_channels(p))/8) 770#define params_periods(p) \
759#define params_periods(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_PERIODS)->min 771 (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIODS)->min)
760#define params_buffer_size(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min 772#define params_buffer_size(p) \
761#define params_buffer_bytes(p) hw_param_interval((p), SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min 773 (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min)
762 774#define params_buffer_bytes(p) \
775 (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min)
763 776
764int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v); 777int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v);
765void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c); 778void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c);
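
The rewritten accessors go through hw_param_interval_c(), so they also work on const-qualified hw_params pointers; params_access, params_format, params_subformat and params_period_bytes move to pcm_params.h in the next hunk. An illustrative driver callback using the accessors (not from the patch; the body is reduced to a bare sanity check):

	#include <linux/errno.h>
	#include <sound/pcm.h>

	static int example_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		unsigned int rate     = params_rate(params);
		unsigned int channels = params_channels(params);

		/* both accessors read the minimum of the refined interval */
		return (rate && channels) ? 0 : -EINVAL;
	}
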
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 85cf1cf4f31a..f494f1e3c900 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -337,5 +337,19 @@ static inline unsigned int sub(unsigned int a, unsigned int b)
337 return 0; 337 return 0;
338} 338}
339 339
340#endif /* __SOUND_PCM_PARAMS_H */ 340#define params_access(p) ((__force snd_pcm_access_t)\
341 snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_ACCESS)))
342#define params_format(p) ((__force snd_pcm_format_t)\
343 snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_FORMAT)))
344#define params_subformat(p) \
345 snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_SUBFORMAT))
341 346
347static inline unsigned int
348params_period_bytes(const struct snd_pcm_hw_params *p)
349{
350 return (params_period_size(p) *
351 snd_pcm_format_physical_width(params_format(p)) *
352 params_channels(p)) / 8;
353}
354
355#endif /* __SOUND_PCM_PARAMS_H */
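
params_period_bytes() becomes a proper inline rather than a macro, but the arithmetic is unchanged: frames per period times the physical sample width times the channel count, divided by eight. With assumed values (S16_LE, stereo, 1024-frame periods) a trivial standalone check:

	#include <stdio.h>

	int main(void)
	{
		unsigned int period_size = 1024;	/* frames per period (assumed) */
		unsigned int sample_bits = 16;		/* physical width of S16_LE */
		unsigned int channels    = 2;

		/* same arithmetic as params_period_bytes(): 1024 * 16 * 2 / 8 = 4096 */
		printf("%u bytes per period\n", period_size * sample_bits * channels / 8);
		return 0;
	}
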
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index e09505c5a490..e0583b7769cb 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -266,6 +266,12 @@
266 .get = snd_soc_dapm_get_enum_virt, \ 266 .get = snd_soc_dapm_get_enum_virt, \
267 .put = snd_soc_dapm_put_enum_virt, \ 267 .put = snd_soc_dapm_put_enum_virt, \
268 .private_value = (unsigned long)&xenum } 268 .private_value = (unsigned long)&xenum }
269#define SOC_DAPM_ENUM_EXT(xname, xenum, xget, xput) \
270{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
271 .info = snd_soc_info_enum_double, \
272 .get = xget, \
273 .put = xput, \
274 .private_value = (unsigned long)&xenum }
269#define SOC_DAPM_VALUE_ENUM(xname, xenum) \ 275#define SOC_DAPM_VALUE_ENUM(xname, xenum) \
270{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ 276{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
271 .info = snd_soc_info_enum_double, \ 277 .info = snd_soc_info_enum_double, \
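
SOC_DAPM_ENUM_EXT mirrors the existing SOC_DAPM_ENUM but lets a codec driver supply its own get/put handlers. A hypothetical use follows; the names and texts are invented, and the handlers are assumed to follow the usual snd_kcontrol get/put prototypes.

	#include <sound/soc.h>
	#include <sound/soc-dapm.h>

	static int example_get_route(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol);
	static int example_put_route(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol);

	static const char *example_texts[] = { "Path A", "Path B" };
	static const struct soc_enum example_enum =
		SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(example_texts), example_texts);

	static const struct snd_kcontrol_new example_mux_control =
		SOC_DAPM_ENUM_EXT("Example Mux", example_enum,
				  example_get_route, example_put_route);
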
diff --git a/kernel/compat.c b/kernel/compat.c
index 18197ae2d465..616c78197cca 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -992,11 +992,8 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat
992 sigset_from_compat(&newset, &newset32); 992 sigset_from_compat(&newset, &newset32);
993 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 993 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
994 994
995 spin_lock_irq(&current->sighand->siglock);
996 current->saved_sigmask = current->blocked; 995 current->saved_sigmask = current->blocked;
997 current->blocked = newset; 996 set_current_blocked(&newset);
998 recalc_sigpending();
999 spin_unlock_irq(&current->sighand->siglock);
1000 997
1001 current->state = TASK_INTERRUPTIBLE; 998 current->state = TASK_INTERRUPTIBLE;
1002 schedule(); 999 schedule();
diff --git a/kernel/signal.c b/kernel/signal.c
index d7f70aed1cc0..291c9700be75 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3102,15 +3102,11 @@ SYSCALL_DEFINE0(sgetmask)
3102 3102
3103SYSCALL_DEFINE1(ssetmask, int, newmask) 3103SYSCALL_DEFINE1(ssetmask, int, newmask)
3104{ 3104{
3105 int old; 3105 int old = current->blocked.sig[0];
3106 3106 sigset_t newset;
3107 spin_lock_irq(&current->sighand->siglock);
3108 old = current->blocked.sig[0];
3109 3107
3110 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)| 3108 siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
3111 sigmask(SIGSTOP))); 3109 set_current_blocked(&newset);
3112 recalc_sigpending();
3113 spin_unlock_irq(&current->sighand->siglock);
3114 3110
3115 return old; 3111 return old;
3116} 3112}
@@ -3167,11 +3163,8 @@ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3167 return -EFAULT; 3163 return -EFAULT;
3168 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3164 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3169 3165
3170 spin_lock_irq(&current->sighand->siglock);
3171 current->saved_sigmask = current->blocked; 3166 current->saved_sigmask = current->blocked;
3172 current->blocked = newset; 3167 set_current_blocked(&newset);
3173 recalc_sigpending();
3174 spin_unlock_irq(&current->sighand->siglock);
3175 3168
3176 current->state = TASK_INTERRUPTIBLE; 3169 current->state = TASK_INTERRUPTIBLE;
3177 schedule(); 3170 schedule();
diff --git a/net/socket.c b/net/socket.c
index 02dc82db3d23..26ed35c7751e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -467,7 +467,7 @@ static struct socket *sock_alloc(void)
467 struct inode *inode; 467 struct inode *inode;
468 struct socket *sock; 468 struct socket *sock;
469 469
470 inode = new_inode(sock_mnt->mnt_sb); 470 inode = new_inode_pseudo(sock_mnt->mnt_sb);
471 if (!inode) 471 if (!inode)
472 return NULL; 472 return NULL;
473 473
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index b2198e65d8bb..ffd243d09188 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -4,6 +4,10 @@ config SUNRPC
4config SUNRPC_GSS 4config SUNRPC_GSS
5 tristate 5 tristate
6 6
7config SUNRPC_BACKCHANNEL
8 bool
9 depends on SUNRPC
10
7config SUNRPC_XPRT_RDMA 11config SUNRPC_XPRT_RDMA
8 tristate 12 tristate
9 depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL 13 depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 9d2fca5ad14a..8209a0411bca 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,6 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
13 addr.o rpcb_clnt.o timer.o xdr.o \ 13 addr.o rpcb_clnt.o timer.o xdr.o \
14 sunrpc_syms.o cache.o rpc_pipe.o \ 14 sunrpc_syms.o cache.o rpc_pipe.o \
15 svc_xprt.o 15 svc_xprt.o
16sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o 16sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o
17sunrpc-$(CONFIG_PROC_FS) += stats.o 17sunrpc-$(CONFIG_PROC_FS) += stats.o
18sunrpc-$(CONFIG_SYSCTL) += sysctl.o 18sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index cf06af3b63c6..91eaa26e4c42 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -29,8 +29,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29#define RPCDBG_FACILITY RPCDBG_TRANS 29#define RPCDBG_FACILITY RPCDBG_TRANS
30#endif 30#endif
31 31
32#if defined(CONFIG_NFS_V4_1)
33
34/* 32/*
35 * Helper routines that track the number of preallocation elements 33 * Helper routines that track the number of preallocation elements
36 * on the transport. 34 * on the transport.
@@ -174,7 +172,7 @@ out_free:
174 dprintk("RPC: setup backchannel transport failed\n"); 172 dprintk("RPC: setup backchannel transport failed\n");
175 return -1; 173 return -1;
176} 174}
177EXPORT_SYMBOL(xprt_setup_backchannel); 175EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
178 176
179/* 177/*
180 * Destroys the backchannel preallocated structures. 178 * Destroys the backchannel preallocated structures.
@@ -204,7 +202,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
204 dprintk("RPC: backchannel list empty= %s\n", 202 dprintk("RPC: backchannel list empty= %s\n",
205 list_empty(&xprt->bc_pa_list) ? "true" : "false"); 203 list_empty(&xprt->bc_pa_list) ? "true" : "false");
206} 204}
207EXPORT_SYMBOL(xprt_destroy_backchannel); 205EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
208 206
209/* 207/*
210 * One or more rpc_rqst structure have been preallocated during the 208 * One or more rpc_rqst structure have been preallocated during the
@@ -279,4 +277,3 @@ void xprt_free_bc_request(struct rpc_rqst *req)
279 spin_unlock_bh(&xprt->bc_pa_lock); 277 spin_unlock_bh(&xprt->bc_pa_lock);
280} 278}
281 279
282#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 1dd1a6890007..0b2eb388cbda 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -27,8 +27,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 * reply over an existing open connection previously established by the client. 27 * reply over an existing open connection previously established by the client.
28 */ 28 */
29 29
30#if defined(CONFIG_NFS_V4_1)
31
32#include <linux/module.h> 30#include <linux/module.h>
33 31
34#include <linux/sunrpc/xprt.h> 32#include <linux/sunrpc/xprt.h>
@@ -63,4 +61,3 @@ int bc_send(struct rpc_rqst *req)
63 return ret; 61 return ret;
64} 62}
65 63
66#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c50818f0473b..c5347d29cfb7 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -64,9 +64,9 @@ static void call_decode(struct rpc_task *task);
64static void call_bind(struct rpc_task *task); 64static void call_bind(struct rpc_task *task);
65static void call_bind_status(struct rpc_task *task); 65static void call_bind_status(struct rpc_task *task);
66static void call_transmit(struct rpc_task *task); 66static void call_transmit(struct rpc_task *task);
67#if defined(CONFIG_NFS_V4_1) 67#if defined(CONFIG_SUNRPC_BACKCHANNEL)
68static void call_bc_transmit(struct rpc_task *task); 68static void call_bc_transmit(struct rpc_task *task);
69#endif /* CONFIG_NFS_V4_1 */ 69#endif /* CONFIG_SUNRPC_BACKCHANNEL */
70static void call_status(struct rpc_task *task); 70static void call_status(struct rpc_task *task);
71static void call_transmit_status(struct rpc_task *task); 71static void call_transmit_status(struct rpc_task *task);
72static void call_refresh(struct rpc_task *task); 72static void call_refresh(struct rpc_task *task);
@@ -715,7 +715,7 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
715} 715}
716EXPORT_SYMBOL_GPL(rpc_call_async); 716EXPORT_SYMBOL_GPL(rpc_call_async);
717 717
718#if defined(CONFIG_NFS_V4_1) 718#if defined(CONFIG_SUNRPC_BACKCHANNEL)
719/** 719/**
720 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run 720 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
721 * rpc_execute against it 721 * rpc_execute against it
@@ -758,7 +758,7 @@ out:
758 dprintk("RPC: rpc_run_bc_task: task= %p\n", task); 758 dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
759 return task; 759 return task;
760} 760}
761#endif /* CONFIG_NFS_V4_1 */ 761#endif /* CONFIG_SUNRPC_BACKCHANNEL */
762 762
763void 763void
764rpc_call_start(struct rpc_task *task) 764rpc_call_start(struct rpc_task *task)
@@ -1361,7 +1361,7 @@ call_transmit_status(struct rpc_task *task)
1361 } 1361 }
1362} 1362}
1363 1363
1364#if defined(CONFIG_NFS_V4_1) 1364#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1365/* 1365/*
1366 * 5b. Send the backchannel RPC reply. On error, drop the reply. In 1366 * 5b. Send the backchannel RPC reply. On error, drop the reply. In
1367 * addition, disconnect on connectivity errors. 1367 * addition, disconnect on connectivity errors.
@@ -1425,7 +1425,7 @@ call_bc_transmit(struct rpc_task *task)
1425 } 1425 }
1426 rpc_wake_up_queued_task(&req->rq_xprt->pending, task); 1426 rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1427} 1427}
1428#endif /* CONFIG_NFS_V4_1 */ 1428#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1429 1429
1430/* 1430/*
1431 * 6. Sort out the RPC call status 1431 * 6. Sort out the RPC call status
@@ -1550,8 +1550,7 @@ call_decode(struct rpc_task *task)
1550 kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; 1550 kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
1551 __be32 *p; 1551 __be32 *p;
1552 1552
1553 dprintk("RPC: %5u call_decode (status %d)\n", 1553 dprint_status(task);
1554 task->tk_pid, task->tk_status);
1555 1554
1556 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 1555 if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1557 if (clnt->cl_chatty) 1556 if (clnt->cl_chatty)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4814e246a874..d12ffa545811 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -97,14 +97,16 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
97/* 97/*
98 * Add new request to a priority queue. 98 * Add new request to a priority queue.
99 */ 99 */
100static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task) 100static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
101 struct rpc_task *task,
102 unsigned char queue_priority)
101{ 103{
102 struct list_head *q; 104 struct list_head *q;
103 struct rpc_task *t; 105 struct rpc_task *t;
104 106
105 INIT_LIST_HEAD(&task->u.tk_wait.links); 107 INIT_LIST_HEAD(&task->u.tk_wait.links);
106 q = &queue->tasks[task->tk_priority]; 108 q = &queue->tasks[queue_priority];
107 if (unlikely(task->tk_priority > queue->maxpriority)) 109 if (unlikely(queue_priority > queue->maxpriority))
108 q = &queue->tasks[queue->maxpriority]; 110 q = &queue->tasks[queue->maxpriority];
109 list_for_each_entry(t, q, u.tk_wait.list) { 111 list_for_each_entry(t, q, u.tk_wait.list) {
110 if (t->tk_owner == task->tk_owner) { 112 if (t->tk_owner == task->tk_owner) {
@@ -123,12 +125,14 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct r
123 * improve overall performance. 125 * improve overall performance.
124 * Everyone else gets appended to the queue to ensure proper FIFO behavior. 126 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
125 */ 127 */
126static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) 128static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
129 struct rpc_task *task,
130 unsigned char queue_priority)
127{ 131{
128 BUG_ON (RPC_IS_QUEUED(task)); 132 BUG_ON (RPC_IS_QUEUED(task));
129 133
130 if (RPC_IS_PRIORITY(queue)) 134 if (RPC_IS_PRIORITY(queue))
131 __rpc_add_wait_queue_priority(queue, task); 135 __rpc_add_wait_queue_priority(queue, task, queue_priority);
132 else if (RPC_IS_SWAPPER(task)) 136 else if (RPC_IS_SWAPPER(task))
133 list_add(&task->u.tk_wait.list, &queue->tasks[0]); 137 list_add(&task->u.tk_wait.list, &queue->tasks[0]);
134 else 138 else
@@ -311,13 +315,15 @@ static void rpc_make_runnable(struct rpc_task *task)
311 * NB: An RPC task will only receive interrupt-driven events as long 315 * NB: An RPC task will only receive interrupt-driven events as long
312 * as it's on a wait queue. 316 * as it's on a wait queue.
313 */ 317 */
314static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, 318static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
315 rpc_action action) 319 struct rpc_task *task,
320 rpc_action action,
321 unsigned char queue_priority)
316{ 322{
317 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", 323 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
318 task->tk_pid, rpc_qname(q), jiffies); 324 task->tk_pid, rpc_qname(q), jiffies);
319 325
320 __rpc_add_wait_queue(q, task); 326 __rpc_add_wait_queue(q, task, queue_priority);
321 327
322 BUG_ON(task->tk_callback != NULL); 328 BUG_ON(task->tk_callback != NULL);
323 task->tk_callback = action; 329 task->tk_callback = action;
@@ -334,11 +340,25 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
334 * Protect the queue operations. 340 * Protect the queue operations.
335 */ 341 */
336 spin_lock_bh(&q->lock); 342 spin_lock_bh(&q->lock);
337 __rpc_sleep_on(q, task, action); 343 __rpc_sleep_on_priority(q, task, action, task->tk_priority);
338 spin_unlock_bh(&q->lock); 344 spin_unlock_bh(&q->lock);
339} 345}
340EXPORT_SYMBOL_GPL(rpc_sleep_on); 346EXPORT_SYMBOL_GPL(rpc_sleep_on);
341 347
348void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
349 rpc_action action, int priority)
350{
351 /* We shouldn't ever put an inactive task to sleep */
352 BUG_ON(!RPC_IS_ACTIVATED(task));
353
354 /*
355 * Protect the queue operations.
356 */
357 spin_lock_bh(&q->lock);
358 __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
359 spin_unlock_bh(&q->lock);
360}
361
342/** 362/**
343 * __rpc_do_wake_up_task - wake up a single rpc_task 363 * __rpc_do_wake_up_task - wake up a single rpc_task
344 * @queue: wait queue 364 * @queue: wait queue
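sched.c now threads an explicit queue_priority value through __rpc_add_wait_queue_priority() and __rpc_sleep_on_priority(), and adds rpc_sleep_on_priority() so callers can choose the level a task sleeps at; rpc_sleep_on() keeps its old behaviour by passing task->tk_priority, while the new entry point subtracts RPC_PRIORITY_LOW so it accepts the ordinary RPC_PRIORITY_* constants. A short sketch of a caller, mirroring the xprt.c call sites later in this diff (the queue is whichever priority wait queue the caller owns):

/* Sketch, assuming <linux/sunrpc/sched.h>: park a task on a priority wait
 * queue at an explicit level instead of its default tk_priority.
 */
static void example_wait_for_transport(struct rpc_wait_queue *q,
				       struct rpc_task *task)
{
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rpc_sleep_on_priority(q, task, NULL, RPC_PRIORITY_HIGH);
}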
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2b90292e9505..6a69a1131fb7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1252,7 +1252,7 @@ svc_process(struct svc_rqst *rqstp)
1252 } 1252 }
1253} 1253}
1254 1254
1255#if defined(CONFIG_NFS_V4_1) 1255#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1256/* 1256/*
1257 * Process a backchannel RPC request that arrived over an existing 1257 * Process a backchannel RPC request that arrived over an existing
1258 * outbound connection 1258 * outbound connection
@@ -1300,8 +1300,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1300 return 0; 1300 return 0;
1301 } 1301 }
1302} 1302}
1303EXPORT_SYMBOL(bc_svc_process); 1303EXPORT_SYMBOL_GPL(bc_svc_process);
1304#endif /* CONFIG_NFS_V4_1 */ 1304#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1305 1305
1306/* 1306/*
1307 * Return (transport-specific) limit on the rpc payload. 1307 * Return (transport-specific) limit on the rpc payload.
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f2cb5b881dea..767d494de7a2 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -68,12 +68,12 @@ static void svc_sock_free(struct svc_xprt *);
68static struct svc_xprt *svc_create_socket(struct svc_serv *, int, 68static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
69 struct net *, struct sockaddr *, 69 struct net *, struct sockaddr *,
70 int, int); 70 int, int);
71#if defined(CONFIG_NFS_V4_1) 71#if defined(CONFIG_SUNRPC_BACKCHANNEL)
72static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, 72static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
73 struct net *, struct sockaddr *, 73 struct net *, struct sockaddr *,
74 int, int); 74 int, int);
75static void svc_bc_sock_free(struct svc_xprt *xprt); 75static void svc_bc_sock_free(struct svc_xprt *xprt);
76#endif /* CONFIG_NFS_V4_1 */ 76#endif /* CONFIG_SUNRPC_BACKCHANNEL */
77 77
78#ifdef CONFIG_DEBUG_LOCK_ALLOC 78#ifdef CONFIG_DEBUG_LOCK_ALLOC
79static struct lock_class_key svc_key[2]; 79static struct lock_class_key svc_key[2];
@@ -1243,7 +1243,7 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
1243 return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); 1243 return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
1244} 1244}
1245 1245
1246#if defined(CONFIG_NFS_V4_1) 1246#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1247static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, 1247static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
1248 struct net *, struct sockaddr *, 1248 struct net *, struct sockaddr *,
1249 int, int); 1249 int, int);
@@ -1284,7 +1284,7 @@ static void svc_cleanup_bc_xprt_sock(void)
1284{ 1284{
1285 svc_unreg_xprt_class(&svc_tcp_bc_class); 1285 svc_unreg_xprt_class(&svc_tcp_bc_class);
1286} 1286}
1287#else /* CONFIG_NFS_V4_1 */ 1287#else /* CONFIG_SUNRPC_BACKCHANNEL */
1288static void svc_init_bc_xprt_sock(void) 1288static void svc_init_bc_xprt_sock(void)
1289{ 1289{
1290} 1290}
@@ -1292,7 +1292,7 @@ static void svc_init_bc_xprt_sock(void)
1292static void svc_cleanup_bc_xprt_sock(void) 1292static void svc_cleanup_bc_xprt_sock(void)
1293{ 1293{
1294} 1294}
1295#endif /* CONFIG_NFS_V4_1 */ 1295#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1296 1296
1297static struct svc_xprt_ops svc_tcp_ops = { 1297static struct svc_xprt_ops svc_tcp_ops = {
1298 .xpo_create = svc_tcp_create, 1298 .xpo_create = svc_tcp_create,
@@ -1623,7 +1623,7 @@ static void svc_sock_free(struct svc_xprt *xprt)
1623 kfree(svsk); 1623 kfree(svsk);
1624} 1624}
1625 1625
1626#if defined(CONFIG_NFS_V4_1) 1626#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1627/* 1627/*
1628 * Create a back channel svc_xprt which shares the fore channel socket. 1628 * Create a back channel svc_xprt which shares the fore channel socket.
1629 */ 1629 */
@@ -1662,4 +1662,4 @@ static void svc_bc_sock_free(struct svc_xprt *xprt)
1662 if (xprt) 1662 if (xprt)
1663 kfree(container_of(xprt, struct svc_sock, sk_xprt)); 1663 kfree(container_of(xprt, struct svc_sock, sk_xprt));
1664} 1664}
1665#endif /* CONFIG_NFS_V4_1 */ 1665#endif /* CONFIG_SUNRPC_BACKCHANNEL */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index f008c14ad34c..277ebd4bf095 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -126,7 +126,7 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
126 kaddr[buf->page_base + len] = '\0'; 126 kaddr[buf->page_base + len] = '\0';
127 kunmap_atomic(kaddr, KM_USER0); 127 kunmap_atomic(kaddr, KM_USER0);
128} 128}
129EXPORT_SYMBOL(xdr_terminate_string); 129EXPORT_SYMBOL_GPL(xdr_terminate_string);
130 130
131void 131void
132xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, 132xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ce5eb68a9664..9b6a4d1ea8f8 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,6 +62,7 @@
62/* 62/*
63 * Local functions 63 * Local functions
64 */ 64 */
65static void xprt_init(struct rpc_xprt *xprt, struct net *net);
65static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); 66static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
66static void xprt_connect_status(struct rpc_task *task); 67static void xprt_connect_status(struct rpc_task *task);
67static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); 68static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
@@ -191,10 +192,10 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
191 * transport connects from colliding with writes. No congestion control 192 * transport connects from colliding with writes. No congestion control
192 * is provided. 193 * is provided.
193 */ 194 */
194int xprt_reserve_xprt(struct rpc_task *task) 195int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
195{ 196{
196 struct rpc_rqst *req = task->tk_rqstp; 197 struct rpc_rqst *req = task->tk_rqstp;
197 struct rpc_xprt *xprt = req->rq_xprt; 198 int priority;
198 199
199 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { 200 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
200 if (task == xprt->snd_task) 201 if (task == xprt->snd_task)
@@ -202,8 +203,10 @@ int xprt_reserve_xprt(struct rpc_task *task)
202 goto out_sleep; 203 goto out_sleep;
203 } 204 }
204 xprt->snd_task = task; 205 xprt->snd_task = task;
205 req->rq_bytes_sent = 0; 206 if (req != NULL) {
206 req->rq_ntrans++; 207 req->rq_bytes_sent = 0;
208 req->rq_ntrans++;
209 }
207 210
208 return 1; 211 return 1;
209 212
@@ -212,10 +215,13 @@ out_sleep:
212 task->tk_pid, xprt); 215 task->tk_pid, xprt);
213 task->tk_timeout = 0; 216 task->tk_timeout = 0;
214 task->tk_status = -EAGAIN; 217 task->tk_status = -EAGAIN;
215 if (req->rq_ntrans) 218 if (req == NULL)
216 rpc_sleep_on(&xprt->resend, task, NULL); 219 priority = RPC_PRIORITY_LOW;
220 else if (!req->rq_ntrans)
221 priority = RPC_PRIORITY_NORMAL;
217 else 222 else
218 rpc_sleep_on(&xprt->sending, task, NULL); 223 priority = RPC_PRIORITY_HIGH;
224 rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
219 return 0; 225 return 0;
220} 226}
221EXPORT_SYMBOL_GPL(xprt_reserve_xprt); 227EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
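xprt_reserve_xprt() now receives the rpc_xprt explicitly and copes with task->tk_rqstp being NULL, which lets a task take the transport write lock before it owns a request slot; on contention it sleeps on the single sending queue at a priority chosen from its state instead of using a separate resend queue. A sketch of how a transport wires up the reworked op (mirroring the xprtrdma change later in this diff; the wrapper name is illustrative):

/* Sketch: a ->reserve_xprt() implementation that defers to the generic
 * helper.  The op now gets the xprt directly because task->tk_rqstp may
 * legitimately be NULL at this point.
 */
static int example_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	return xprt_reserve_xprt(xprt, task);
}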
@@ -239,22 +245,24 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
239 * integrated into the decision of whether a request is allowed to be 245 * integrated into the decision of whether a request is allowed to be
240 * woken up and given access to the transport. 246 * woken up and given access to the transport.
241 */ 247 */
242int xprt_reserve_xprt_cong(struct rpc_task *task) 248int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
243{ 249{
244 struct rpc_xprt *xprt = task->tk_xprt;
245 struct rpc_rqst *req = task->tk_rqstp; 250 struct rpc_rqst *req = task->tk_rqstp;
251 int priority;
246 252
247 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { 253 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
248 if (task == xprt->snd_task) 254 if (task == xprt->snd_task)
249 return 1; 255 return 1;
250 goto out_sleep; 256 goto out_sleep;
251 } 257 }
258 if (req == NULL) {
259 xprt->snd_task = task;
260 return 1;
261 }
252 if (__xprt_get_cong(xprt, task)) { 262 if (__xprt_get_cong(xprt, task)) {
253 xprt->snd_task = task; 263 xprt->snd_task = task;
254 if (req) { 264 req->rq_bytes_sent = 0;
255 req->rq_bytes_sent = 0; 265 req->rq_ntrans++;
256 req->rq_ntrans++;
257 }
258 return 1; 266 return 1;
259 } 267 }
260 xprt_clear_locked(xprt); 268 xprt_clear_locked(xprt);
@@ -262,10 +270,13 @@ out_sleep:
262 dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); 270 dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
263 task->tk_timeout = 0; 271 task->tk_timeout = 0;
264 task->tk_status = -EAGAIN; 272 task->tk_status = -EAGAIN;
265 if (req && req->rq_ntrans) 273 if (req == NULL)
266 rpc_sleep_on(&xprt->resend, task, NULL); 274 priority = RPC_PRIORITY_LOW;
275 else if (!req->rq_ntrans)
276 priority = RPC_PRIORITY_NORMAL;
267 else 277 else
268 rpc_sleep_on(&xprt->sending, task, NULL); 278 priority = RPC_PRIORITY_HIGH;
279 rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
269 return 0; 280 return 0;
270} 281}
271EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); 282EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -275,7 +286,7 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
275 int retval; 286 int retval;
276 287
277 spin_lock_bh(&xprt->transport_lock); 288 spin_lock_bh(&xprt->transport_lock);
278 retval = xprt->ops->reserve_xprt(task); 289 retval = xprt->ops->reserve_xprt(xprt, task);
279 spin_unlock_bh(&xprt->transport_lock); 290 spin_unlock_bh(&xprt->transport_lock);
280 return retval; 291 return retval;
281} 292}
@@ -288,12 +299,9 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt)
288 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 299 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
289 return; 300 return;
290 301
291 task = rpc_wake_up_next(&xprt->resend); 302 task = rpc_wake_up_next(&xprt->sending);
292 if (!task) { 303 if (task == NULL)
293 task = rpc_wake_up_next(&xprt->sending); 304 goto out_unlock;
294 if (!task)
295 goto out_unlock;
296 }
297 305
298 req = task->tk_rqstp; 306 req = task->tk_rqstp;
299 xprt->snd_task = task; 307 xprt->snd_task = task;
@@ -310,24 +318,25 @@ out_unlock:
310static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) 318static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
311{ 319{
312 struct rpc_task *task; 320 struct rpc_task *task;
321 struct rpc_rqst *req;
313 322
314 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 323 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
315 return; 324 return;
316 if (RPCXPRT_CONGESTED(xprt)) 325 if (RPCXPRT_CONGESTED(xprt))
317 goto out_unlock; 326 goto out_unlock;
318 task = rpc_wake_up_next(&xprt->resend); 327 task = rpc_wake_up_next(&xprt->sending);
319 if (!task) { 328 if (task == NULL)
320 task = rpc_wake_up_next(&xprt->sending); 329 goto out_unlock;
321 if (!task) 330
322 goto out_unlock; 331 req = task->tk_rqstp;
332 if (req == NULL) {
333 xprt->snd_task = task;
334 return;
323 } 335 }
324 if (__xprt_get_cong(xprt, task)) { 336 if (__xprt_get_cong(xprt, task)) {
325 struct rpc_rqst *req = task->tk_rqstp;
326 xprt->snd_task = task; 337 xprt->snd_task = task;
327 if (req) { 338 req->rq_bytes_sent = 0;
328 req->rq_bytes_sent = 0; 339 req->rq_ntrans++;
329 req->rq_ntrans++;
330 }
331 return; 340 return;
332 } 341 }
333out_unlock: 342out_unlock:
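With retransmissions now queued on the sending wait queue at RPC_PRIORITY_HIGH, the separate resend queue is gone: both __xprt_lock_write_next() and __xprt_lock_write_next_cong() wake the next waiter from sending alone, and the congestion variant hands the lock straight to slot-less tasks without charging them congestion credit. The priority rule the hunks above encode, restated as a small helper (the function name is illustrative, not part of the patch):

/* no slot yet: cheap to defer; first transmission: normal priority;
 * retransmission: wake ahead of everything else.
 */
static int example_sleep_priority(const struct rpc_rqst *req)
{
	if (req == NULL)
		return RPC_PRIORITY_LOW;
	if (req->rq_ntrans == 0)
		return RPC_PRIORITY_NORMAL;
	return RPC_PRIORITY_HIGH;
}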
@@ -852,7 +861,7 @@ int xprt_prepare_transmit(struct rpc_task *task)
852 err = req->rq_reply_bytes_recvd; 861 err = req->rq_reply_bytes_recvd;
853 goto out_unlock; 862 goto out_unlock;
854 } 863 }
855 if (!xprt->ops->reserve_xprt(task)) 864 if (!xprt->ops->reserve_xprt(xprt, task))
856 err = -EAGAIN; 865 err = -EAGAIN;
857out_unlock: 866out_unlock:
858 spin_unlock_bh(&xprt->transport_lock); 867 spin_unlock_bh(&xprt->transport_lock);
@@ -928,28 +937,66 @@ void xprt_transmit(struct rpc_task *task)
928 spin_unlock_bh(&xprt->transport_lock); 937 spin_unlock_bh(&xprt->transport_lock);
929} 938}
930 939
940static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
941{
942 struct rpc_rqst *req = ERR_PTR(-EAGAIN);
943
944 if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
945 goto out;
946 req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
947 if (req != NULL)
948 goto out;
949 atomic_dec(&xprt->num_reqs);
950 req = ERR_PTR(-ENOMEM);
951out:
952 return req;
953}
954
955static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
956{
957 if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
958 kfree(req);
959 return true;
960 }
961 return false;
962}
963
931static void xprt_alloc_slot(struct rpc_task *task) 964static void xprt_alloc_slot(struct rpc_task *task)
932{ 965{
933 struct rpc_xprt *xprt = task->tk_xprt; 966 struct rpc_xprt *xprt = task->tk_xprt;
967 struct rpc_rqst *req;
934 968
935 task->tk_status = 0;
936 if (task->tk_rqstp)
937 return;
938 if (!list_empty(&xprt->free)) { 969 if (!list_empty(&xprt->free)) {
939 struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); 970 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
940 list_del_init(&req->rq_list); 971 list_del(&req->rq_list);
941 task->tk_rqstp = req; 972 goto out_init_req;
942 xprt_request_init(task, xprt); 973 }
943 return; 974 req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
975 if (!IS_ERR(req))
976 goto out_init_req;
977 switch (PTR_ERR(req)) {
978 case -ENOMEM:
979 rpc_delay(task, HZ >> 2);
980 dprintk("RPC: dynamic allocation of request slot "
981 "failed! Retrying\n");
982 break;
983 case -EAGAIN:
984 rpc_sleep_on(&xprt->backlog, task, NULL);
985 dprintk("RPC: waiting for request slot\n");
944 } 986 }
945 dprintk("RPC: waiting for request slot\n");
946 task->tk_status = -EAGAIN; 987 task->tk_status = -EAGAIN;
947 task->tk_timeout = 0; 988 return;
948 rpc_sleep_on(&xprt->backlog, task, NULL); 989out_init_req:
990 task->tk_status = 0;
991 task->tk_rqstp = req;
992 xprt_request_init(task, xprt);
949} 993}
950 994
951static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) 995static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
952{ 996{
997 if (xprt_dynamic_free_slot(xprt, req))
998 return;
999
953 memset(req, 0, sizeof(*req)); /* mark unused */ 1000 memset(req, 0, sizeof(*req)); /* mark unused */
954 1001
955 spin_lock(&xprt->reserve_lock); 1002 spin_lock(&xprt->reserve_lock);
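xprt_alloc_slot() no longer fails outright when the preallocated free list is empty: xprt_dynamic_alloc_slot() grows the slot table with GFP_NOWAIT up to xprt->max_reqs, an -ENOMEM result schedules a short rpc_delay() retry, and -EAGAIN (table already at its ceiling) parks the task on the backlog queue; xprt_free_slot() correspondingly lets xprt_dynamic_free_slot() give surplus slots back down to xprt->min_reqs. The bounded-counter idiom both helpers rely on, as a self-contained sketch (the struct is an illustrative stand-in for struct rpc_xprt):

/* Sketch, assuming <linux/atomic.h>: take a slot reference only while below
 * the ceiling, drop one only while above the floor.
 */
struct example_slot_table {
	atomic_t	num_reqs;
	unsigned int	min_reqs;
	unsigned int	max_reqs;
};

static bool example_try_grow(struct example_slot_table *t)
{
	return atomic_add_unless(&t->num_reqs, 1, t->max_reqs) != 0;
}

static bool example_try_shrink(struct example_slot_table *t)
{
	return atomic_add_unless(&t->num_reqs, -1, t->min_reqs) != 0;
}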
@@ -958,25 +1005,49 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
958 spin_unlock(&xprt->reserve_lock); 1005 spin_unlock(&xprt->reserve_lock);
959} 1006}
960 1007
961struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req) 1008static void xprt_free_all_slots(struct rpc_xprt *xprt)
1009{
1010 struct rpc_rqst *req;
1011 while (!list_empty(&xprt->free)) {
1012 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1013 list_del(&req->rq_list);
1014 kfree(req);
1015 }
1016}
1017
1018struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1019 unsigned int num_prealloc,
1020 unsigned int max_alloc)
962{ 1021{
963 struct rpc_xprt *xprt; 1022 struct rpc_xprt *xprt;
1023 struct rpc_rqst *req;
1024 int i;
964 1025
965 xprt = kzalloc(size, GFP_KERNEL); 1026 xprt = kzalloc(size, GFP_KERNEL);
966 if (xprt == NULL) 1027 if (xprt == NULL)
967 goto out; 1028 goto out;
968 atomic_set(&xprt->count, 1);
969 1029
970 xprt->max_reqs = max_req; 1030 xprt_init(xprt, net);
971 xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); 1031
972 if (xprt->slot == NULL) 1032 for (i = 0; i < num_prealloc; i++) {
1033 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1034 if (!req)
1035 break;
1036 list_add(&req->rq_list, &xprt->free);
1037 }
1038 if (i < num_prealloc)
973 goto out_free; 1039 goto out_free;
1040 if (max_alloc > num_prealloc)
1041 xprt->max_reqs = max_alloc;
1042 else
1043 xprt->max_reqs = num_prealloc;
1044 xprt->min_reqs = num_prealloc;
1045 atomic_set(&xprt->num_reqs, num_prealloc);
974 1046
975 xprt->xprt_net = get_net(net);
976 return xprt; 1047 return xprt;
977 1048
978out_free: 1049out_free:
979 kfree(xprt); 1050 xprt_free(xprt);
980out: 1051out:
981 return NULL; 1052 return NULL;
982} 1053}
@@ -985,7 +1056,7 @@ EXPORT_SYMBOL_GPL(xprt_alloc);
985void xprt_free(struct rpc_xprt *xprt) 1056void xprt_free(struct rpc_xprt *xprt)
986{ 1057{
987 put_net(xprt->xprt_net); 1058 put_net(xprt->xprt_net);
988 kfree(xprt->slot); 1059 xprt_free_all_slots(xprt);
989 kfree(xprt); 1060 kfree(xprt);
990} 1061}
991EXPORT_SYMBOL_GPL(xprt_free); 1062EXPORT_SYMBOL_GPL(xprt_free);
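xprt_alloc() changes signature from (net, size, max_req) to (net, size, num_prealloc, max_alloc): it preallocates num_prealloc rpc_rqst entries on the free list with GFP_KERNEL, records min_reqs/max_reqs for the dynamic slot code above, and xprt_free() now walks that list via xprt_free_all_slots() instead of freeing a single kcalloc'd array. A hedged sketch of a transport setup call under the new signature, patterned on the xprtsock and xprtrdma callers later in this diff (the slot counts are illustrative):

/* Sketch, assuming <linux/sunrpc/xprt.h> and <linux/err.h>: 16 preallocated
 * request slots, allowed to grow dynamically to 128.
 */
static struct rpc_xprt *example_setup(struct net *net, size_t priv_size)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(net, priv_size, 16, 128);
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
	return xprt;
}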
@@ -1001,10 +1072,24 @@ void xprt_reserve(struct rpc_task *task)
1001{ 1072{
1002 struct rpc_xprt *xprt = task->tk_xprt; 1073 struct rpc_xprt *xprt = task->tk_xprt;
1003 1074
1004 task->tk_status = -EIO; 1075 task->tk_status = 0;
1076 if (task->tk_rqstp != NULL)
1077 return;
1078
1079 /* Note: grabbing the xprt_lock_write() here is not strictly needed,
1080 * but ensures that we throttle new slot allocation if the transport
1081 * is congested (e.g. if reconnecting or if we're out of socket
1082 * write buffer space).
1083 */
1084 task->tk_timeout = 0;
1085 task->tk_status = -EAGAIN;
1086 if (!xprt_lock_write(xprt, task))
1087 return;
1088
1005 spin_lock(&xprt->reserve_lock); 1089 spin_lock(&xprt->reserve_lock);
1006 xprt_alloc_slot(task); 1090 xprt_alloc_slot(task);
1007 spin_unlock(&xprt->reserve_lock); 1091 spin_unlock(&xprt->reserve_lock);
1092 xprt_release_write(xprt, task);
1008} 1093}
1009 1094
1010static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) 1095static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
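xprt_reserve() stops defaulting task->tk_status to -EIO: it returns at once when the task already owns a slot, and otherwise takes the transport write lock purely as a throttle around slot allocation, dropping it again with xprt_release_write() as soon as the allocation attempt is done. That way new slots are not handed out while the transport is congested or still reconnecting, exactly as the added comment says. Restated as a sketch using the xprt.c-internal helpers from the hunk above:

/* Sketch of the throttle pattern: the send lock gates allocation only and
 * is never held across the RPC itself.
 */
static void example_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;	/* retried when the lock is granted */
	if (!xprt_lock_write(xprt, task))
		return;
	spin_lock(&xprt->reserve_lock);
	xprt_alloc_slot(task);
	spin_unlock(&xprt->reserve_lock);
	xprt_release_write(xprt, task);
}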
@@ -1021,6 +1106,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
1021{ 1106{
1022 struct rpc_rqst *req = task->tk_rqstp; 1107 struct rpc_rqst *req = task->tk_rqstp;
1023 1108
1109 INIT_LIST_HEAD(&req->rq_list);
1024 req->rq_timeout = task->tk_client->cl_timeout->to_initval; 1110 req->rq_timeout = task->tk_client->cl_timeout->to_initval;
1025 req->rq_task = task; 1111 req->rq_task = task;
1026 req->rq_xprt = xprt; 1112 req->rq_xprt = xprt;
@@ -1073,6 +1159,34 @@ void xprt_release(struct rpc_task *task)
1073 xprt_free_bc_request(req); 1159 xprt_free_bc_request(req);
1074} 1160}
1075 1161
1162static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1163{
1164 atomic_set(&xprt->count, 1);
1165
1166 spin_lock_init(&xprt->transport_lock);
1167 spin_lock_init(&xprt->reserve_lock);
1168
1169 INIT_LIST_HEAD(&xprt->free);
1170 INIT_LIST_HEAD(&xprt->recv);
1171#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1172 spin_lock_init(&xprt->bc_pa_lock);
1173 INIT_LIST_HEAD(&xprt->bc_pa_list);
1174#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1175
1176 xprt->last_used = jiffies;
1177 xprt->cwnd = RPC_INITCWND;
1178 xprt->bind_index = 0;
1179
1180 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1181 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1182 rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
1183 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1184
1185 xprt_init_xid(xprt);
1186
1187 xprt->xprt_net = get_net(net);
1188}
1189
1076/** 1190/**
1077 * xprt_create_transport - create an RPC transport 1191 * xprt_create_transport - create an RPC transport
1078 * @args: rpc transport creation arguments 1192 * @args: rpc transport creation arguments
@@ -1081,7 +1195,6 @@ void xprt_release(struct rpc_task *task)
1081struct rpc_xprt *xprt_create_transport(struct xprt_create *args) 1195struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1082{ 1196{
1083 struct rpc_xprt *xprt; 1197 struct rpc_xprt *xprt;
1084 struct rpc_rqst *req;
1085 struct xprt_class *t; 1198 struct xprt_class *t;
1086 1199
1087 spin_lock(&xprt_list_lock); 1200 spin_lock(&xprt_list_lock);
@@ -1100,46 +1213,17 @@ found:
1100 if (IS_ERR(xprt)) { 1213 if (IS_ERR(xprt)) {
1101 dprintk("RPC: xprt_create_transport: failed, %ld\n", 1214 dprintk("RPC: xprt_create_transport: failed, %ld\n",
1102 -PTR_ERR(xprt)); 1215 -PTR_ERR(xprt));
1103 return xprt; 1216 goto out;
1104 } 1217 }
1105 if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
1106 /* ->setup returned a pre-initialized xprt: */
1107 return xprt;
1108
1109 spin_lock_init(&xprt->transport_lock);
1110 spin_lock_init(&xprt->reserve_lock);
1111
1112 INIT_LIST_HEAD(&xprt->free);
1113 INIT_LIST_HEAD(&xprt->recv);
1114#if defined(CONFIG_NFS_V4_1)
1115 spin_lock_init(&xprt->bc_pa_lock);
1116 INIT_LIST_HEAD(&xprt->bc_pa_list);
1117#endif /* CONFIG_NFS_V4_1 */
1118
1119 INIT_WORK(&xprt->task_cleanup, xprt_autoclose); 1218 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1120 if (xprt_has_timer(xprt)) 1219 if (xprt_has_timer(xprt))
1121 setup_timer(&xprt->timer, xprt_init_autodisconnect, 1220 setup_timer(&xprt->timer, xprt_init_autodisconnect,
1122 (unsigned long)xprt); 1221 (unsigned long)xprt);
1123 else 1222 else
1124 init_timer(&xprt->timer); 1223 init_timer(&xprt->timer);
1125 xprt->last_used = jiffies;
1126 xprt->cwnd = RPC_INITCWND;
1127 xprt->bind_index = 0;
1128
1129 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1130 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1131 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1132 rpc_init_wait_queue(&xprt->resend, "xprt_resend");
1133 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1134
1135 /* initialize free list */
1136 for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
1137 list_add(&req->rq_list, &xprt->free);
1138
1139 xprt_init_xid(xprt);
1140
1141 dprintk("RPC: created transport %p with %u slots\n", xprt, 1224 dprintk("RPC: created transport %p with %u slots\n", xprt,
1142 xprt->max_reqs); 1225 xprt->max_reqs);
1226out:
1143 return xprt; 1227 return xprt;
1144} 1228}
1145 1229
@@ -1157,7 +1241,6 @@ static void xprt_destroy(struct rpc_xprt *xprt)
1157 rpc_destroy_wait_queue(&xprt->binding); 1241 rpc_destroy_wait_queue(&xprt->binding);
1158 rpc_destroy_wait_queue(&xprt->pending); 1242 rpc_destroy_wait_queue(&xprt->pending);
1159 rpc_destroy_wait_queue(&xprt->sending); 1243 rpc_destroy_wait_queue(&xprt->sending);
1160 rpc_destroy_wait_queue(&xprt->resend);
1161 rpc_destroy_wait_queue(&xprt->backlog); 1244 rpc_destroy_wait_queue(&xprt->backlog);
1162 cancel_work_sync(&xprt->task_cleanup); 1245 cancel_work_sync(&xprt->task_cleanup);
1163 /* 1246 /*
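All of the generic setup that xprt_create_transport() used to open-code (the locks, the free and recv lists, the backchannel fields, the wait queues, the XID seed) now lives in xprt_init(), which every ->setup() implementation reaches through xprt_alloc(); xprt_create_transport() keeps only the work and timer wiring, and both it and xprt_destroy() drop the retired resend queue. One detail worth calling out: sending is now created with rpc_init_priority_wait_queue() (backlog already was a priority queue), which is what makes the rpc_sleep_on_priority() calls earlier in this diff possible. A sketch of that queue setup, lifted from the xprt_init() hunk:

/* Sketch, assuming <linux/sunrpc/sched.h>: binding/pending stay plain FIFO
 * queues, sending and backlog are priority queues.
 */
static void example_init_queues(struct rpc_xprt *xprt)
{
	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
}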
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 0867070bb5ca..b446e100286f 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -283,6 +283,7 @@ xprt_setup_rdma(struct xprt_create *args)
283 } 283 }
284 284
285 xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 285 xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
286 xprt_rdma_slot_table_entries,
286 xprt_rdma_slot_table_entries); 287 xprt_rdma_slot_table_entries);
287 if (xprt == NULL) { 288 if (xprt == NULL) {
288 dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", 289 dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
@@ -452,9 +453,8 @@ xprt_rdma_connect(struct rpc_task *task)
452} 453}
453 454
454static int 455static int
455xprt_rdma_reserve_xprt(struct rpc_task *task) 456xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
456{ 457{
457 struct rpc_xprt *xprt = task->tk_xprt;
458 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 458 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
459 int credits = atomic_read(&r_xprt->rx_buf.rb_credits); 459 int credits = atomic_read(&r_xprt->rx_buf.rb_credits);
460 460
@@ -466,7 +466,7 @@ xprt_rdma_reserve_xprt(struct rpc_task *task)
466 BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0); 466 BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
467 } 467 }
468 xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale; 468 xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
469 return xprt_reserve_xprt_cong(task); 469 return xprt_reserve_xprt_cong(xprt, task);
470} 470}
471 471
472/* 472/*
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index ddf05288d9f1..08c5d5a128fc 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -109,7 +109,7 @@ struct rpcrdma_ep {
109 */ 109 */
110 110
111/* temporary static scatter/gather max */ 111/* temporary static scatter/gather max */
112#define RPCRDMA_MAX_DATA_SEGS (8) /* max scatter/gather */ 112#define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */
113#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ 113#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
114#define MAX_RPCRDMAHDR (\ 114#define MAX_RPCRDMAHDR (\
115 /* max supported RPC/RDMA header */ \ 115 /* max supported RPC/RDMA header */ \
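Raising RPCRDMA_MAX_DATA_SEGS from 8 to 64 lets a single RPC/RDMA call carry far more scatter/gather data; RPCRDMA_MAX_SEGS still adds two segments for head and tail. As a rough, hedged calculation only (it assumes each data segment maps one 4 KiB page, which is the common case but not guaranteed):

/* Back-of-the-envelope: 8 * 4 KiB = 32 KiB of data segments before,
 * 64 * 4 KiB = 256 KiB after.  PAGE_SIZE is architecture-dependent, so
 * treat these numbers as illustrative.
 */
#define EXAMPLE_PAGE_SIZE	4096
#define EXAMPLE_OLD_MAX_DATA	(8 * EXAMPLE_PAGE_SIZE)
#define EXAMPLE_NEW_MAX_DATA	(64 * EXAMPLE_PAGE_SIZE)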
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 72abb7358933..d7f97ef26590 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -37,7 +37,7 @@
37#include <linux/sunrpc/svcsock.h> 37#include <linux/sunrpc/svcsock.h>
38#include <linux/sunrpc/xprtsock.h> 38#include <linux/sunrpc/xprtsock.h>
39#include <linux/file.h> 39#include <linux/file.h>
40#ifdef CONFIG_NFS_V4_1 40#ifdef CONFIG_SUNRPC_BACKCHANNEL
41#include <linux/sunrpc/bc_xprt.h> 41#include <linux/sunrpc/bc_xprt.h>
42#endif 42#endif
43 43
@@ -54,7 +54,8 @@ static void xs_close(struct rpc_xprt *xprt);
54 * xprtsock tunables 54 * xprtsock tunables
55 */ 55 */
56unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; 56unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
57unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; 57unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
58unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
58 59
59unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; 60unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
60unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; 61unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
@@ -75,6 +76,7 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
75 76
76static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; 77static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
77static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; 78static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
79static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
78static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; 80static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
79static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; 81static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
80 82
@@ -104,6 +106,15 @@ static ctl_table xs_tunables_table[] = {
104 .extra2 = &max_slot_table_size 106 .extra2 = &max_slot_table_size
105 }, 107 },
106 { 108 {
109 .procname = "tcp_max_slot_table_entries",
110 .data = &xprt_max_tcp_slot_table_entries,
111 .maxlen = sizeof(unsigned int),
112 .mode = 0644,
113 .proc_handler = proc_dointvec_minmax,
114 .extra1 = &min_slot_table_size,
115 .extra2 = &max_tcp_slot_table_limit
116 },
117 {
107 .procname = "min_resvport", 118 .procname = "min_resvport",
108 .data = &xprt_min_resvport, 119 .data = &xprt_min_resvport,
109 .maxlen = sizeof(unsigned int), 120 .maxlen = sizeof(unsigned int),
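xprtsock now distinguishes the preallocated TCP slot count from its ceiling: xprt_tcp_slot_table_entries starts at RPC_MIN_SLOT_TABLE, while the new xprt_max_tcp_slot_table_entries defaults to RPC_MAX_SLOT_TABLE and may be raised up to RPC_MAX_SLOT_TABLE_LIMIT through the tcp_max_slot_table_entries sysctl entry added here (a matching module parameter follows further down). The minmax-clamped ctl_table pattern it uses, as a sketch with an illustrative tunable name:

/* Sketch: proc_dointvec_minmax keeps the value between the extra1/extra2
 * bounds; the bound variables are the same ones this hunk references.
 */
static unsigned int example_max_slots = RPC_MAX_SLOT_TABLE;

static ctl_table example_table[] = {
	{
		.procname	= "example_max_slot_table_entries",
		.data		= &example_max_slots,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_tcp_slot_table_limit,
	},
	{ },
};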
@@ -755,6 +766,8 @@ static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
755 if (task == NULL) 766 if (task == NULL)
756 goto out_release; 767 goto out_release;
757 req = task->tk_rqstp; 768 req = task->tk_rqstp;
769 if (req == NULL)
770 goto out_release;
758 if (req->rq_bytes_sent == 0) 771 if (req->rq_bytes_sent == 0)
759 goto out_release; 772 goto out_release;
760 if (req->rq_bytes_sent == req->rq_snd_buf.len) 773 if (req->rq_bytes_sent == req->rq_snd_buf.len)
@@ -1236,7 +1249,7 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1236 return 0; 1249 return 0;
1237} 1250}
1238 1251
1239#if defined(CONFIG_NFS_V4_1) 1252#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1240/* 1253/*
1241 * Obtains an rpc_rqst previously allocated and invokes the common 1254 * Obtains an rpc_rqst previously allocated and invokes the common
1242 * tcp read code to read the data. The result is placed in the callback 1255 * tcp read code to read the data. The result is placed in the callback
@@ -1299,7 +1312,7 @@ static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1299{ 1312{
1300 return xs_tcp_read_reply(xprt, desc); 1313 return xs_tcp_read_reply(xprt, desc);
1301} 1314}
1302#endif /* CONFIG_NFS_V4_1 */ 1315#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1303 1316
1304/* 1317/*
1305 * Read data off the transport. This can be either an RPC_CALL or an 1318 * Read data off the transport. This can be either an RPC_CALL or an
@@ -2489,7 +2502,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2489} 2502}
2490 2503
2491static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, 2504static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2492 unsigned int slot_table_size) 2505 unsigned int slot_table_size,
2506 unsigned int max_slot_table_size)
2493{ 2507{
2494 struct rpc_xprt *xprt; 2508 struct rpc_xprt *xprt;
2495 struct sock_xprt *new; 2509 struct sock_xprt *new;
@@ -2499,7 +2513,8 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2499 return ERR_PTR(-EBADF); 2513 return ERR_PTR(-EBADF);
2500 } 2514 }
2501 2515
2502 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size); 2516 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2517 max_slot_table_size);
2503 if (xprt == NULL) { 2518 if (xprt == NULL) {
2504 dprintk("RPC: xs_setup_xprt: couldn't allocate " 2519 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2505 "rpc_xprt\n"); 2520 "rpc_xprt\n");
@@ -2541,7 +2556,8 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2541 struct rpc_xprt *xprt; 2556 struct rpc_xprt *xprt;
2542 struct rpc_xprt *ret; 2557 struct rpc_xprt *ret;
2543 2558
2544 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 2559 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2560 xprt_max_tcp_slot_table_entries);
2545 if (IS_ERR(xprt)) 2561 if (IS_ERR(xprt))
2546 return xprt; 2562 return xprt;
2547 transport = container_of(xprt, struct sock_xprt, xprt); 2563 transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2605,7 +2621,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2605 struct sock_xprt *transport; 2621 struct sock_xprt *transport;
2606 struct rpc_xprt *ret; 2622 struct rpc_xprt *ret;
2607 2623
2608 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); 2624 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2625 xprt_udp_slot_table_entries);
2609 if (IS_ERR(xprt)) 2626 if (IS_ERR(xprt))
2610 return xprt; 2627 return xprt;
2611 transport = container_of(xprt, struct sock_xprt, xprt); 2628 transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2681,7 +2698,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2681 struct sock_xprt *transport; 2698 struct sock_xprt *transport;
2682 struct rpc_xprt *ret; 2699 struct rpc_xprt *ret;
2683 2700
2684 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 2701 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2702 xprt_max_tcp_slot_table_entries);
2685 if (IS_ERR(xprt)) 2703 if (IS_ERR(xprt))
2686 return xprt; 2704 return xprt;
2687 transport = container_of(xprt, struct sock_xprt, xprt); 2705 transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2760,7 +2778,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2760 */ 2778 */
2761 return args->bc_xprt->xpt_bc_xprt; 2779 return args->bc_xprt->xpt_bc_xprt;
2762 } 2780 }
2763 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 2781 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2782 xprt_tcp_slot_table_entries);
2764 if (IS_ERR(xprt)) 2783 if (IS_ERR(xprt))
2765 return xprt; 2784 return xprt;
2766 transport = container_of(xprt, struct sock_xprt, xprt); 2785 transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2947,8 +2966,26 @@ static struct kernel_param_ops param_ops_slot_table_size = {
2947#define param_check_slot_table_size(name, p) \ 2966#define param_check_slot_table_size(name, p) \
2948 __param_check(name, p, unsigned int); 2967 __param_check(name, p, unsigned int);
2949 2968
2969static int param_set_max_slot_table_size(const char *val,
2970 const struct kernel_param *kp)
2971{
2972 return param_set_uint_minmax(val, kp,
2973 RPC_MIN_SLOT_TABLE,
2974 RPC_MAX_SLOT_TABLE_LIMIT);
2975}
2976
2977static struct kernel_param_ops param_ops_max_slot_table_size = {
2978 .set = param_set_max_slot_table_size,
2979 .get = param_get_uint,
2980};
2981
2982#define param_check_max_slot_table_size(name, p) \
2983 __param_check(name, p, unsigned int);
2984
2950module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, 2985module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
2951 slot_table_size, 0644); 2986 slot_table_size, 0644);
2987module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
2988 max_slot_table_size, 0644);
2952module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, 2989module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
2953 slot_table_size, 0644); 2990 slot_table_size, 0644);
2954 2991
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 39d66dc2b8e9..26b46ff74663 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -86,7 +86,7 @@ static void ima_check_last_writer(struct ima_iint_cache *iint,
86 struct inode *inode, 86 struct inode *inode,
87 struct file *file) 87 struct file *file)
88{ 88{
89 mode_t mode = file->f_mode; 89 fmode_t mode = file->f_mode;
90 90
91 mutex_lock(&iint->mutex); 91 mutex_lock(&iint->mutex);
92 if (mode & FMODE_WRITE && 92 if (mode & FMODE_WRITE &&
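file->f_mode carries kernel open-mode flags (FMODE_READ, FMODE_WRITE and friends), not the octal permission bits that mode_t describes, so ima_check_last_writer() switches the local variable to the dedicated fmode_t type; the msnd driver below gets the same type fix. A minimal sketch of the intended usage, assuming <linux/fs.h>:

static bool example_opened_for_write(const struct file *file)
{
	fmode_t mode = file->f_mode;

	return (mode & FMODE_WRITE) != 0;
}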
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index f1341308beda..86d0caf91b35 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -128,7 +128,8 @@ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_ufram
128 } 128 }
129} 129}
130 130
131static void pcm_debug_name(struct snd_pcm_substream *substream, 131#ifdef CONFIG_SND_DEBUG
132void snd_pcm_debug_name(struct snd_pcm_substream *substream,
132 char *name, size_t len) 133 char *name, size_t len)
133{ 134{
134 snprintf(name, len, "pcmC%dD%d%c:%d", 135 snprintf(name, len, "pcmC%dD%d%c:%d",
@@ -137,6 +138,8 @@ static void pcm_debug_name(struct snd_pcm_substream *substream,
137 substream->stream ? 'c' : 'p', 138 substream->stream ? 'c' : 'p',
138 substream->number); 139 substream->number);
139} 140}
141EXPORT_SYMBOL(snd_pcm_debug_name);
142#endif
140 143
141#define XRUN_DEBUG_BASIC (1<<0) 144#define XRUN_DEBUG_BASIC (1<<0)
142#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */ 145#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
@@ -168,7 +171,7 @@ static void xrun(struct snd_pcm_substream *substream)
168 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); 171 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
169 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { 172 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
170 char name[16]; 173 char name[16];
171 pcm_debug_name(substream, name, sizeof(name)); 174 snd_pcm_debug_name(substream, name, sizeof(name));
172 snd_printd(KERN_DEBUG "XRUN: %s\n", name); 175 snd_printd(KERN_DEBUG "XRUN: %s\n", name);
173 dump_stack_on_xrun(substream); 176 dump_stack_on_xrun(substream);
174 } 177 }
@@ -243,7 +246,7 @@ static void xrun_log_show(struct snd_pcm_substream *substream)
243 return; 246 return;
244 if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit) 247 if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit)
245 return; 248 return;
246 pcm_debug_name(substream, name, sizeof(name)); 249 snd_pcm_debug_name(substream, name, sizeof(name));
247 for (cnt = 0, idx = log->idx; cnt < XRUN_LOG_CNT; cnt++) { 250 for (cnt = 0, idx = log->idx; cnt < XRUN_LOG_CNT; cnt++) {
248 entry = &log->entries[idx]; 251 entry = &log->entries[idx];
249 if (entry->period_size == 0) 252 if (entry->period_size == 0)
@@ -319,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
319 if (pos >= runtime->buffer_size) { 322 if (pos >= runtime->buffer_size) {
320 if (printk_ratelimit()) { 323 if (printk_ratelimit()) {
321 char name[16]; 324 char name[16];
322 pcm_debug_name(substream, name, sizeof(name)); 325 snd_pcm_debug_name(substream, name, sizeof(name));
323 xrun_log_show(substream); 326 xrun_log_show(substream);
324 snd_printd(KERN_ERR "BUG: %s, pos = %ld, " 327 snd_printd(KERN_ERR "BUG: %s, pos = %ld, "
325 "buffer size = %ld, period size = %ld\n", 328 "buffer size = %ld, period size = %ld\n",
@@ -364,7 +367,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
364 if (xrun_debug(substream, in_interrupt ? 367 if (xrun_debug(substream, in_interrupt ?
365 XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) { 368 XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
366 char name[16]; 369 char name[16];
367 pcm_debug_name(substream, name, sizeof(name)); 370 snd_pcm_debug_name(substream, name, sizeof(name));
368 snd_printd("%s_update: %s: pos=%u/%u/%u, " 371 snd_printd("%s_update: %s: pos=%u/%u/%u, "
369 "hwptr=%ld/%ld/%ld/%ld\n", 372 "hwptr=%ld/%ld/%ld/%ld\n",
370 in_interrupt ? "period" : "hwptr", 373 in_interrupt ? "period" : "hwptr",
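The former static pcm_debug_name() becomes snd_pcm_debug_name(), compiled only under CONFIG_SND_DEBUG and exported, so drivers can stop carrying private copies (the asihpi duplicate is deleted below); its declaration presumably moves into <sound/pcm.h> as part of the same series, which is outside this diff. A hedged sketch of a driver-side call:

/* Sketch, assuming CONFIG_SND_DEBUG: format "pcmC%dD%d%c:%d" for a
 * substream and log it.
 */
static void example_log_substream(struct snd_pcm_substream *substream)
{
	char name[16];

	snd_pcm_debug_name(substream, name, sizeof(name));
	snd_printd(KERN_DEBUG "starting %s\n", name);
}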
diff --git a/sound/isa/msnd/msnd.h b/sound/isa/msnd/msnd.h
index 3773e242b58e..a168ba3313ac 100644
--- a/sound/isa/msnd/msnd.h
+++ b/sound/isa/msnd/msnd.h
@@ -249,7 +249,7 @@ struct snd_msnd {
249 249
250 /* State variables */ 250 /* State variables */
251 enum { msndClassic, msndPinnacle } type; 251 enum { msndClassic, msndPinnacle } type;
252 mode_t mode; 252 fmode_t mode;
253 unsigned long flags; 253 unsigned long flags;
254#define F_RESETTING 0 254#define F_RESETTING 0
255#define F_HAVEDIGITAL 1 255#define F_HAVEDIGITAL 1
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index b941d2541dda..eae62ebbd295 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -41,31 +41,10 @@
41#include <sound/tlv.h> 41#include <sound/tlv.h>
42#include <sound/hwdep.h> 42#include <sound/hwdep.h>
43 43
44
45MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
46MODULE_AUTHOR("AudioScience inc. <support@audioscience.com>"); 45MODULE_AUTHOR("AudioScience inc. <support@audioscience.com>");
47MODULE_DESCRIPTION("AudioScience ALSA ASI5000 ASI6000 ASI87xx ASI89xx"); 46MODULE_DESCRIPTION("AudioScience ALSA ASI5000 ASI6000 ASI87xx ASI89xx");
48 47
49#if defined CONFIG_SND_DEBUG
50/* copied from pcm_lib.c, hope later patch will make that version public
51and this copy can be removed */
52static inline void
53snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
54{
55 snprintf(buf, size, "pcmC%dD%d%c:%d",
56 substream->pcm->card->number,
57 substream->pcm->device,
58 substream->stream ? 'c' : 'p',
59 substream->number);
60}
61#else
62static inline void
63snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
64{
65 *buf = 0;
66}
67#endif
68
69#if defined CONFIG_SND_DEBUG_VERBOSE 48#if defined CONFIG_SND_DEBUG_VERBOSE
70/** 49/**
71 * snd_printddd - very verbose debug printk 50 * snd_printddd - very verbose debug printk
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 7489b4608551..bb7e102d6726 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -243,6 +243,7 @@ config SND_HDA_GENERIC
243 243
244config SND_HDA_POWER_SAVE 244config SND_HDA_POWER_SAVE
245 bool "Aggressive power-saving on HD-audio" 245 bool "Aggressive power-saving on HD-audio"
246 depends on PM
246 help 247 help
247 Say Y here to enable more aggressive power-saving mode on 248 Say Y here to enable more aggressive power-saving mode on
248 HD-audio driver. The power-saving timeout can be configured 249 HD-audio driver. The power-saving timeout can be configured
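Making SND_HDA_POWER_SAVE depend on PM removes the reason for the old SND_HDA_NEEDS_RESUME umbrella define (set when either CONFIG_PM or CONFIG_SND_HDA_POWER_SAVE was enabled), so the hda_codec.c, hda_codec.h, hda_local.h and codec-patch hunks below replace every SND_HDA_NEEDS_RESUME guard with plain CONFIG_PM. Sketch of the before/after guard convention:

/* Before (removed further down in hda_codec.h):
 *	#if defined(CONFIG_PM) || defined(CONFIG_SND_HDA_POWER_SAVE)
 *	#define SND_HDA_NEEDS_RESUME
 *	#endif
 * After: resume-only code is guarded directly.
 */
#ifdef CONFIG_PM
static int example_codec_resume(struct hda_codec *codec)
{
	return 0;	/* illustrative no-op resume hook */
}
#endif /* CONFIG_PM */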
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9c27a3a4c4d5..3e7850c238c3 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -91,8 +91,10 @@ EXPORT_SYMBOL_HDA(snd_hda_delete_codec_preset);
91#ifdef CONFIG_SND_HDA_POWER_SAVE 91#ifdef CONFIG_SND_HDA_POWER_SAVE
92static void hda_power_work(struct work_struct *work); 92static void hda_power_work(struct work_struct *work);
93static void hda_keep_power_on(struct hda_codec *codec); 93static void hda_keep_power_on(struct hda_codec *codec);
94#define hda_codec_is_power_on(codec) ((codec)->power_on)
94#else 95#else
95static inline void hda_keep_power_on(struct hda_codec *codec) {} 96static inline void hda_keep_power_on(struct hda_codec *codec) {}
97#define hda_codec_is_power_on(codec) 1
96#endif 98#endif
97 99
98/** 100/**
@@ -1101,7 +1103,7 @@ void snd_hda_shutup_pins(struct hda_codec *codec)
1101} 1103}
1102EXPORT_SYMBOL_HDA(snd_hda_shutup_pins); 1104EXPORT_SYMBOL_HDA(snd_hda_shutup_pins);
1103 1105
1104#ifdef SND_HDA_NEEDS_RESUME 1106#ifdef CONFIG_PM
1105/* Restore the pin controls cleared previously via snd_hda_shutup_pins() */ 1107/* Restore the pin controls cleared previously via snd_hda_shutup_pins() */
1106static void restore_shutup_pins(struct hda_codec *codec) 1108static void restore_shutup_pins(struct hda_codec *codec)
1107{ 1109{
@@ -1499,7 +1501,7 @@ static void purify_inactive_streams(struct hda_codec *codec)
1499 } 1501 }
1500} 1502}
1501 1503
1502#ifdef SND_HDA_NEEDS_RESUME 1504#ifdef CONFIG_PM
1503/* clean up all streams; called from suspend */ 1505/* clean up all streams; called from suspend */
1504static void hda_cleanup_all_streams(struct hda_codec *codec) 1506static void hda_cleanup_all_streams(struct hda_codec *codec)
1505{ 1507{
@@ -1838,7 +1840,7 @@ int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid,
1838} 1840}
1839EXPORT_SYMBOL_HDA(snd_hda_codec_amp_stereo); 1841EXPORT_SYMBOL_HDA(snd_hda_codec_amp_stereo);
1840 1842
1841#ifdef SND_HDA_NEEDS_RESUME 1843#ifdef CONFIG_PM
1842/** 1844/**
1843 * snd_hda_codec_resume_amp - Resume all AMP commands from the cache 1845 * snd_hda_codec_resume_amp - Resume all AMP commands from the cache
1844 * @codec: HD-audio codec 1846 * @codec: HD-audio codec
@@ -1868,7 +1870,7 @@ void snd_hda_codec_resume_amp(struct hda_codec *codec)
1868 } 1870 }
1869} 1871}
1870EXPORT_SYMBOL_HDA(snd_hda_codec_resume_amp); 1872EXPORT_SYMBOL_HDA(snd_hda_codec_resume_amp);
1871#endif /* SND_HDA_NEEDS_RESUME */ 1873#endif /* CONFIG_PM */
1872 1874
1873static u32 get_amp_max_value(struct hda_codec *codec, hda_nid_t nid, int dir, 1875static u32 get_amp_max_value(struct hda_codec *codec, hda_nid_t nid, int dir,
1874 unsigned int ofs) 1876 unsigned int ofs)
@@ -3082,7 +3084,7 @@ int snd_hda_create_spdif_in_ctls(struct hda_codec *codec, hda_nid_t nid)
3082} 3084}
3083EXPORT_SYMBOL_HDA(snd_hda_create_spdif_in_ctls); 3085EXPORT_SYMBOL_HDA(snd_hda_create_spdif_in_ctls);
3084 3086
3085#ifdef SND_HDA_NEEDS_RESUME 3087#ifdef CONFIG_PM
3086/* 3088/*
3087 * command cache 3089 * command cache
3088 */ 3090 */
@@ -3199,53 +3201,32 @@ void snd_hda_sequence_write_cache(struct hda_codec *codec,
3199 seq->param); 3201 seq->param);
3200} 3202}
3201EXPORT_SYMBOL_HDA(snd_hda_sequence_write_cache); 3203EXPORT_SYMBOL_HDA(snd_hda_sequence_write_cache);
3202#endif /* SND_HDA_NEEDS_RESUME */ 3204#endif /* CONFIG_PM */
3203 3205
3204/* 3206void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
3205 * set power state of the codec 3207 unsigned int power_state,
3206 */ 3208 bool eapd_workaround)
3207static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
3208 unsigned int power_state)
3209{ 3209{
3210 hda_nid_t nid; 3210 hda_nid_t nid = codec->start_nid;
3211 int i; 3211 int i;
3212 3212
3213 /* this delay seems necessary to avoid click noise at power-down */
3214 if (power_state == AC_PWRST_D3)
3215 msleep(100);
3216 snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
3217 power_state);
3218 /* partial workaround for "azx_get_response timeout" */
3219 if (power_state == AC_PWRST_D0 &&
3220 (codec->vendor_id & 0xffff0000) == 0x14f10000)
3221 msleep(10);
3222
3223 nid = codec->start_nid;
3224 for (i = 0; i < codec->num_nodes; i++, nid++) { 3213 for (i = 0; i < codec->num_nodes; i++, nid++) {
3225 unsigned int wcaps = get_wcaps(codec, nid); 3214 unsigned int wcaps = get_wcaps(codec, nid);
3226 if (wcaps & AC_WCAP_POWER) { 3215 if (!(wcaps & AC_WCAP_POWER))
3227 unsigned int wid_type = get_wcaps_type(wcaps); 3216 continue;
3228 if (power_state == AC_PWRST_D3 && 3217 /* don't power down the widget if it controls eapd and
3229 wid_type == AC_WID_PIN) { 3218 * EAPD_BTLENABLE is set.
3230 unsigned int pincap; 3219 */
3231 /* 3220 if (eapd_workaround && power_state == AC_PWRST_D3 &&
3232 * don't power down the widget if it controls 3221 get_wcaps_type(wcaps) == AC_WID_PIN &&
3233 * eapd and EAPD_BTLENABLE is set. 3222 (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_EAPD)) {
3234 */ 3223 int eapd = snd_hda_codec_read(codec, nid, 0,
3235 pincap = snd_hda_query_pin_caps(codec, nid);
3236 if (pincap & AC_PINCAP_EAPD) {
3237 int eapd = snd_hda_codec_read(codec,
3238 nid, 0,
3239 AC_VERB_GET_EAPD_BTLENABLE, 0); 3224 AC_VERB_GET_EAPD_BTLENABLE, 0);
3240 eapd &= 0x02; 3225 if (eapd & 0x02)
3241 if (eapd) 3226 continue;
3242 continue;
3243 }
3244 }
3245 snd_hda_codec_write(codec, nid, 0,
3246 AC_VERB_SET_POWER_STATE,
3247 power_state);
3248 } 3227 }
3228 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE,
3229 power_state);
3249 } 3230 }
3250 3231
3251 if (power_state == AC_PWRST_D0) { 3232 if (power_state == AC_PWRST_D0) {
@@ -3262,6 +3243,26 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
3262 } while (time_after_eq(end_time, jiffies)); 3243 } while (time_after_eq(end_time, jiffies));
3263 } 3244 }
3264} 3245}
3246EXPORT_SYMBOL_HDA(snd_hda_codec_set_power_to_all);
3247
3248/*
3249 * set power state of the codec
3250 */
3251static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
3252 unsigned int power_state)
3253{
3254 if (codec->patch_ops.set_power_state) {
3255 codec->patch_ops.set_power_state(codec, fg, power_state);
3256 return;
3257 }
3258
3259 /* this delay seems necessary to avoid click noise at power-down */
3260 if (power_state == AC_PWRST_D3)
3261 msleep(100);
3262 snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
3263 power_state);
3264 snd_hda_codec_set_power_to_all(codec, fg, power_state, true);
3265}
3265 3266
3266#ifdef CONFIG_SND_HDA_HWDEP 3267#ifdef CONFIG_SND_HDA_HWDEP
3267/* execute additional init verbs */ 3268/* execute additional init verbs */
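The widget-walking half of hda_set_power_state() is split out as the exported snd_hda_codec_set_power_to_all(), with the EAPD pin workaround behind a bool, and codecs may now take over the whole sequence through the new patch_ops.set_power_state hook; the generic path keeps the 100 ms delay before D3 and then calls the helper. A hedged sketch of a codec patch combining the two (the function-group write simply mirrors what the generic code does; any codec-specific tweak would replace it):

static void example_set_power_state(struct hda_codec *codec, hda_nid_t fg,
				    unsigned int power_state)
{
	/* codec-specific preparation could go here */
	snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
			   power_state);
	snd_hda_codec_set_power_to_all(codec, fg, power_state, true);
}

static const struct hda_codec_ops example_patch_ops = {
	.set_power_state = example_set_power_state,
};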
@@ -3274,7 +3275,7 @@ static void hda_exec_init_verbs(struct hda_codec *codec)
3274static inline void hda_exec_init_verbs(struct hda_codec *codec) {} 3275static inline void hda_exec_init_verbs(struct hda_codec *codec) {}
3275#endif 3276#endif
3276 3277
3277#ifdef SND_HDA_NEEDS_RESUME 3278#ifdef CONFIG_PM
3278/* 3279/*
3279 * call suspend and power-down; used both from PM and power-save 3280 * call suspend and power-down; used both from PM and power-save
3280 */ 3281 */
@@ -3315,7 +3316,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
3315 snd_hda_codec_resume_cache(codec); 3316 snd_hda_codec_resume_cache(codec);
3316 } 3317 }
3317} 3318}
3318#endif /* SND_HDA_NEEDS_RESUME */ 3319#endif /* CONFIG_PM */
3319 3320
3320 3321
3321/** 3322/**
@@ -4071,9 +4072,6 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
4071EXPORT_SYMBOL_HDA(snd_hda_add_new_ctls); 4072EXPORT_SYMBOL_HDA(snd_hda_add_new_ctls);
4072 4073
4073#ifdef CONFIG_SND_HDA_POWER_SAVE 4074#ifdef CONFIG_SND_HDA_POWER_SAVE
4074static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
4075 unsigned int power_state);
4076
4077static void hda_power_work(struct work_struct *work) 4075static void hda_power_work(struct work_struct *work)
4078{ 4076{
4079 struct hda_codec *codec = 4077 struct hda_codec *codec =
@@ -4376,11 +4374,8 @@ void snd_hda_bus_reboot_notify(struct hda_bus *bus)
4376 if (!bus) 4374 if (!bus)
4377 return; 4375 return;
4378 list_for_each_entry(codec, &bus->codec_list, list) { 4376 list_for_each_entry(codec, &bus->codec_list, list) {
4379#ifdef CONFIG_SND_HDA_POWER_SAVE 4377 if (hda_codec_is_power_on(codec) &&
4380 if (!codec->power_on) 4378 codec->patch_ops.reboot_notify)
4381 continue;
4382#endif
4383 if (codec->patch_ops.reboot_notify)
4384 codec->patch_ops.reboot_notify(codec); 4379 codec->patch_ops.reboot_notify(codec);
4385 } 4380 }
4386} 4381}
@@ -5079,11 +5074,10 @@ int snd_hda_suspend(struct hda_bus *bus)
5079 struct hda_codec *codec; 5074 struct hda_codec *codec;
5080 5075
5081 list_for_each_entry(codec, &bus->codec_list, list) { 5076 list_for_each_entry(codec, &bus->codec_list, list) {
5082#ifdef CONFIG_SND_HDA_POWER_SAVE 5077 if (hda_codec_is_power_on(codec))
5083 if (!codec->power_on) 5078 hda_call_codec_suspend(codec);
5084 continue; 5079 if (codec->patch_ops.post_suspend)
5085#endif 5080 codec->patch_ops.post_suspend(codec);
5086 hda_call_codec_suspend(codec);
5087 } 5081 }
5088 return 0; 5082 return 0;
5089} 5083}
@@ -5103,6 +5097,8 @@ int snd_hda_resume(struct hda_bus *bus)
5103 struct hda_codec *codec; 5097 struct hda_codec *codec;
5104 5098
5105 list_for_each_entry(codec, &bus->codec_list, list) { 5099 list_for_each_entry(codec, &bus->codec_list, list) {
5100 if (codec->patch_ops.pre_resume)
5101 codec->patch_ops.pre_resume(codec);
5106 if (snd_hda_codec_needs_resume(codec)) 5102 if (snd_hda_codec_needs_resume(codec))
5107 hda_call_codec_resume(codec); 5103 hda_call_codec_resume(codec);
5108 } 5104 }
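With hda_codec_is_power_on() defined for both configurations, snd_hda_bus_reboot_notify() and snd_hda_suspend() shed their CONFIG_SND_HDA_POWER_SAVE ifdefs, and codecs gain optional post_suspend/pre_resume callbacks that bracket the generic suspend and resume paths. A sketch of a codec patch using the new hooks (both fields sit under CONFIG_PM in hda_codec.h, hence the guard; the bodies are illustrative):

#ifdef CONFIG_PM
static int example_post_suspend(struct hda_codec *codec)
{
	/* e.g. cut external amplifier power once the codec is in D3 */
	return 0;
}

static int example_pre_resume(struct hda_codec *codec)
{
	/* e.g. restore external power before the codec is re-initialised */
	return 0;
}
#endif /* CONFIG_PM */

static const struct hda_codec_ops example_patch_ops = {
#ifdef CONFIG_PM
	.post_suspend	= example_post_suspend,
	.pre_resume	= example_pre_resume,
#endif
};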
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index f465e07a4879..755f2b0f9d8e 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -26,10 +26,6 @@
26#include <sound/pcm.h> 26#include <sound/pcm.h>
27#include <sound/hwdep.h> 27#include <sound/hwdep.h>
28 28
29#if defined(CONFIG_PM) || defined(CONFIG_SND_HDA_POWER_SAVE)
30#define SND_HDA_NEEDS_RESUME /* resume control code is required */
31#endif
32
33/* 29/*
34 * nodes 30 * nodes
35 */ 31 */
@@ -704,8 +700,12 @@ struct hda_codec_ops {
704 int (*init)(struct hda_codec *codec); 700 int (*init)(struct hda_codec *codec);
705 void (*free)(struct hda_codec *codec); 701 void (*free)(struct hda_codec *codec);
706 void (*unsol_event)(struct hda_codec *codec, unsigned int res); 702 void (*unsol_event)(struct hda_codec *codec, unsigned int res);
707#ifdef SND_HDA_NEEDS_RESUME 703 void (*set_power_state)(struct hda_codec *codec, hda_nid_t fg,
704 unsigned int power_state);
705#ifdef CONFIG_PM
708 int (*suspend)(struct hda_codec *codec, pm_message_t state); 706 int (*suspend)(struct hda_codec *codec, pm_message_t state);
707 int (*post_suspend)(struct hda_codec *codec);
708 int (*pre_resume)(struct hda_codec *codec);
709 int (*resume)(struct hda_codec *codec); 709 int (*resume)(struct hda_codec *codec);
710#endif 710#endif
711#ifdef CONFIG_SND_HDA_POWER_SAVE 711#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -927,7 +927,7 @@ void snd_hda_sequence_write(struct hda_codec *codec,
927int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex); 927int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex);
928 928
929/* cached write */ 929/* cached write */
930#ifdef SND_HDA_NEEDS_RESUME 930#ifdef CONFIG_PM
931int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid, 931int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
932 int direct, unsigned int verb, unsigned int parm); 932 int direct, unsigned int verb, unsigned int parm);
933void snd_hda_sequence_write_cache(struct hda_codec *codec, 933void snd_hda_sequence_write_cache(struct hda_codec *codec,
@@ -1008,6 +1008,9 @@ int snd_hda_is_supported_format(struct hda_codec *codec, hda_nid_t nid,
1008 */ 1008 */
1009void snd_hda_get_codec_name(struct hda_codec *codec, char *name, int namelen); 1009void snd_hda_get_codec_name(struct hda_codec *codec, char *name, int namelen);
1010void snd_hda_bus_reboot_notify(struct hda_bus *bus); 1010void snd_hda_bus_reboot_notify(struct hda_bus *bus);
1011void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
1012 unsigned int power_state,
1013 bool eapd_workaround);
1011 1014
1012/* 1015/*
1013 * power management 1016 * power management
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 88b277e97409..2e7ac31afa8d 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -131,7 +131,7 @@ int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, int ch,
131 int direction, int idx, int mask, int val); 131 int direction, int idx, int mask, int val);
132int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid, 132int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid,
133 int dir, int idx, int mask, int val); 133 int dir, int idx, int mask, int val);
134#ifdef SND_HDA_NEEDS_RESUME 134#ifdef CONFIG_PM
135void snd_hda_codec_resume_amp(struct hda_codec *codec); 135void snd_hda_codec_resume_amp(struct hda_codec *codec);
136#endif 136#endif
137 137
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 1362c8ba4d1f..8648917acffb 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -563,7 +563,7 @@ static void ad198x_free(struct hda_codec *codec)
563 snd_hda_detach_beep_device(codec); 563 snd_hda_detach_beep_device(codec);
564} 564}
565 565
566#ifdef SND_HDA_NEEDS_RESUME 566#ifdef CONFIG_PM
567static int ad198x_suspend(struct hda_codec *codec, pm_message_t state) 567static int ad198x_suspend(struct hda_codec *codec, pm_message_t state)
568{ 568{
569 ad198x_shutup(codec); 569 ad198x_shutup(codec);
@@ -579,7 +579,7 @@ static const struct hda_codec_ops ad198x_patch_ops = {
579#ifdef CONFIG_SND_HDA_POWER_SAVE 579#ifdef CONFIG_SND_HDA_POWER_SAVE
580 .check_power_status = ad198x_check_power_status, 580 .check_power_status = ad198x_check_power_status,
581#endif 581#endif
582#ifdef SND_HDA_NEEDS_RESUME 582#ifdef CONFIG_PM
583 .suspend = ad198x_suspend, 583 .suspend = ad198x_suspend,
584#endif 584#endif
585 .reboot_notify = ad198x_shutup, 585 .reboot_notify = ad198x_shutup,
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 7f93739b1e33..47d6ffc9b5b5 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -25,6 +25,7 @@
25#include <sound/core.h> 25#include <sound/core.h>
26#include "hda_codec.h" 26#include "hda_codec.h"
27#include "hda_local.h" 27#include "hda_local.h"
28#include <sound/tlv.h>
28 29
29/* 30/*
30 */ 31 */
@@ -61,9 +62,15 @@ struct cs_spec {
61 62
62 unsigned int hp_detect:1; 63 unsigned int hp_detect:1;
63 unsigned int mic_detect:1; 64 unsigned int mic_detect:1;
65 /* CS421x */
66 unsigned int spdif_detect:1;
67 unsigned int sense_b:1;
68 hda_nid_t vendor_nid;
69 struct hda_input_mux input_mux;
70 unsigned int last_input;
64}; 71};
65 72
66/* available models */ 73/* available models with CS420x */
67enum { 74enum {
68 CS420X_MBP53, 75 CS420X_MBP53,
69 CS420X_MBP55, 76 CS420X_MBP55,
@@ -72,6 +79,12 @@ enum {
72 CS420X_MODELS 79 CS420X_MODELS
73}; 80};
74 81
82/* CS421x boards */
83enum {
84 CS421X_CDB4210,
85 CS421X_MODELS
86};
87
75/* Vendor-specific processing widget */ 88/* Vendor-specific processing widget */
76#define CS420X_VENDOR_NID 0x11 89#define CS420X_VENDOR_NID 0x11
77#define CS_DIG_OUT1_PIN_NID 0x10 90#define CS_DIG_OUT1_PIN_NID 0x10
@@ -111,21 +124,42 @@ enum {
111/* 0x0009 - 0x0014 -> 12 test regs */ 124/* 0x0009 - 0x0014 -> 12 test regs */
112/* 0x0015 - visibility reg */ 125/* 0x0015 - visibility reg */
113 126
127/*
128 * Cirrus Logic CS4210
129 *
130 * 1 DAC => HP(sense) / Speakers,
131 * 1 ADC <= LineIn(sense) / MicIn / DMicIn,
 132	 * 1 SPDIF OUT => SPDIF Transmitter(sense)
133*/
134#define CS4210_DAC_NID 0x02
135#define CS4210_ADC_NID 0x03
136#define CS421X_VENDOR_NID 0x0B
137#define CS421X_DMIC_PIN_NID 0x09 /* Port E */
138#define CS421X_SPDIF_PIN_NID 0x0A /* Port H */
139
140#define CS421X_IDX_DEV_CFG 0x01
141#define CS421X_IDX_ADC_CFG 0x02
142#define CS421X_IDX_DAC_CFG 0x03
143#define CS421X_IDX_SPK_CTL 0x04
144
145#define SPDIF_EVENT 0x04
114 146
115static inline int cs_vendor_coef_get(struct hda_codec *codec, unsigned int idx) 147static inline int cs_vendor_coef_get(struct hda_codec *codec, unsigned int idx)
116{ 148{
117 snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0, 149 struct cs_spec *spec = codec->spec;
150 snd_hda_codec_write(codec, spec->vendor_nid, 0,
118 AC_VERB_SET_COEF_INDEX, idx); 151 AC_VERB_SET_COEF_INDEX, idx);
119 return snd_hda_codec_read(codec, CS420X_VENDOR_NID, 0, 152 return snd_hda_codec_read(codec, spec->vendor_nid, 0,
120 AC_VERB_GET_PROC_COEF, 0); 153 AC_VERB_GET_PROC_COEF, 0);
121} 154}
122 155
123static inline void cs_vendor_coef_set(struct hda_codec *codec, unsigned int idx, 156static inline void cs_vendor_coef_set(struct hda_codec *codec, unsigned int idx,
124 unsigned int coef) 157 unsigned int coef)
125{ 158{
126 snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0, 159 struct cs_spec *spec = codec->spec;
160 snd_hda_codec_write(codec, spec->vendor_nid, 0,
127 AC_VERB_SET_COEF_INDEX, idx); 161 AC_VERB_SET_COEF_INDEX, idx);
128 snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0, 162 snd_hda_codec_write(codec, spec->vendor_nid, 0,
129 AC_VERB_SET_PROC_COEF, coef); 163 AC_VERB_SET_PROC_COEF, coef);
130} 164}
131 165
@@ -347,15 +381,12 @@ static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin,
347 nid = codec->start_nid; 381 nid = codec->start_nid;
348 for (i = 0; i < codec->num_nodes; i++, nid++) { 382 for (i = 0; i < codec->num_nodes; i++, nid++) {
349 unsigned int type; 383 unsigned int type;
350 int idx;
351 type = get_wcaps_type(get_wcaps(codec, nid)); 384 type = get_wcaps_type(get_wcaps(codec, nid));
352 if (type != AC_WID_AUD_IN) 385 if (type != AC_WID_AUD_IN)
353 continue; 386 continue;
354 idx = snd_hda_get_conn_index(codec, nid, pin, 0); 387 *idxp = snd_hda_get_conn_index(codec, nid, pin, false);
355 if (idx >= 0) { 388 if (*idxp >= 0)
356 *idxp = idx;
357 return nid; 389 return nid;
358 }
359 } 390 }
360 return 0; 391 return 0;
361} 392}
@@ -835,6 +866,8 @@ static int build_digital_input(struct hda_codec *codec)
835 866
836/* 867/*
837 * auto-mute and auto-mic switching 868 * auto-mute and auto-mic switching
869 * CS421x auto-output redirecting
870 * HP/SPK/SPDIF
838 */ 871 */
839 872
840static void cs_automute(struct hda_codec *codec) 873static void cs_automute(struct hda_codec *codec)
@@ -842,9 +875,25 @@ static void cs_automute(struct hda_codec *codec)
842 struct cs_spec *spec = codec->spec; 875 struct cs_spec *spec = codec->spec;
843 struct auto_pin_cfg *cfg = &spec->autocfg; 876 struct auto_pin_cfg *cfg = &spec->autocfg;
844 unsigned int hp_present; 877 unsigned int hp_present;
878 unsigned int spdif_present;
845 hda_nid_t nid; 879 hda_nid_t nid;
846 int i; 880 int i;
847 881
882 spdif_present = 0;
883 if (cfg->dig_outs) {
884 nid = cfg->dig_out_pins[0];
885 if (is_jack_detectable(codec, nid)) {
886 /*
887 TODO: SPDIF output redirect when SENSE_B is enabled.
 888			Shared (SENSE_A) jack (e.g. HP/mini-TOSLINK)
889 assumed.
890 */
891 if (snd_hda_jack_detect(codec, nid)
892 /* && spec->sense_b */)
893 spdif_present = 1;
894 }
895 }
896
848 hp_present = 0; 897 hp_present = 0;
849 for (i = 0; i < cfg->hp_outs; i++) { 898 for (i = 0; i < cfg->hp_outs; i++) {
850 nid = cfg->hp_pins[i]; 899 nid = cfg->hp_pins[i];
@@ -854,11 +903,19 @@ static void cs_automute(struct hda_codec *codec)
854 if (hp_present) 903 if (hp_present)
855 break; 904 break;
856 } 905 }
906
907 /* mute speakers if spdif or hp jack is plugged in */
857 for (i = 0; i < cfg->speaker_outs; i++) { 908 for (i = 0; i < cfg->speaker_outs; i++) {
858 nid = cfg->speaker_pins[i]; 909 nid = cfg->speaker_pins[i];
859 snd_hda_codec_write(codec, nid, 0, 910 snd_hda_codec_write(codec, nid, 0,
860 AC_VERB_SET_PIN_WIDGET_CONTROL, 911 AC_VERB_SET_PIN_WIDGET_CONTROL,
861 hp_present ? 0 : PIN_OUT); 912 hp_present ? 0 : PIN_OUT);
913 /* detect on spdif is specific to CS421x */
914 if (spec->vendor_nid == CS421X_VENDOR_NID) {
915 snd_hda_codec_write(codec, nid, 0,
916 AC_VERB_SET_PIN_WIDGET_CONTROL,
917 spdif_present ? 0 : PIN_OUT);
918 }
862 } 919 }
863 if (spec->board_config == CS420X_MBP53 || 920 if (spec->board_config == CS420X_MBP53 ||
864 spec->board_config == CS420X_MBP55 || 921 spec->board_config == CS420X_MBP55 ||
@@ -867,21 +924,62 @@ static void cs_automute(struct hda_codec *codec)
867 snd_hda_codec_write(codec, 0x01, 0, 924 snd_hda_codec_write(codec, 0x01, 0,
868 AC_VERB_SET_GPIO_DATA, gpio); 925 AC_VERB_SET_GPIO_DATA, gpio);
869 } 926 }
927
928 /* specific to CS421x */
929 if (spec->vendor_nid == CS421X_VENDOR_NID) {
930 /* mute HPs if spdif jack (SENSE_B) is present */
931 for (i = 0; i < cfg->hp_outs; i++) {
932 nid = cfg->hp_pins[i];
933 snd_hda_codec_write(codec, nid, 0,
934 AC_VERB_SET_PIN_WIDGET_CONTROL,
935 (spdif_present && spec->sense_b) ? 0 : PIN_HP);
936 }
937
938 /* SPDIF TX on/off */
939 if (cfg->dig_outs) {
940 nid = cfg->dig_out_pins[0];
941 snd_hda_codec_write(codec, nid, 0,
942 AC_VERB_SET_PIN_WIDGET_CONTROL,
943 spdif_present ? PIN_OUT : 0);
944
945 }
 946		/* Update board GPIOs if necessary ... */
947 }
870} 948}
871 949
950/*
951 * Auto-input redirect for CS421x
952 * Switch max 3 inputs of a single ADC (nid 3)
953*/
954
872static void cs_automic(struct hda_codec *codec) 955static void cs_automic(struct hda_codec *codec)
873{ 956{
874 struct cs_spec *spec = codec->spec; 957 struct cs_spec *spec = codec->spec;
875 struct auto_pin_cfg *cfg = &spec->autocfg; 958 struct auto_pin_cfg *cfg = &spec->autocfg;
876 hda_nid_t nid; 959 hda_nid_t nid;
877 unsigned int present; 960 unsigned int present;
878 961
879 nid = cfg->inputs[spec->automic_idx].pin; 962 nid = cfg->inputs[spec->automic_idx].pin;
880 present = snd_hda_jack_detect(codec, nid); 963 present = snd_hda_jack_detect(codec, nid);
881 if (present) 964
882 change_cur_input(codec, spec->automic_idx, 0); 965 /* specific to CS421x, single ADC */
883 else 966 if (spec->vendor_nid == CS421X_VENDOR_NID) {
884 change_cur_input(codec, !spec->automic_idx, 0); 967 if (present) {
968 spec->last_input = spec->cur_input;
969 spec->cur_input = spec->automic_idx;
970 } else {
971 spec->cur_input = spec->last_input;
972 }
973
974 snd_hda_codec_write_cache(codec, spec->cur_adc, 0,
975 AC_VERB_SET_CONNECT_SEL,
976 spec->adc_idx[spec->cur_input]);
977 } else {
978 if (present)
979 change_cur_input(codec, spec->automic_idx, 0);
980 else
981 change_cur_input(codec, !spec->automic_idx, 0);
982 }
885} 983}
886 984
887/* 985/*
@@ -911,23 +1009,28 @@ static void init_output(struct hda_codec *codec)
911 for (i = 0; i < cfg->line_outs; i++) 1009 for (i = 0; i < cfg->line_outs; i++)
912 snd_hda_codec_write(codec, cfg->line_out_pins[i], 0, 1010 snd_hda_codec_write(codec, cfg->line_out_pins[i], 0,
913 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); 1011 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
1012 /* HP */
914 for (i = 0; i < cfg->hp_outs; i++) { 1013 for (i = 0; i < cfg->hp_outs; i++) {
915 hda_nid_t nid = cfg->hp_pins[i]; 1014 hda_nid_t nid = cfg->hp_pins[i];
916 snd_hda_codec_write(codec, nid, 0, 1015 snd_hda_codec_write(codec, nid, 0,
917 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP); 1016 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
918 if (!cfg->speaker_outs) 1017 if (!cfg->speaker_outs)
919 continue; 1018 continue;
920 if (is_jack_detectable(codec, nid)) { 1019 if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) {
921 snd_hda_codec_write(codec, nid, 0, 1020 snd_hda_codec_write(codec, nid, 0,
922 AC_VERB_SET_UNSOLICITED_ENABLE, 1021 AC_VERB_SET_UNSOLICITED_ENABLE,
923 AC_USRSP_EN | HP_EVENT); 1022 AC_USRSP_EN | HP_EVENT);
924 spec->hp_detect = 1; 1023 spec->hp_detect = 1;
925 } 1024 }
926 } 1025 }
1026
1027 /* Speaker */
927 for (i = 0; i < cfg->speaker_outs; i++) 1028 for (i = 0; i < cfg->speaker_outs; i++)
928 snd_hda_codec_write(codec, cfg->speaker_pins[i], 0, 1029 snd_hda_codec_write(codec, cfg->speaker_pins[i], 0,
929 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); 1030 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
930 if (spec->hp_detect) 1031
1032 /* SPDIF is enabled on presence detect for CS421x */
1033 if (spec->hp_detect || spec->spdif_detect)
931 cs_automute(codec); 1034 cs_automute(codec);
932} 1035}
933 1036
@@ -961,19 +1064,31 @@ static void init_input(struct hda_codec *codec)
961 AC_VERB_SET_UNSOLICITED_ENABLE, 1064 AC_VERB_SET_UNSOLICITED_ENABLE,
962 AC_USRSP_EN | MIC_EVENT); 1065 AC_USRSP_EN | MIC_EVENT);
963 } 1066 }
964 change_cur_input(codec, spec->cur_input, 1); 1067 /* specific to CS421x */
965 if (spec->mic_detect) 1068 if (spec->vendor_nid == CS421X_VENDOR_NID) {
966 cs_automic(codec); 1069 if (spec->mic_detect)
967 1070 cs_automic(codec);
968 coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */ 1071 else {
969 if (is_active_pin(codec, CS_DMIC2_PIN_NID)) 1072 spec->cur_adc = spec->adc_nid[spec->cur_input];
970 coef |= 0x0500; /* DMIC2 enable 2 channels, disable GPIO1 */ 1073 snd_hda_codec_write(codec, spec->cur_adc, 0,
971 if (is_active_pin(codec, CS_DMIC1_PIN_NID)) 1074 AC_VERB_SET_CONNECT_SEL,
972 coef |= 0x1800; /* DMIC1 enable 2 channels, disable GPIO0 1075 spec->adc_idx[spec->cur_input]);
973 * No effect if SPDIF_OUT2 is selected in 1076 }
974 * IDX_SPDIF_CTL. 1077 } else {
975 */ 1078 change_cur_input(codec, spec->cur_input, 1);
976 cs_vendor_coef_set(codec, IDX_ADC_CFG, coef); 1079 if (spec->mic_detect)
1080 cs_automic(codec);
1081
1082 coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
1083 if (is_active_pin(codec, CS_DMIC2_PIN_NID))
1084 coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */
1085 if (is_active_pin(codec, CS_DMIC1_PIN_NID))
1086 coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off
1087 * No effect if SPDIF_OUT2 is
1088 * selected in IDX_SPDIF_CTL.
1089 */
1090 cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
1091 }
977} 1092}
978 1093
979static const struct hda_verb cs_coef_init_verbs[] = { 1094static const struct hda_verb cs_coef_init_verbs[] = {
@@ -1221,16 +1336,16 @@ static const struct cs_pincfg *cs_pincfgs[CS420X_MODELS] = {
1221 [CS420X_IMAC27] = imac27_pincfgs, 1336 [CS420X_IMAC27] = imac27_pincfgs,
1222}; 1337};
1223 1338
1224static void fix_pincfg(struct hda_codec *codec, int model) 1339static void fix_pincfg(struct hda_codec *codec, int model,
1340 const struct cs_pincfg **pin_configs)
1225{ 1341{
1226 const struct cs_pincfg *cfg = cs_pincfgs[model]; 1342 const struct cs_pincfg *cfg = pin_configs[model];
1227 if (!cfg) 1343 if (!cfg)
1228 return; 1344 return;
1229 for (; cfg->nid; cfg++) 1345 for (; cfg->nid; cfg++)
1230 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); 1346 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
1231} 1347}
1232 1348
1233
1234static int patch_cs420x(struct hda_codec *codec) 1349static int patch_cs420x(struct hda_codec *codec)
1235{ 1350{
1236 struct cs_spec *spec; 1351 struct cs_spec *spec;
@@ -1241,11 +1356,13 @@ static int patch_cs420x(struct hda_codec *codec)
1241 return -ENOMEM; 1356 return -ENOMEM;
1242 codec->spec = spec; 1357 codec->spec = spec;
1243 1358
1359 spec->vendor_nid = CS420X_VENDOR_NID;
1360
1244 spec->board_config = 1361 spec->board_config =
1245 snd_hda_check_board_config(codec, CS420X_MODELS, 1362 snd_hda_check_board_config(codec, CS420X_MODELS,
1246 cs420x_models, cs420x_cfg_tbl); 1363 cs420x_models, cs420x_cfg_tbl);
1247 if (spec->board_config >= 0) 1364 if (spec->board_config >= 0)
1248 fix_pincfg(codec, spec->board_config); 1365 fix_pincfg(codec, spec->board_config, cs_pincfgs);
1249 1366
1250 switch (spec->board_config) { 1367 switch (spec->board_config) {
1251 case CS420X_IMAC27: 1368 case CS420X_IMAC27:
@@ -1272,6 +1389,562 @@ static int patch_cs420x(struct hda_codec *codec)
1272 return err; 1389 return err;
1273} 1390}
1274 1391
1392/*
1393 * Cirrus Logic CS4210
1394 *
1395 * 1 DAC => HP(sense) / Speakers,
1396 * 1 ADC <= LineIn(sense) / MicIn / DMicIn,
1397 * 1 SPDIF OUT => SPDIF Transmitter(sense)
1398*/
1399
1400/* CS4210 board names */
1401static const char *cs421x_models[CS421X_MODELS] = {
1402 [CS421X_CDB4210] = "cdb4210",
1403};
1404
1405static const struct snd_pci_quirk cs421x_cfg_tbl[] = {
1406 /* Test Intel board + CDB2410 */
1407 SND_PCI_QUIRK(0x8086, 0x5001, "DP45SG/CDB4210", CS421X_CDB4210),
1408 {} /* terminator */
1409};
1410
1411/* CS4210 board pinconfigs */
1412/* Default CS4210 (CDB4210)*/
1413static const struct cs_pincfg cdb4210_pincfgs[] = {
1414 { 0x05, 0x0321401f },
1415 { 0x06, 0x90170010 },
1416 { 0x07, 0x03813031 },
1417 { 0x08, 0xb7a70037 },
1418 { 0x09, 0xb7a6003e },
1419 { 0x0a, 0x034510f0 },
1420 {} /* terminator */
1421};
1422
1423static const struct cs_pincfg *cs421x_pincfgs[CS421X_MODELS] = {
1424 [CS421X_CDB4210] = cdb4210_pincfgs,
1425};
1426
1427static const struct hda_verb cs421x_coef_init_verbs[] = {
1428 {0x0B, AC_VERB_SET_PROC_STATE, 1},
1429 {0x0B, AC_VERB_SET_COEF_INDEX, CS421X_IDX_DEV_CFG},
1430 /*
1431 Disable Coefficient Index Auto-Increment(DAI)=1,
1432 PDREF=0
1433 */
1434 {0x0B, AC_VERB_SET_PROC_COEF, 0x0001 },
1435
1436 {0x0B, AC_VERB_SET_COEF_INDEX, CS421X_IDX_ADC_CFG},
1437 /* ADC SZCMode = Digital Soft Ramp */
1438 {0x0B, AC_VERB_SET_PROC_COEF, 0x0002 },
1439
1440 {0x0B, AC_VERB_SET_COEF_INDEX, CS421X_IDX_DAC_CFG},
1441 {0x0B, AC_VERB_SET_PROC_COEF,
1442 (0x0002 /* DAC SZCMode = Digital Soft Ramp */
1443 | 0x0004 /* Mute DAC on FIFO error */
1444 | 0x0008 /* Enable DAC High Pass Filter */
1445 )},
1446 {} /* terminator */
1447};
1448
1449/* Errata: CS4210 rev A1 Silicon
1450 *
1451 * http://www.cirrus.com/en/pubs/errata/
1452 *
1453 * Description:
1454 * 1. Performance degradation is present in the ADC.
1455 * 2. Speaker output is not completely muted upon HP detect.
1456 * 3. Noise is present when clipping occurs on the amplified
1457 * speaker outputs.
1458 *
1459 * Workaround:
1460 * The following verb sequence written to the registers during
1461 * initialization will correct the issues listed above.
1462 */
1463
1464static const struct hda_verb cs421x_coef_init_verbs_A1_silicon_fixes[] = {
1465 {0x0B, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
1466
1467 {0x0B, AC_VERB_SET_COEF_INDEX, 0x0006},
1468 {0x0B, AC_VERB_SET_PROC_COEF, 0x9999}, /* Test mode: on */
1469
1470 {0x0B, AC_VERB_SET_COEF_INDEX, 0x000A},
1471 {0x0B, AC_VERB_SET_PROC_COEF, 0x14CB}, /* Chop double */
1472
1473 {0x0B, AC_VERB_SET_COEF_INDEX, 0x0011},
1474 {0x0B, AC_VERB_SET_PROC_COEF, 0xA2D0}, /* Increase ADC current */
1475
1476 {0x0B, AC_VERB_SET_COEF_INDEX, 0x001A},
1477 {0x0B, AC_VERB_SET_PROC_COEF, 0x02A9}, /* Mute speaker */
1478
1479 {0x0B, AC_VERB_SET_COEF_INDEX, 0x001B},
1480	{0x0B, AC_VERB_SET_PROC_COEF, 0x1006}, /* Remove noise */
1481
1482 {} /* terminator */
1483};
1484
1485/* Speaker Amp Gain is controlled by the vendor widget's coef 4 */
1486static const DECLARE_TLV_DB_SCALE(cs421x_speaker_boost_db_scale, 900, 300, 0);
1487
1488static int cs421x_boost_vol_info(struct snd_kcontrol *kcontrol,
1489 struct snd_ctl_elem_info *uinfo)
1490{
1491 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
1492 uinfo->count = 1;
1493 uinfo->value.integer.min = 0;
1494 uinfo->value.integer.max = 3;
1495 return 0;
1496}
1497
1498static int cs421x_boost_vol_get(struct snd_kcontrol *kcontrol,
1499 struct snd_ctl_elem_value *ucontrol)
1500{
1501 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1502
1503 ucontrol->value.integer.value[0] =
1504 cs_vendor_coef_get(codec, CS421X_IDX_SPK_CTL) & 0x0003;
1505 return 0;
1506}
1507
1508static int cs421x_boost_vol_put(struct snd_kcontrol *kcontrol,
1509 struct snd_ctl_elem_value *ucontrol)
1510{
1511 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1512
1513 unsigned int vol = ucontrol->value.integer.value[0];
1514 unsigned int coef =
1515 cs_vendor_coef_get(codec, CS421X_IDX_SPK_CTL);
1516 unsigned int original_coef = coef;
1517
1518 coef &= ~0x0003;
1519 coef |= (vol & 0x0003);
1520 if (original_coef == coef)
1521 return 0;
1522 else {
1523 cs_vendor_coef_set(codec, CS421X_IDX_SPK_CTL, coef);
1524 return 1;
1525 }
1526}
1527
1528static const struct snd_kcontrol_new cs421x_speaker_bost_ctl = {
1529
1530 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1531 .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
1532 SNDRV_CTL_ELEM_ACCESS_TLV_READ),
1533 .name = "Speaker Boost Playback Volume",
1534 .info = cs421x_boost_vol_info,
1535 .get = cs421x_boost_vol_get,
1536 .put = cs421x_boost_vol_put,
1537 .tlv = { .p = cs421x_speaker_boost_db_scale },
1538};
1539
1540static void cs421x_pinmux_init(struct hda_codec *codec)
1541{
1542 struct cs_spec *spec = codec->spec;
1543 unsigned int def_conf, coef;
1544
1545 /* GPIO, DMIC_SCL, DMIC_SDA and SENSE_B are multiplexed */
1546 coef = cs_vendor_coef_get(codec, CS421X_IDX_DEV_CFG);
1547
1548 if (spec->gpio_mask)
1549 coef |= 0x0008; /* B1,B2 are GPIOs */
1550 else
1551 coef &= ~0x0008;
1552
1553 if (spec->sense_b)
1554 coef |= 0x0010; /* B2 is SENSE_B, not inverted */
1555 else
1556 coef &= ~0x0010;
1557
1558 cs_vendor_coef_set(codec, CS421X_IDX_DEV_CFG, coef);
1559
1560 if ((spec->gpio_mask || spec->sense_b) &&
1561 is_active_pin(codec, CS421X_DMIC_PIN_NID)) {
1562
1563 /*
1564 GPIO or SENSE_B forced - disconnect the DMIC pin.
1565 */
1566 def_conf = snd_hda_codec_get_pincfg(codec, CS421X_DMIC_PIN_NID);
1567 def_conf &= ~AC_DEFCFG_PORT_CONN;
1568 def_conf |= (AC_JACK_PORT_NONE << AC_DEFCFG_PORT_CONN_SHIFT);
1569 snd_hda_codec_set_pincfg(codec, CS421X_DMIC_PIN_NID, def_conf);
1570 }
1571}
1572
1573static void init_cs421x_digital(struct hda_codec *codec)
1574{
1575 struct cs_spec *spec = codec->spec;
1576 struct auto_pin_cfg *cfg = &spec->autocfg;
1577 int i;
1578
1579
1580 for (i = 0; i < cfg->dig_outs; i++) {
1581 hda_nid_t nid = cfg->dig_out_pins[i];
1582 if (!cfg->speaker_outs)
1583 continue;
1584 if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) {
1585
1586 snd_hda_codec_write(codec, nid, 0,
1587 AC_VERB_SET_UNSOLICITED_ENABLE,
1588 AC_USRSP_EN | SPDIF_EVENT);
1589 spec->spdif_detect = 1;
1590 }
1591 }
1592}
1593
1594static int cs421x_init(struct hda_codec *codec)
1595{
1596 struct cs_spec *spec = codec->spec;
1597
1598 snd_hda_sequence_write(codec, cs421x_coef_init_verbs);
1599 snd_hda_sequence_write(codec, cs421x_coef_init_verbs_A1_silicon_fixes);
1600
1601 cs421x_pinmux_init(codec);
1602
1603 if (spec->gpio_mask) {
1604 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK,
1605 spec->gpio_mask);
1606 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION,
1607 spec->gpio_dir);
1608 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
1609 spec->gpio_data);
1610 }
1611
1612 init_output(codec);
1613 init_input(codec);
1614 init_cs421x_digital(codec);
1615
1616 return 0;
1617}
1618
1619/*
1620 * CS4210 Input MUX (1 ADC)
1621 */
1622static int cs421x_mux_enum_info(struct snd_kcontrol *kcontrol,
1623 struct snd_ctl_elem_info *uinfo)
1624{
1625 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1626 struct cs_spec *spec = codec->spec;
1627
1628 return snd_hda_input_mux_info(&spec->input_mux, uinfo);
1629}
1630
1631static int cs421x_mux_enum_get(struct snd_kcontrol *kcontrol,
1632 struct snd_ctl_elem_value *ucontrol)
1633{
1634 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1635 struct cs_spec *spec = codec->spec;
1636
1637 ucontrol->value.enumerated.item[0] = spec->cur_input;
1638 return 0;
1639}
1640
1641static int cs421x_mux_enum_put(struct snd_kcontrol *kcontrol,
1642 struct snd_ctl_elem_value *ucontrol)
1643{
1644 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1645 struct cs_spec *spec = codec->spec;
1646
1647 return snd_hda_input_mux_put(codec, &spec->input_mux, ucontrol,
1648 spec->adc_nid[0], &spec->cur_input);
1649
1650}
1651
1652static struct snd_kcontrol_new cs421x_capture_source = {
1653
1654 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1655 .name = "Capture Source",
1656 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
1657 .info = cs421x_mux_enum_info,
1658 .get = cs421x_mux_enum_get,
1659 .put = cs421x_mux_enum_put,
1660};
1661
1662static int cs421x_add_input_volume_control(struct hda_codec *codec, int item)
1663{
1664 struct cs_spec *spec = codec->spec;
1665 struct auto_pin_cfg *cfg = &spec->autocfg;
1666 const struct hda_input_mux *imux = &spec->input_mux;
1667 hda_nid_t pin = cfg->inputs[item].pin;
1668 struct snd_kcontrol *kctl;
1669 u32 caps;
1670
1671 if (!(get_wcaps(codec, pin) & AC_WCAP_IN_AMP))
1672 return 0;
1673
1674 caps = query_amp_caps(codec, pin, HDA_INPUT);
1675 caps = (caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT;
1676 if (caps <= 1)
1677 return 0;
1678
1679 return add_volume(codec, imux->items[item].label, 0,
1680 HDA_COMPOSE_AMP_VAL(pin, 3, 0, HDA_INPUT), 1, &kctl);
1681}
1682
1683/* add an (input-boost) volume control to the given input pin */
1684static int build_cs421x_input(struct hda_codec *codec)
1685{
1686 struct cs_spec *spec = codec->spec;
1687 struct auto_pin_cfg *cfg = &spec->autocfg;
1688 struct hda_input_mux *imux = &spec->input_mux;
1689 int i, err, type_idx;
1690 const char *label;
1691
1692 if (!spec->num_inputs)
1693 return 0;
1694
1695 /* make bind-capture */
1696 spec->capture_bind[0] = make_bind_capture(codec, &snd_hda_bind_sw);
1697 spec->capture_bind[1] = make_bind_capture(codec, &snd_hda_bind_vol);
1698 for (i = 0; i < 2; i++) {
1699 struct snd_kcontrol *kctl;
1700 int n;
1701 if (!spec->capture_bind[i])
1702 return -ENOMEM;
1703 kctl = snd_ctl_new1(&cs_capture_ctls[i], codec);
1704 if (!kctl)
1705 return -ENOMEM;
1706 kctl->private_value = (long)spec->capture_bind[i];
1707 err = snd_hda_ctl_add(codec, 0, kctl);
1708 if (err < 0)
1709 return err;
1710 for (n = 0; n < AUTO_PIN_LAST; n++) {
1711 if (!spec->adc_nid[n])
1712 continue;
1713 err = snd_hda_add_nid(codec, kctl, 0, spec->adc_nid[n]);
1714 if (err < 0)
1715 return err;
1716 }
1717 }
1718
1719 /* Add Input MUX Items + Capture Volume/Switch */
1720 for (i = 0; i < spec->num_inputs; i++) {
1721 label = hda_get_autocfg_input_label(codec, cfg, i);
1722 snd_hda_add_imux_item(imux, label, spec->adc_idx[i], &type_idx);
1723
1724 err = cs421x_add_input_volume_control(codec, i);
1725 if (err < 0)
1726 return err;
1727 }
1728
1729 /*
1730 Add 'Capture Source' Switch if
1731	 * 2 inputs and no mic detect
1732 * 3 inputs
1733 */
1734 if ((spec->num_inputs == 2 && !spec->mic_detect) ||
1735 (spec->num_inputs == 3)) {
1736
1737 err = snd_hda_ctl_add(codec, spec->adc_nid[0],
1738 snd_ctl_new1(&cs421x_capture_source, codec));
1739 if (err < 0)
1740 return err;
1741 }
1742
1743 return 0;
1744}
1745
1746/* Single DAC (Mute/Gain) */
1747static int build_cs421x_output(struct hda_codec *codec)
1748{
1749 hda_nid_t dac = CS4210_DAC_NID;
1750 struct cs_spec *spec = codec->spec;
1751 struct auto_pin_cfg *cfg = &spec->autocfg;
1752 struct snd_kcontrol *kctl;
1753 int err;
1754 char *name = "HP/Speakers";
1755
1756 fix_volume_caps(codec, dac);
1757 if (!spec->vmaster_sw) {
1758 err = add_vmaster(codec, dac);
1759 if (err < 0)
1760 return err;
1761 }
1762
1763 err = add_mute(codec, name, 0,
1764 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
1765 if (err < 0)
1766 return err;
1767 err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
1768 if (err < 0)
1769 return err;
1770
1771 err = add_volume(codec, name, 0,
1772 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
1773 if (err < 0)
1774 return err;
1775 err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
1776 if (err < 0)
1777 return err;
1778
1779 if (cfg->speaker_outs) {
1780 err = snd_hda_ctl_add(codec, 0,
1781 snd_ctl_new1(&cs421x_speaker_bost_ctl, codec));
1782 if (err < 0)
1783 return err;
1784 }
1785 return err;
1786}
1787
1788static int cs421x_build_controls(struct hda_codec *codec)
1789{
1790 int err;
1791
1792 err = build_cs421x_output(codec);
1793 if (err < 0)
1794 return err;
1795 err = build_cs421x_input(codec);
1796 if (err < 0)
1797 return err;
1798 err = build_digital_output(codec);
1799 if (err < 0)
1800 return err;
1801 return cs421x_init(codec);
1802}
1803
1804static void cs421x_unsol_event(struct hda_codec *codec, unsigned int res)
1805{
1806 switch ((res >> 26) & 0x3f) {
1807 case HP_EVENT:
1808 case SPDIF_EVENT:
1809 cs_automute(codec);
1810 break;
1811
1812 case MIC_EVENT:
1813 cs_automic(codec);
1814 break;
1815 }
1816}
1817
1818static int parse_cs421x_input(struct hda_codec *codec)
1819{
1820 struct cs_spec *spec = codec->spec;
1821 struct auto_pin_cfg *cfg = &spec->autocfg;
1822 int i;
1823
1824 for (i = 0; i < cfg->num_inputs; i++) {
1825 hda_nid_t pin = cfg->inputs[i].pin;
1826 spec->adc_nid[i] = get_adc(codec, pin, &spec->adc_idx[i]);
1827 spec->cur_input = spec->last_input = i;
1828 spec->num_inputs++;
1829
1830 /* check whether the automatic mic switch is available */
1831 if (is_ext_mic(codec, i) && cfg->num_inputs >= 2) {
1832 spec->mic_detect = 1;
1833 spec->automic_idx = i;
1834 }
1835 }
1836 return 0;
1837}
1838
1839static int cs421x_parse_auto_config(struct hda_codec *codec)
1840{
1841 struct cs_spec *spec = codec->spec;
1842 int err;
1843
1844 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
1845 if (err < 0)
1846 return err;
1847 err = parse_output(codec);
1848 if (err < 0)
1849 return err;
1850 err = parse_cs421x_input(codec);
1851 if (err < 0)
1852 return err;
1853 err = parse_digital_output(codec);
1854 if (err < 0)
1855 return err;
1856 return 0;
1857}
1858
1859#ifdef CONFIG_PM
1860/*
1861 Manage PDREF when transitioning to D3hot
1862 (DAC,ADC) -> D3, PDREF=1, AFG->D3
1863*/
1864static int cs421x_suspend(struct hda_codec *codec, pm_message_t state)
1865{
1866 unsigned int coef;
1867
1868 snd_hda_shutup_pins(codec);
1869
1870 snd_hda_codec_write(codec, CS4210_DAC_NID, 0,
1871 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
1872 snd_hda_codec_write(codec, CS4210_ADC_NID, 0,
1873 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
1874
1875 coef = cs_vendor_coef_get(codec, CS421X_IDX_DEV_CFG);
1876 coef |= 0x0004; /* PDREF */
1877 cs_vendor_coef_set(codec, CS421X_IDX_DEV_CFG, coef);
1878
1879 return 0;
1880}
1881#endif
1882
1883static struct hda_codec_ops cs4210_patch_ops = {
1884 .build_controls = cs421x_build_controls,
1885 .build_pcms = cs_build_pcms,
1886 .init = cs421x_init,
1887 .free = cs_free,
1888 .unsol_event = cs421x_unsol_event,
1889#ifdef CONFIG_PM
1890 .suspend = cs421x_suspend,
1891#endif
1892};
1893
1894static int patch_cs421x(struct hda_codec *codec)
1895{
1896 struct cs_spec *spec;
1897 int err;
1898
1899 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
1900 if (!spec)
1901 return -ENOMEM;
1902 codec->spec = spec;
1903
1904 spec->vendor_nid = CS421X_VENDOR_NID;
1905
1906 spec->board_config =
1907 snd_hda_check_board_config(codec, CS421X_MODELS,
1908 cs421x_models, cs421x_cfg_tbl);
1909 if (spec->board_config >= 0)
1910 fix_pincfg(codec, spec->board_config, cs421x_pincfgs);
1911 /*
1912 Setup GPIO/SENSE for each board (if used)
1913 */
1914 switch (spec->board_config) {
1915 case CS421X_CDB4210:
1916 snd_printd("CS4210 board: %s\n",
1917 cs421x_models[spec->board_config]);
1918/* spec->gpio_mask = 3;
1919 spec->gpio_dir = 3;
1920 spec->gpio_data = 3;
1921*/
1922 spec->sense_b = 1;
1923
1924 break;
1925 }
1926
1927 /*
1928 Update the GPIO/DMIC/SENSE_B pinmux before the configuration
1929 is auto-parsed. If GPIO or SENSE_B is forced, DMIC input
1930 is disabled.
1931 */
1932 cs421x_pinmux_init(codec);
1933
1934 err = cs421x_parse_auto_config(codec);
1935 if (err < 0)
1936 goto error;
1937
1938 codec->patch_ops = cs4210_patch_ops;
1939
1940 return 0;
1941
1942 error:
1943 kfree(codec->spec);
1944 codec->spec = NULL;
1945 return err;
1946}
1947
1275 1948
1276/* 1949/*
1277 * patch entries 1950 * patch entries
@@ -1279,11 +1952,13 @@ static int patch_cs420x(struct hda_codec *codec)
1279static const struct hda_codec_preset snd_hda_preset_cirrus[] = { 1952static const struct hda_codec_preset snd_hda_preset_cirrus[] = {
1280 { .id = 0x10134206, .name = "CS4206", .patch = patch_cs420x }, 1953 { .id = 0x10134206, .name = "CS4206", .patch = patch_cs420x },
1281 { .id = 0x10134207, .name = "CS4207", .patch = patch_cs420x }, 1954 { .id = 0x10134207, .name = "CS4207", .patch = patch_cs420x },
1955 { .id = 0x10134210, .name = "CS4210", .patch = patch_cs421x },
1282 {} /* terminator */ 1956 {} /* terminator */
1283}; 1957};
1284 1958
1285MODULE_ALIAS("snd-hda-codec-id:10134206"); 1959MODULE_ALIAS("snd-hda-codec-id:10134206");
1286MODULE_ALIAS("snd-hda-codec-id:10134207"); 1960MODULE_ALIAS("snd-hda-codec-id:10134207");
1961MODULE_ALIAS("snd-hda-codec-id:10134210");
1287 1962
1288MODULE_LICENSE("GPL"); 1963MODULE_LICENSE("GPL");
1289MODULE_DESCRIPTION("Cirrus Logic HD-audio codec"); 1964MODULE_DESCRIPTION("Cirrus Logic HD-audio codec");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 884f67b8f4e0..502fc9499453 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -446,6 +446,19 @@ static int conexant_init_jacks(struct hda_codec *codec)
446 return 0; 446 return 0;
447} 447}
448 448
449static void conexant_set_power(struct hda_codec *codec, hda_nid_t fg,
450 unsigned int power_state)
451{
452 if (power_state == AC_PWRST_D3)
453 msleep(100);
454 snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
455 power_state);
456 /* partial workaround for "azx_get_response timeout" */
457 if (power_state == AC_PWRST_D0)
458 msleep(10);
459 snd_hda_codec_set_power_to_all(codec, fg, power_state, true);
460}
461
449static int conexant_init(struct hda_codec *codec) 462static int conexant_init(struct hda_codec *codec)
450{ 463{
451 struct conexant_spec *spec = codec->spec; 464 struct conexant_spec *spec = codec->spec;
@@ -588,6 +601,7 @@ static const struct hda_codec_ops conexant_patch_ops = {
588 .build_pcms = conexant_build_pcms, 601 .build_pcms = conexant_build_pcms,
589 .init = conexant_init, 602 .init = conexant_init,
590 .free = conexant_free, 603 .free = conexant_free,
604 .set_power_state = conexant_set_power,
591#ifdef CONFIG_SND_HDA_POWER_SAVE 605#ifdef CONFIG_SND_HDA_POWER_SAVE
592 .suspend = conexant_suspend, 606 .suspend = conexant_suspend,
593#endif 607#endif
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 52ce07534e5b..694327ae8b71 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2386,7 +2386,7 @@ static int alc_suspend(struct hda_codec *codec, pm_message_t state)
2386} 2386}
2387#endif 2387#endif
2388 2388
2389#ifdef SND_HDA_NEEDS_RESUME 2389#ifdef CONFIG_PM
2390static int alc_resume(struct hda_codec *codec) 2390static int alc_resume(struct hda_codec *codec)
2391{ 2391{
2392 msleep(150); /* to avoid pop noise */ 2392 msleep(150); /* to avoid pop noise */
@@ -2406,7 +2406,7 @@ static const struct hda_codec_ops alc_patch_ops = {
2406 .init = alc_init, 2406 .init = alc_init,
2407 .free = alc_free, 2407 .free = alc_free,
2408 .unsol_event = alc_unsol_event, 2408 .unsol_event = alc_unsol_event,
2409#ifdef SND_HDA_NEEDS_RESUME 2409#ifdef CONFIG_PM
2410 .resume = alc_resume, 2410 .resume = alc_resume,
2411#endif 2411#endif
2412#ifdef CONFIG_SND_HDA_POWER_SAVE 2412#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -2801,7 +2801,8 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
2801 int i; 2801 int i;
2802 2802
2803 again: 2803 again:
2804 spec->multiout.num_dacs = 0; 2804 /* set num_dacs once to full for alc_auto_look_for_dac() */
2805 spec->multiout.num_dacs = cfg->line_outs;
2805 spec->multiout.hp_nid = 0; 2806 spec->multiout.hp_nid = 0;
2806 spec->multiout.extra_out_nid[0] = 0; 2807 spec->multiout.extra_out_nid[0] = 0;
2807 memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids)); 2808 memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
@@ -2834,6 +2835,8 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
2834 } 2835 }
2835 } 2836 }
2836 2837
2838 /* re-count num_dacs and squash invalid entries */
2839 spec->multiout.num_dacs = 0;
2837 for (i = 0; i < cfg->line_outs; i++) { 2840 for (i = 0; i < cfg->line_outs; i++) {
2838 if (spec->private_dac_nids[i]) 2841 if (spec->private_dac_nids[i])
2839 spec->multiout.num_dacs++; 2842 spec->multiout.num_dacs++;
@@ -4410,7 +4413,7 @@ static void alc269_shutup(struct hda_codec *codec)
4410 } 4413 }
4411} 4414}
4412 4415
4413#ifdef SND_HDA_NEEDS_RESUME 4416#ifdef CONFIG_PM
4414static int alc269_resume(struct hda_codec *codec) 4417static int alc269_resume(struct hda_codec *codec)
4415{ 4418{
4416 if ((alc_read_coef_idx(codec, 0) & 0x00ff) == 0x018) { 4419 if ((alc_read_coef_idx(codec, 0) & 0x00ff) == 0x018) {
@@ -4433,7 +4436,7 @@ static int alc269_resume(struct hda_codec *codec)
4433 hda_call_check_power_status(codec, 0x01); 4436 hda_call_check_power_status(codec, 0x01);
4434 return 0; 4437 return 0;
4435} 4438}
4436#endif /* SND_HDA_NEEDS_RESUME */ 4439#endif /* CONFIG_PM */
4437 4440
4438static void alc269_fixup_hweq(struct hda_codec *codec, 4441static void alc269_fixup_hweq(struct hda_codec *codec,
4439 const struct alc_fixup *fix, int action) 4442 const struct alc_fixup *fix, int action)
@@ -4725,7 +4728,7 @@ static int patch_alc269(struct hda_codec *codec)
4725 spec->vmaster_nid = 0x02; 4728 spec->vmaster_nid = 0x02;
4726 4729
4727 codec->patch_ops = alc_patch_ops; 4730 codec->patch_ops = alc_patch_ops;
4728#ifdef SND_HDA_NEEDS_RESUME 4731#ifdef CONFIG_PM
4729 codec->patch_ops.resume = alc269_resume; 4732 codec->patch_ops.resume = alc269_resume;
4730#endif 4733#endif
4731 if (board_config == ALC_MODEL_AUTO) 4734 if (board_config == ALC_MODEL_AUTO)
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 56425a53cf1b..fcf4c7142103 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -95,6 +95,7 @@ enum {
95 STAC_92HD83XXX_PWR_REF, 95 STAC_92HD83XXX_PWR_REF,
96 STAC_DELL_S14, 96 STAC_DELL_S14,
97 STAC_92HD83XXX_HP, 97 STAC_92HD83XXX_HP,
98 STAC_92HD83XXX_HP_cNB11_INTQUAD,
98 STAC_HP_DV7_4000, 99 STAC_HP_DV7_4000,
99 STAC_92HD83XXX_MODELS 100 STAC_92HD83XXX_MODELS
100}; 101};
@@ -1636,10 +1637,17 @@ static const unsigned int hp_dv7_4000_pin_configs[10] = {
1636 0x40f000f0, 0x40f000f0, 1637 0x40f000f0, 0x40f000f0,
1637}; 1638};
1638 1639
1640static const unsigned int hp_cNB11_intquad_pin_configs[10] = {
1641 0x40f000f0, 0x0221101f, 0x02a11020, 0x92170110,
1642 0x40f000f0, 0x92170110, 0x40f000f0, 0xd5a30130,
1643 0x40f000f0, 0x40f000f0,
1644};
1645
1639static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { 1646static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
1640 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, 1647 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
1641 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, 1648 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
1642 [STAC_DELL_S14] = dell_s14_pin_configs, 1649 [STAC_DELL_S14] = dell_s14_pin_configs,
1650 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs,
1643 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, 1651 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
1644}; 1652};
1645 1653
@@ -1649,6 +1657,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
1649 [STAC_92HD83XXX_PWR_REF] = "mic-ref", 1657 [STAC_92HD83XXX_PWR_REF] = "mic-ref",
1650 [STAC_DELL_S14] = "dell-s14", 1658 [STAC_DELL_S14] = "dell-s14",
1651 [STAC_92HD83XXX_HP] = "hp", 1659 [STAC_92HD83XXX_HP] = "hp",
1660 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
1652 [STAC_HP_DV7_4000] = "hp-dv7-4000", 1661 [STAC_HP_DV7_4000] = "hp-dv7-4000",
1653}; 1662};
1654 1663
@@ -1661,7 +1670,47 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
1661 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, 1670 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
1662 "unknown Dell", STAC_DELL_S14), 1671 "unknown Dell", STAC_DELL_S14),
1663 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600, 1672 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
1664 "HP", STAC_92HD83XXX_HP), 1673 "HP", STAC_92HD83XXX_HP),
1674 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656,
1675 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1676 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1657,
1677 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1678 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658,
1679 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1680 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659,
1681 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1682 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A,
1683 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1684 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B,
1685 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1686 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3388,
1687 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1688 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3389,
1689 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1690 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355B,
1691 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1692 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355C,
1693 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1694 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355D,
1695 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1696 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355E,
1697 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1698 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x355F,
1699 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1700 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3560,
1701 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1702 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358B,
1703 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1704 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358C,
1705 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1706 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x358D,
1707 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1708 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3591,
1709 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1710 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3592,
1711 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1712 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3593,
1713 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
1665 {} /* terminator */ 1714 {} /* terminator */
1666}; 1715};
1667 1716
@@ -4885,7 +4934,18 @@ static void stac927x_proc_hook(struct snd_info_buffer *buffer,
4885#define stac927x_proc_hook NULL 4934#define stac927x_proc_hook NULL
4886#endif 4935#endif
4887 4936
4888#ifdef SND_HDA_NEEDS_RESUME 4937#ifdef CONFIG_PM
4938static int stac92xx_pre_resume(struct hda_codec *codec)
4939{
4940 struct sigmatel_spec *spec = codec->spec;
4941
4942 /* sync mute LED */
4943 if (spec->gpio_led)
4944 stac_gpio_set(codec, spec->gpio_mask,
4945 spec->gpio_dir, spec->gpio_data);
4946 return 0;
4947}
4948
4889static int stac92xx_resume(struct hda_codec *codec) 4949static int stac92xx_resume(struct hda_codec *codec)
4890{ 4950{
4891 struct sigmatel_spec *spec = codec->spec; 4951 struct sigmatel_spec *spec = codec->spec;
@@ -4901,29 +4961,19 @@ static int stac92xx_resume(struct hda_codec *codec)
4901 stac_issue_unsol_event(codec, 4961 stac_issue_unsol_event(codec,
4902 spec->autocfg.line_out_pins[0]); 4962 spec->autocfg.line_out_pins[0]);
4903 } 4963 }
4904 /* sync mute LED */
4905 if (spec->gpio_led)
4906 hda_call_check_power_status(codec, 0x01);
4907 return 0; 4964 return 0;
4908} 4965}
4909 4966
4967#ifdef CONFIG_SND_HDA_POWER_SAVE
4910/* 4968/*
4911 * using power check for controlling mute led of HP notebooks 4969 * For this feature CONFIG_SND_HDA_POWER_SAVE is needed
4912 * check for mute state only on Speakers (nid = 0x10) 4970 * as mute LED state is updated in check_power_status hook
4913 *
4914 * For this feature CONFIG_SND_HDA_POWER_SAVE is needed, otherwise
4915 * the LED is NOT working properly !
4916 *
4917 * Changed name to reflect that it now works for any designated
4918 * model, not just HP HDX.
4919 */ 4971 */
4920 4972static int stac92xx_update_led_status(struct hda_codec *codec)
4921#ifdef CONFIG_SND_HDA_POWER_SAVE
4922static int stac92xx_hp_check_power_status(struct hda_codec *codec,
4923 hda_nid_t nid)
4924{ 4973{
4925 struct sigmatel_spec *spec = codec->spec; 4974 struct sigmatel_spec *spec = codec->spec;
4926 int i, muted = 1; 4975 int i, num_ext_dacs, muted = 1;
4976 hda_nid_t nid;
4927 4977
4928 for (i = 0; i < spec->multiout.num_dacs; i++) { 4978 for (i = 0; i < spec->multiout.num_dacs; i++) {
4929 nid = spec->multiout.dac_nids[i]; 4979 nid = spec->multiout.dac_nids[i];
@@ -4933,6 +4983,22 @@ static int stac92xx_hp_check_power_status(struct hda_codec *codec,
4933 break; 4983 break;
4934 } 4984 }
4935 } 4985 }
4986 if (muted && spec->multiout.hp_nid)
4987 if (!(snd_hda_codec_amp_read(codec,
4988 spec->multiout.hp_nid, 0, HDA_OUTPUT, 0) &
4989 HDA_AMP_MUTE)) {
4990 muted = 0; /* HP is not muted */
4991 }
4992 num_ext_dacs = ARRAY_SIZE(spec->multiout.extra_out_nid);
4993 for (i = 0; muted && i < num_ext_dacs; i++) {
4994 nid = spec->multiout.extra_out_nid[i];
4995 if (nid == 0)
4996 break;
4997 if (!(snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
4998 HDA_AMP_MUTE)) {
4999 muted = 0; /* extra output is not muted */
5000 }
5001 }
4936 if (muted) 5002 if (muted)
4937 spec->gpio_data &= ~spec->gpio_led; /* orange */ 5003 spec->gpio_data &= ~spec->gpio_led; /* orange */
4938 else 5004 else
@@ -4946,6 +5012,17 @@ static int stac92xx_hp_check_power_status(struct hda_codec *codec,
4946 stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data); 5012 stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
4947 return 0; 5013 return 0;
4948} 5014}
5015
5016/*
5017 * use power check for controlling mute led of HP notebooks
5018 */
5019static int stac92xx_check_power_status(struct hda_codec *codec,
5020 hda_nid_t nid)
5021{
5022 stac92xx_update_led_status(codec);
5023
5024 return 0;
5025}
4949#endif 5026#endif
4950 5027
4951static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state) 5028static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state)
@@ -4953,7 +5030,7 @@ static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state)
4953 stac92xx_shutup(codec); 5030 stac92xx_shutup(codec);
4954 return 0; 5031 return 0;
4955} 5032}
4956#endif 5033#endif /* CONFIG_PM */
4957 5034
4958static const struct hda_codec_ops stac92xx_patch_ops = { 5035static const struct hda_codec_ops stac92xx_patch_ops = {
4959 .build_controls = stac92xx_build_controls, 5036 .build_controls = stac92xx_build_controls,
@@ -4961,9 +5038,10 @@ static const struct hda_codec_ops stac92xx_patch_ops = {
4961 .init = stac92xx_init, 5038 .init = stac92xx_init,
4962 .free = stac92xx_free, 5039 .free = stac92xx_free,
4963 .unsol_event = stac92xx_unsol_event, 5040 .unsol_event = stac92xx_unsol_event,
4964#ifdef SND_HDA_NEEDS_RESUME 5041#ifdef CONFIG_PM
4965 .suspend = stac92xx_suspend, 5042 .suspend = stac92xx_suspend,
4966 .resume = stac92xx_resume, 5043 .resume = stac92xx_resume,
5044 .pre_resume = stac92xx_pre_resume,
4967#endif 5045#endif
4968 .reboot_notify = stac92xx_shutup, 5046 .reboot_notify = stac92xx_shutup,
4969}; 5047};
@@ -5482,7 +5560,7 @@ again:
5482 spec->gpio_data |= spec->gpio_led; 5560 spec->gpio_data |= spec->gpio_led;
5483 /* register check_power_status callback. */ 5561 /* register check_power_status callback. */
5484 codec->patch_ops.check_power_status = 5562 codec->patch_ops.check_power_status =
5485 stac92xx_hp_check_power_status; 5563 stac92xx_check_power_status;
5486 } 5564 }
5487#endif 5565#endif
5488 5566
@@ -5810,7 +5888,7 @@ again:
5810 spec->gpio_data |= spec->gpio_led; 5888 spec->gpio_data |= spec->gpio_led;
5811 /* register check_power_status callback. */ 5889 /* register check_power_status callback. */
5812 codec->patch_ops.check_power_status = 5890 codec->patch_ops.check_power_status =
5813 stac92xx_hp_check_power_status; 5891 stac92xx_check_power_status;
5814 } 5892 }
5815#endif 5893#endif
5816 5894
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index f38160b00e16..84d8798bf33a 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1708,7 +1708,7 @@ static void via_unsol_event(struct hda_codec *codec,
1708 via_gpio_control(codec); 1708 via_gpio_control(codec);
1709} 1709}
1710 1710
1711#ifdef SND_HDA_NEEDS_RESUME 1711#ifdef CONFIG_PM
1712static int via_suspend(struct hda_codec *codec, pm_message_t state) 1712static int via_suspend(struct hda_codec *codec, pm_message_t state)
1713{ 1713{
1714 struct via_spec *spec = codec->spec; 1714 struct via_spec *spec = codec->spec;
@@ -1736,7 +1736,7 @@ static const struct hda_codec_ops via_patch_ops = {
1736 .init = via_init, 1736 .init = via_init,
1737 .free = via_free, 1737 .free = via_free,
1738 .unsol_event = via_unsol_event, 1738 .unsol_event = via_unsol_event,
1739#ifdef SND_HDA_NEEDS_RESUME 1739#ifdef CONFIG_PM
1740 .suspend = via_suspend, 1740 .suspend = via_suspend,
1741#endif 1741#endif
1742#ifdef CONFIG_SND_HDA_POWER_SAVE 1742#ifdef CONFIG_SND_HDA_POWER_SAVE
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index ff29380c9ed3..76258f2a2ffb 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -907,6 +907,7 @@ static int ldo_regulator_register(struct snd_soc_codec *codec,
907 struct regulator_init_data *init_data, 907 struct regulator_init_data *init_data,
908 int voltage) 908 int voltage)
909{ 909{
910 dev_err(codec->dev, "this setup needs regulator support in the kernel\n");
910 return -EINVAL; 911 return -EINVAL;
911} 912}
912 913
@@ -1218,6 +1219,34 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
1218 return 0; 1219 return 0;
1219} 1220}
1220 1221
1222static int sgtl5000_replace_vddd_with_ldo(struct snd_soc_codec *codec)
1223{
1224 struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
1225 int ret;
1226
1227 /* set internal ldo to 1.2v */
1228 ret = ldo_regulator_register(codec, &ldo_init_data, LDO_VOLTAGE);
1229 if (ret) {
1230 dev_err(codec->dev,
1231 "Failed to register vddd internal supplies: %d\n", ret);
1232 return ret;
1233 }
1234
1235 sgtl5000->supplies[VDDD].supply = LDO_CONSUMER_NAME;
1236
1237 ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
1238 sgtl5000->supplies);
1239
1240 if (ret) {
1241 ldo_regulator_remove(codec);
1242 dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
1243 return ret;
1244 }
1245
1246 dev_info(codec->dev, "Using internal LDO instead of VDDD\n");
1247 return 0;
1248}
1249
1221static int sgtl5000_enable_regulators(struct snd_soc_codec *codec) 1250static int sgtl5000_enable_regulators(struct snd_soc_codec *codec)
1222{ 1251{
1223 u16 reg; 1252 u16 reg;
@@ -1235,30 +1264,9 @@ static int sgtl5000_enable_regulators(struct snd_soc_codec *codec)
1235 if (!ret) 1264 if (!ret)
1236 external_vddd = 1; 1265 external_vddd = 1;
1237 else { 1266 else {
1238 /* set internal ldo to 1.2v */ 1267 ret = sgtl5000_replace_vddd_with_ldo(codec);
1239 int voltage = LDO_VOLTAGE; 1268 if (ret)
1240
1241 ret = ldo_regulator_register(codec, &ldo_init_data, voltage);
1242 if (ret) {
1243 dev_err(codec->dev,
1244 "Failed to register vddd internal supplies: %d\n",
1245 ret);
1246 return ret;
1247 }
1248
1249 sgtl5000->supplies[VDDD].supply = LDO_CONSUMER_NAME;
1250
1251 ret = regulator_bulk_get(codec->dev,
1252 ARRAY_SIZE(sgtl5000->supplies),
1253 sgtl5000->supplies);
1254
1255 if (ret) {
1256 ldo_regulator_remove(codec);
1257 dev_err(codec->dev,
1258 "Failed to request supplies: %d\n", ret);
1259
1260 return ret; 1269 return ret;
1261 }
1262 } 1270 }
1263 1271
1264 ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies), 1272 ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
@@ -1287,7 +1295,6 @@ static int sgtl5000_enable_regulators(struct snd_soc_codec *codec)
1287 * roll back to use internal LDO 1295 * roll back to use internal LDO
1288 */ 1296 */
1289 if (external_vddd && rev >= 0x11) { 1297 if (external_vddd && rev >= 0x11) {
1290 int voltage = LDO_VOLTAGE;
1291 /* disable all regulator first */ 1298 /* disable all regulator first */
1292 regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies), 1299 regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
1293 sgtl5000->supplies); 1300 sgtl5000->supplies);
@@ -1295,23 +1302,10 @@ static int sgtl5000_enable_regulators(struct snd_soc_codec *codec)
1295 regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies), 1302 regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
1296 sgtl5000->supplies); 1303 sgtl5000->supplies);
1297 1304
1298 ret = ldo_regulator_register(codec, &ldo_init_data, voltage); 1305 ret = sgtl5000_replace_vddd_with_ldo(codec);
1299 if (ret) 1306 if (ret)
1300 return ret; 1307 return ret;
1301 1308
1302 sgtl5000->supplies[VDDD].supply = LDO_CONSUMER_NAME;
1303
1304 ret = regulator_bulk_get(codec->dev,
1305 ARRAY_SIZE(sgtl5000->supplies),
1306 sgtl5000->supplies);
1307 if (ret) {
1308 ldo_regulator_remove(codec);
1309 dev_err(codec->dev,
1310 "Failed to request supplies: %d\n", ret);
1311
1312 return ret;
1313 }
1314
1315 ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies), 1309 ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
1316 sgtl5000->supplies); 1310 sgtl5000->supplies);
1317 if (ret) 1311 if (ret)
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 8499c563a9b5..60d740ebeb5b 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3409,6 +3409,9 @@ static irqreturn_t wm8962_irq(int irq, void *data)
3409 active = snd_soc_read(codec, WM8962_INTERRUPT_STATUS_2); 3409 active = snd_soc_read(codec, WM8962_INTERRUPT_STATUS_2);
3410 active &= ~mask; 3410 active &= ~mask;
3411 3411
3412 /* Acknowledge the interrupts */
3413 snd_soc_write(codec, WM8962_INTERRUPT_STATUS_2, active);
3414
3412 if (active & WM8962_FLL_LOCK_EINT) { 3415 if (active & WM8962_FLL_LOCK_EINT) {
3413 dev_dbg(codec->dev, "FLL locked\n"); 3416 dev_dbg(codec->dev, "FLL locked\n");
3414 complete(&wm8962->fll_lock); 3417 complete(&wm8962->fll_lock);
@@ -3433,9 +3436,6 @@ static irqreturn_t wm8962_irq(int irq, void *data)
3433 msecs_to_jiffies(250)); 3436 msecs_to_jiffies(250));
3434 } 3437 }
3435 3438
3436 /* Acknowledge the interrupts */
3437 snd_soc_write(codec, WM8962_INTERRUPT_STATUS_2, active);
3438
3439 return IRQ_HANDLED; 3439 return IRQ_HANDLED;
3440} 3440}
3441 3441
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index 9259f1f34899..1f11525d97e8 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -62,9 +62,9 @@ static void davinci_vcif_start(struct snd_pcm_substream *substream)
62 w = readl(davinci_vc->base + DAVINCI_VC_CTRL); 62 w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
63 63
64 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 64 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
65 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 1); 65 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 0);
66 else 66 else
67 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 1); 67 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 0);
68 68
69 writel(w, davinci_vc->base + DAVINCI_VC_CTRL); 69 writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
70} 70}
@@ -80,9 +80,9 @@ static void davinci_vcif_stop(struct snd_pcm_substream *substream)
80 /* Reset transmitter/receiver and sample rate/frame sync generators */ 80 /* Reset transmitter/receiver and sample rate/frame sync generators */
81 w = readl(davinci_vc->base + DAVINCI_VC_CTRL); 81 w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
82 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 82 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
83 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 0); 83 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 1);
84 else 84 else
85 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 0); 85 MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 1);
86 86
87 writel(w, davinci_vc->base + DAVINCI_VC_CTRL); 87 writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
88} 88}
@@ -159,6 +159,7 @@ static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd,
159 case SNDRV_PCM_TRIGGER_RESUME: 159 case SNDRV_PCM_TRIGGER_RESUME:
160 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 160 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
161 davinci_vcif_start(substream); 161 davinci_vcif_start(substream);
162 break;
162 case SNDRV_PCM_TRIGGER_STOP: 163 case SNDRV_PCM_TRIGGER_STOP:
163 case SNDRV_PCM_TRIGGER_SUSPEND: 164 case SNDRV_PCM_TRIGGER_SUSPEND:
164 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 165 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 1568eea31f41..c086b78539ee 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -21,6 +21,7 @@
21#include <plat/audio.h> 21#include <plat/audio.h>
22 22
23#include "dma.h" 23#include "dma.h"
24#include "idma.h"
24#include "i2s.h" 25#include "i2s.h"
25#include "i2s-regs.h" 26#include "i2s-regs.h"
26 27
@@ -60,6 +61,7 @@ struct i2s_dai {
60 /* DMA parameters */ 61 /* DMA parameters */
61 struct s3c_dma_params dma_playback; 62 struct s3c_dma_params dma_playback;
62 struct s3c_dma_params dma_capture; 63 struct s3c_dma_params dma_capture;
64 struct s3c_dma_params idma_playback;
63 u32 quirks; 65 u32 quirks;
64 u32 suspend_i2smod; 66 u32 suspend_i2smod;
65 u32 suspend_i2scon; 67 u32 suspend_i2scon;
@@ -877,6 +879,10 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
877 if (i2s->quirks & QUIRK_NEED_RSTCLR) 879 if (i2s->quirks & QUIRK_NEED_RSTCLR)
878 writel(CON_RSTCLR, i2s->addr + I2SCON); 880 writel(CON_RSTCLR, i2s->addr + I2SCON);
879 881
882 if (i2s->quirks & QUIRK_SEC_DAI)
883 idma_reg_addr_init((void *)i2s->addr,
884 i2s->sec_dai->idma_playback.dma_addr);
885
880probe_exit: 886probe_exit:
881 /* Reset any constraint on RFS and BFS */ 887 /* Reset any constraint on RFS and BFS */
882 i2s->rfs = 0; 888 i2s->rfs = 0;
@@ -1077,6 +1083,7 @@ static __devinit int samsung_i2s_probe(struct platform_device *pdev)
1077 sec_dai->dma_playback.dma_size = 4; 1083 sec_dai->dma_playback.dma_size = 4;
1078 sec_dai->base = regs_base; 1084 sec_dai->base = regs_base;
1079 sec_dai->quirks = quirks; 1085 sec_dai->quirks = quirks;
1086 sec_dai->idma_playback.dma_addr = i2s_cfg->idma_addr;
1080 sec_dai->pri_dai = pri_dai; 1087 sec_dai->pri_dai = pri_dai;
1081 pri_dai->sec_dai = sec_dai; 1088 pri_dai->sec_dai = sec_dai;
1082 } 1089 }
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e44267f66216..83ad8ca27490 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -577,6 +577,7 @@ int snd_soc_suspend(struct device *dev)
577 case SND_SOC_BIAS_OFF: 577 case SND_SOC_BIAS_OFF:
578 codec->driver->suspend(codec, PMSG_SUSPEND); 578 codec->driver->suspend(codec, PMSG_SUSPEND);
579 codec->suspended = 1; 579 codec->suspended = 1;
580 codec->cache_sync = 1;
580 break; 581 break;
581 default: 582 default:
582 dev_dbg(codec->dev, "CODEC is on over suspend\n"); 583 dev_dbg(codec->dev, "CODEC is on over suspend\n");
@@ -1140,7 +1141,7 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
1140 } 1141 }
1141 } 1142 }
1142 cpu_dai->probed = 1; 1143 cpu_dai->probed = 1;
1143 /* mark cpu_dai as probed and add to card cpu_dai list */ 1144 /* mark cpu_dai as probed and add to card dai list */
1144 list_add(&cpu_dai->card_list, &card->dai_dev_list); 1145 list_add(&cpu_dai->card_list, &card->dai_dev_list);
1145 } 1146 }
1146 1147
@@ -1171,7 +1172,7 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
1171 } 1172 }
1172 } 1173 }
1173 1174
1174 /* mark cpu_dai as probed and add to card cpu_dai list */ 1175 /* mark codec_dai as probed and add to card dai list */
1175 codec_dai->probed = 1; 1176 codec_dai->probed = 1;
1176 list_add(&codec_dai->card_list, &card->dai_dev_list); 1177 list_add(&codec_dai->card_list, &card->dai_dev_list);
1177 } 1178 }
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index fbfcda062839..7e15914b3633 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -124,6 +124,36 @@ static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
124 return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL); 124 return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
125} 125}
126 126
127/* get snd_card from DAPM context */
128static inline struct snd_card *dapm_get_snd_card(
129 struct snd_soc_dapm_context *dapm)
130{
131 if (dapm->codec)
132 return dapm->codec->card->snd_card;
133 else if (dapm->platform)
134 return dapm->platform->card->snd_card;
135 else
136 BUG();
137
138 /* unreachable */
139 return NULL;
140}
141
142/* get soc_card from DAPM context */
143static inline struct snd_soc_card *dapm_get_soc_card(
144 struct snd_soc_dapm_context *dapm)
145{
146 if (dapm->codec)
147 return dapm->codec->card;
148 else if (dapm->platform)
149 return dapm->platform->card;
150 else
151 BUG();
152
153 /* unreachable */
154 return NULL;
155}
156
127static int soc_widget_read(struct snd_soc_dapm_widget *w, int reg) 157static int soc_widget_read(struct snd_soc_dapm_widget *w, int reg)
128{ 158{
129 if (w->codec) 159 if (w->codec)